-rw-r--r--Documentation/ABI/testing/sysfs-devices-memory14
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu14
-rw-r--r--Documentation/Changes2
-rw-r--r--Documentation/DocBook/Makefile38
-rw-r--r--Documentation/DocBook/media-entities.tmpl18
-rw-r--r--Documentation/DocBook/media-indices.tmpl4
-rw-r--r--Documentation/DocBook/procfs-guide.tmpl626
-rw-r--r--Documentation/DocBook/procfs_example.c201
-rw-r--r--Documentation/DocBook/v4l/common.xml35
-rw-r--r--Documentation/DocBook/v4l/compat.xml16
-rw-r--r--Documentation/DocBook/v4l/v4l2.xml26
-rw-r--r--Documentation/DocBook/v4l/videodev2.h.xml116
-rw-r--r--Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml238
-rw-r--r--Documentation/DocBook/v4l/vidioc-enuminput.xml36
-rw-r--r--Documentation/DocBook/v4l/vidioc-enumoutput.xml36
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-dv-preset.xml111
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-dv-timings.xml224
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-std.xml6
-rw-r--r--Documentation/DocBook/v4l/vidioc-query-dv-preset.xml85
-rw-r--r--Documentation/DocBook/v4l/vidioc-querystd.xml6
-rw-r--r--Documentation/SubmitChecklist5
-rw-r--r--Documentation/device-mapper/snapshot.txt60
-rw-r--r--Documentation/fb/viafb.txt12
-rw-r--r--Documentation/feature-removal-schedule.txt9
-rw-r--r--Documentation/filesystems/00-INDEX12
-rw-r--r--Documentation/filesystems/nfs/00-INDEX16
-rw-r--r--Documentation/filesystems/nfs/Exporting (renamed from Documentation/filesystems/Exporting)0
-rw-r--r--Documentation/filesystems/nfs/knfsd-stats.txt (renamed from Documentation/filesystems/knfsd-stats.txt)0
-rw-r--r--Documentation/filesystems/nfs/nfs-rdma.txt (renamed from Documentation/filesystems/nfs-rdma.txt)0
-rw-r--r--Documentation/filesystems/nfs/nfs.txt (renamed from Documentation/filesystems/nfs.txt)0
-rw-r--r--Documentation/filesystems/nfs/nfs41-server.txt (renamed from Documentation/filesystems/nfs41-server.txt)9
-rw-r--r--Documentation/filesystems/nfs/nfsroot.txt (renamed from Documentation/filesystems/nfsroot.txt)0
-rw-r--r--Documentation/filesystems/nfs/rpc-cache.txt (renamed from Documentation/filesystems/rpc-cache.txt)0
-rw-r--r--Documentation/filesystems/porting2
-rw-r--r--Documentation/filesystems/proc.txt9
-rw-r--r--Documentation/filesystems/seq_file.txt4
-rw-r--r--Documentation/gpio.txt15
-rw-r--r--Documentation/hwmon/lis3lv02d55
-rw-r--r--Documentation/hwmon/w83627ehf10
-rw-r--r--Documentation/i2c/writing-clients2
-rw-r--r--Documentation/infiniband/ipoib.txt10
-rw-r--r--Documentation/kernel-parameters.txt11
-rw-r--r--Documentation/memory-hotplug.txt11
-rw-r--r--Documentation/misc-devices/ad525x_dpot.txt57
-rw-r--r--Documentation/nommu-mmap.txt26
-rw-r--r--Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt93
-rw-r--r--Documentation/video4linux/gspca.txt34
-rw-r--r--Documentation/video4linux/sh_mobile_ceu_camera.txt157
-rw-r--r--Documentation/video4linux/v4l2-framework.txt16
-rw-r--r--Documentation/vm/hugetlbpage.txt262
-rw-r--r--Documentation/vm/ksm.txt22
-rw-r--r--Documentation/vm/page-types.c68
-rw-r--r--MAINTAINERS27
-rw-r--r--arch/alpha/include/asm/core_t2.h34
-rw-r--r--arch/alpha/include/asm/elf.h1
-rw-r--r--arch/alpha/include/asm/spinlock.h38
-rw-r--r--arch/alpha/include/asm/spinlock_types.h8
-rw-r--r--arch/alpha/kernel/core_t2.c2
-rw-r--r--arch/alpha/kernel/irq.c4
-rw-r--r--arch/alpha/kernel/srm_env.c65
-rw-r--r--arch/arm/include/asm/elf.h1
-rw-r--r--arch/arm/include/asm/mach/irq.h4
-rw-r--r--arch/arm/include/asm/spinlock.h40
-rw-r--r--arch/arm/include/asm/spinlock_types.h8
-rw-r--r--arch/arm/kernel/irq.c12
-rw-r--r--arch/arm/mach-at91/include/mach/atmel-mci.h24
-rw-r--r--arch/arm/mach-bcmring/arch.c10
-rw-r--r--arch/arm/mach-bcmring/include/mach/reg_nand.h66
-rw-r--r--arch/arm/mach-bcmring/include/mach/reg_umi.h237
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c24
-rw-r--r--arch/arm/mach-davinci/include/mach/nand.h4
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h11
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c11
-rw-r--r--arch/arm/mach-ns9xxx/irq.c8
-rw-r--r--arch/arm/mach-s3c2442/mach-gta02.c3
-rw-r--r--arch/arm/mach-u300/include/mach/coh901318.h281
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc_nand.h3
-rw-r--r--arch/arm/plat-omap/debug-leds.c2
-rw-r--r--arch/arm/plat-omap/gpio.c2
-rw-r--r--arch/arm/plat-s3c/include/plat/nand.h2
-rw-r--r--arch/avr32/Kconfig13
-rw-r--r--arch/avr32/Makefile2
-rw-r--r--arch/avr32/boards/atngw100/Kconfig25
-rw-r--r--arch/avr32/boards/atngw100/evklcd10x.c7
-rw-r--r--arch/avr32/boards/atngw100/mrmt.c1
-rw-r--r--arch/avr32/boards/atngw100/setup.c121
-rw-r--r--arch/avr32/configs/atngw100_defconfig383
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig605
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig599
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1414
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1549
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1549
-rw-r--r--arch/avr32/configs/atstk1002_defconfig415
-rw-r--r--arch/avr32/configs/atstk1006_defconfig297
-rw-r--r--arch/avr32/include/asm/elf.h1
-rw-r--r--arch/avr32/include/asm/hardirq.h19
-rw-r--r--arch/avr32/kernel/irq.c13
-rw-r--r--arch/avr32/kernel/vmlinux.lds.S64
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c53
-rw-r--r--arch/avr32/mach-at32ap/include/mach/atmel-mci.h24
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h1
-rw-r--r--arch/blackfin/include/asm/bfin-lq035q1.h28
-rw-r--r--arch/blackfin/include/asm/elf.h1
-rw-r--r--arch/blackfin/include/asm/spinlock.h62
-rw-r--r--arch/blackfin/include/asm/spinlock_types.h8
-rw-r--r--arch/blackfin/kernel/irqchip.c6
-rw-r--r--arch/blackfin/kernel/traps.c4
-rw-r--r--arch/cris/include/arch-v32/arch/spinlock.h62
-rw-r--r--arch/cris/include/asm/elf.h2
-rw-r--r--arch/cris/kernel/irq.c4
-rw-r--r--arch/frv/include/asm/elf.h1
-rw-r--r--arch/frv/kernel/irq.c4
-rw-r--r--arch/h8300/include/asm/elf.h1
-rw-r--r--arch/h8300/kernel/irq.c4
-rw-r--r--arch/ia64/hp/common/sba_iommu.c38
-rw-r--r--arch/ia64/ia32/elfcore32.h2
-rw-r--r--arch/ia64/include/asm/bitops.h2
-rw-r--r--arch/ia64/include/asm/dma-mapping.h2
-rw-r--r--arch/ia64/include/asm/elf.h1
-rw-r--r--arch/ia64/include/asm/hw_irq.h6
-rw-r--r--arch/ia64/include/asm/io.h2
-rw-r--r--arch/ia64/include/asm/mca.h5
-rw-r--r--arch/ia64/include/asm/numa.h2
-rw-r--r--arch/ia64/include/asm/rwsem.h2
-rw-r--r--arch/ia64/include/asm/spinlock.h76
-rw-r--r--arch/ia64/include/asm/spinlock_types.h8
-rw-r--r--arch/ia64/kernel/iosapic.c6
-rw-r--r--arch/ia64/kernel/irq.c4
-rw-r--r--arch/ia64/kernel/irq_ia64.c10
-rw-r--r--arch/ia64/kernel/mca.c11
-rw-r--r--arch/ia64/mm/ioremap.c11
-rw-r--r--arch/ia64/sn/pci/tioca_provider.c19
-rw-r--r--arch/m32r/include/asm/elf.h1
-rw-r--r--arch/m32r/include/asm/spinlock.h48
-rw-r--r--arch/m32r/include/asm/spinlock_types.h8
-rw-r--r--arch/m32r/kernel/irq.c4
-rw-r--r--arch/m68k/include/asm/elf.h1
-rw-r--r--arch/microblaze/include/asm/elf.h1
-rw-r--r--arch/microblaze/kernel/irq.c4
-rw-r--r--arch/mips/include/asm/elf.h1
-rw-r--r--arch/mips/include/asm/spinlock.h78
-rw-r--r--arch/mips/include/asm/spinlock_types.h8
-rw-r--r--arch/mips/kernel/irq.c4
-rw-r--r--arch/mips/vr41xx/common/icu.c92
-rw-r--r--arch/mn10300/include/asm/elf.h1
-rw-r--r--arch/mn10300/kernel/irq.c4
-rw-r--r--arch/parisc/include/asm/atomic.h10
-rw-r--r--arch/parisc/include/asm/bug.h4
-rw-r--r--arch/parisc/include/asm/elf.h1
-rw-r--r--arch/parisc/include/asm/ftrace.h14
-rw-r--r--arch/parisc/include/asm/spinlock.h64
-rw-r--r--arch/parisc/include/asm/spinlock_types.h12
-rw-r--r--arch/parisc/kernel/asm-offsets.c3
-rw-r--r--arch/parisc/kernel/irq.c8
-rw-r--r--arch/parisc/kernel/signal.c1
-rw-r--r--arch/parisc/kernel/smp.c9
-rw-r--r--arch/parisc/kernel/sys_parisc32.c6
-rw-r--r--arch/parisc/kernel/unwind.c50
-rw-r--r--arch/parisc/lib/bitops.c4
-rw-r--r--arch/powerpc/include/asm/async_tx.h47
-rw-r--r--arch/powerpc/include/asm/dcr-regs.h23
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h2
-rw-r--r--arch/powerpc/include/asm/elf.h1
-rw-r--r--arch/powerpc/include/asm/ptrace.h2
-rw-r--r--arch/powerpc/include/asm/rtas.h2
-rw-r--r--arch/powerpc/include/asm/spinlock.h68
-rw-r--r--arch/powerpc/include/asm/spinlock_types.h8
-rw-r--r--arch/powerpc/kernel/iommu.c4
-rw-r--r--arch/powerpc/kernel/irq.c8
-rw-r--r--arch/powerpc/kernel/rtas.c16
-rw-r--r--arch/powerpc/kernel/traps.c9
-rw-r--r--arch/powerpc/lib/locks.c8
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c8
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c8
-rw-r--r--arch/powerpc/platforms/iseries/irq.c4
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c10
-rw-r--r--arch/powerpc/platforms/pseries/xics.c4
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c4
-rw-r--r--arch/powerpc/sysdev/uic.c8
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/include/asm/elf.h1
-rw-r--r--arch/s390/include/asm/spinlock.h66
-rw-r--r--arch/s390/include/asm/spinlock_types.h8
-rw-r--r--arch/s390/kernel/debug.c3
-rw-r--r--arch/s390/lib/spinlock.c46
-rw-r--r--arch/score/include/asm/elf.h1
-rw-r--r--arch/sh/Kconfig.debug44
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c44
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c322
-rw-r--r--arch/sh/boards/mach-kfr2r09/lcd_wqvga.c6
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c14
-rw-r--r--arch/sh/boards/mach-migor/setup.c32
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c7
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c17
-rw-r--r--arch/sh/configs/ecovec24-romimage_defconfig2
-rw-r--r--arch/sh/configs/ecovec24_defconfig2
-rw-r--r--arch/sh/configs/rts7751r2d1_defconfig2
-rw-r--r--arch/sh/configs/rts7751r2dplus_defconfig2
-rw-r--r--arch/sh/include/asm/elf.h1
-rw-r--r--arch/sh/include/asm/io.h11
-rw-r--r--arch/sh/include/asm/pgtable_32.h5
-rw-r--r--arch/sh/include/asm/spinlock.h58
-rw-r--r--arch/sh/include/asm/spinlock_types.h8
-rw-r--r--arch/sh/include/asm/unistd_32.h3
-rw-r--r--arch/sh/include/asm/unistd_64.h3
-rw-r--r--arch/sh/include/mach-kfr2r09/mach/kfr2r09.h6
-rw-r--r--arch/sh/kernel/Makefile3
-rw-r--r--arch/sh/kernel/cpu/irq/ipr.c7
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c71
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-mxg.c23
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7201.c181
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7203.c89
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c89
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7705.c49
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh770x.c80
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7710.c50
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7720.c50
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh4-202.c23
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c47
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7760.c89
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c112
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c39
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c91
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c160
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c149
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c92
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7763.c81
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7770.c221
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c60
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7785.c159
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c132
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c76
-rw-r--r--arch/sh/kernel/cpu/sh5/fpu.c4
-rw-r--r--arch/sh/kernel/cpu/sh5/setup-sh5.c22
-rw-r--r--arch/sh/kernel/early_printk.c157
-rw-r--r--arch/sh/kernel/ftrace.c76
-rw-r--r--arch/sh/kernel/irq.c4
-rw-r--r--arch/sh/kernel/process_64.c4
-rw-r--r--arch/sh/kernel/ptrace_64.c4
-rw-r--r--arch/sh/kernel/setup.c3
-rw-r--r--arch/sh/kernel/signal_64.c2
-rw-r--r--arch/sh/kernel/syscalls_32.S1
-rw-r--r--arch/sh/kernel/traps_32.c18
-rw-r--r--arch/sh/kernel/traps_64.c4
-rw-r--r--arch/sh/mm/cache-sh4.c3
-rw-r--r--arch/sh/mm/ioremap_32.c10
-rw-r--r--arch/sh/mm/ioremap_64.c6
-rw-r--r--arch/sh/mm/numa.c15
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/Kconfig.debug14
-rw-r--r--arch/sparc/include/asm/elf_32.h2
-rw-r--r--arch/sparc/include/asm/elf_64.h1
-rw-r--r--arch/sparc/include/asm/spinlock_32.h62
-rw-r--r--arch/sparc/include/asm/spinlock_64.h54
-rw-r--r--arch/sparc/include/asm/spinlock_types.h8
-rw-r--r--arch/sparc/include/asm/string_32.h78
-rw-r--r--arch/sparc/include/asm/string_64.h25
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/uaccess_32.h15
-rw-r--r--arch/sparc/include/asm/uaccess_64.h23
-rw-r--r--arch/sparc/include/asm/unistd.h2
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/ftrace.c11
-rw-r--r--arch/sparc/kernel/iommu.c3
-rw-r--r--arch/sparc/kernel/irq_64.c8
-rw-r--r--arch/sparc/kernel/kprobes.c3
-rw-r--r--arch/sparc/kernel/ldc.c20
-rw-r--r--arch/sparc/kernel/mdesc.c21
-rw-r--r--arch/sparc/kernel/of_device_64.c14
-rw-r--r--arch/sparc/kernel/ptrace_64.c10
-rw-r--r--arch/sparc/kernel/syscalls.S14
-rw-r--r--arch/sparc/kernel/time_64.c26
-rw-r--r--arch/sparc/kernel/unaligned_32.c15
-rw-r--r--arch/sparc/kernel/unaligned_64.c23
-rw-r--r--arch/sparc/kernel/visemul.c3
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/bzero.S5
-rw-r--r--arch/sparc/lib/checksum_32.S2
-rw-r--r--arch/sparc/lib/ksyms.c2
-rw-r--r--arch/sparc/lib/mcount.S5
-rw-r--r--arch/sparc/lib/memcpy.S3
-rw-r--r--arch/sparc/lib/memset.S3
-rw-r--r--arch/sparc/lib/usercopy.c8
-rw-r--r--arch/sparc/math-emu/math_32.c3
-rw-r--r--arch/sparc/math-emu/math_64.c2
-rw-r--r--arch/sparc/mm/fault_64.c24
-rw-r--r--arch/sparc/mm/sun4c.c17
-rw-r--r--arch/um/drivers/mconsole_kern.c30
-rw-r--r--arch/um/drivers/ubd_kern.c36
-rw-r--r--arch/um/kernel/exitcode.c43
-rw-r--r--arch/um/kernel/irq.c4
-rw-r--r--arch/um/kernel/process.c31
-rw-r--r--arch/um/sys-i386/asm/elf.h1
-rw-r--r--arch/um/sys-ppc/asm/elf.h2
-rw-r--r--arch/um/sys-x86_64/asm/elf.h1
-rw-r--r--arch/x86/Kconfig11
-rw-r--r--arch/x86/include/asm/dma-mapping.h2
-rw-r--r--arch/x86/include/asm/elf.h1
-rw-r--r--arch/x86/include/asm/geode.h219
-rw-r--r--arch/x86/include/asm/olpc.h2
-rw-r--r--arch/x86/include/asm/paravirt.h14
-rw-r--r--arch/x86/include/asm/paravirt_types.h14
-rw-r--r--arch/x86/include/asm/ptrace.h2
-rw-r--r--arch/x86/include/asm/spinlock.h62
-rw-r--r--arch/x86/include/asm/spinlock_types.h10
-rw-r--r--arch/x86/include/asm/topology.h9
-rw-r--r--arch/x86/include/asm/uv/bios.h11
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h44
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/amd_iommu.c4
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/bios_uv.c8
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c11
-rw-r--r--arch/x86/kernel/dumpstack.c8
-rw-r--r--arch/x86/kernel/geode_32.c196
-rw-r--r--arch/x86/kernel/irq.c14
-rw-r--r--arch/x86/kernel/mfgpt_32.c410
-rw-r--r--arch/x86/kernel/olpc.c4
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c4
-rw-r--r--arch/x86/kernel/pci-calgary_64.c6
-rw-r--r--arch/x86/kernel/pci-gart_64.c6
-rw-r--r--arch/x86/kernel/ptrace.c51
-rw-r--r--arch/x86/kernel/reboot_fixups_32.c2
-rw-r--r--arch/x86/kernel/tsc_sync.c10
-rw-r--r--arch/x86/xen/spinlock.c16
-rw-r--r--arch/xtensa/include/asm/elf.h1
-rw-r--r--arch/xtensa/kernel/irq.c4
-rw-r--r--block/blk-settings.c7
-rw-r--r--block/cfq-iosched.c94
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/base/node.c196
-rw-r--r--drivers/block/drbd/drbd_nl.c3
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/block/xd.c30
-rw-r--r--drivers/char/efirtc.c1
-rw-r--r--drivers/char/hvc_iucv.c2
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c2
-rw-r--r--drivers/char/keyboard.c10
-rw-r--r--drivers/char/mem.c161
-rw-r--r--drivers/char/misc.c26
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/sysrq.c2
-rw-r--r--drivers/char/vt.c50
-rw-r--r--drivers/clocksource/Kconfig9
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/cs5535-clockevt.c197
-rw-r--r--drivers/cpuidle/governors/ladder.c3
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/coh901318.c1325
-rw-r--r--drivers/dma/coh901318_lli.c318
-rw-r--r--drivers/dma/coh901318_lli.h124
-rw-r--r--drivers/dma/dmatest.c16
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/iop-adma.c4
-rw-r--r--drivers/dma/ppc4xx/Makefile1
-rw-r--r--drivers/dma/ppc4xx/adma.c5027
-rw-r--r--drivers/dma/ppc4xx/adma.h195
-rw-r--r--drivers/dma/ppc4xx/dma.h223
-rw-r--r--drivers/dma/ppc4xx/xor.h110
-rw-r--r--drivers/dma/shdma.c36
-rw-r--r--drivers/dma/shdma.h14
-rw-r--r--drivers/dma/txx9dmac.c2
-rw-r--r--drivers/edac/edac_mce_amd.c24
-rw-r--r--drivers/edac/i5100_edac.c252
-rw-r--r--drivers/firmware/Kconfig4
-rw-r--r--drivers/firmware/dmi_scan.c5
-rw-r--r--drivers/gpio/Kconfig16
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/cs5535-gpio.c355
-rw-r--r--drivers/gpio/gpiolib.c161
-rw-r--r--drivers/gpio/langwell_gpio.c2
-rw-r--r--drivers/gpio/timbgpio.c342
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c20
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/r100.c54
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h5
-rw-r--r--drivers/gpu/drm/radeon/r200.c10
-rw-r--r--drivers/gpu/drm/radeon/r300.c24
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c267
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c506
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h74
-rw-r--r--drivers/gpu/drm/radeon/radeon.h25
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c14
-rw-r--r--drivers/gpu/drm/radeon/rs400.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c14
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c77
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/hwmon/Kconfig22
-rw-r--r--drivers/hwmon/adm1021.c11
-rw-r--r--drivers/hwmon/adm1025.c12
-rw-r--r--drivers/hwmon/adm1026.c11
-rw-r--r--drivers/hwmon/adm1029.c14
-rw-r--r--drivers/hwmon/adm1031.c9
-rw-r--r--drivers/hwmon/adm9240.c9
-rw-r--r--drivers/hwmon/ads7828.c13
-rw-r--r--drivers/hwmon/adt7462.c11
-rw-r--r--drivers/hwmon/adt7470.c11
-rw-r--r--drivers/hwmon/adt7473.c11
-rw-r--r--drivers/hwmon/adt7475.c6
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/asb100.c11
-rw-r--r--drivers/hwmon/atxp1.c11
-rw-r--r--drivers/hwmon/dme1737.c10
-rw-r--r--drivers/hwmon/ds1621.c9
-rw-r--r--drivers/hwmon/f75375s.c9
-rw-r--r--drivers/hwmon/fschmd.c9
-rw-r--r--drivers/hwmon/gl518sm.c11
-rw-r--r--drivers/hwmon/gl520sm.c13
-rw-r--r--drivers/hwmon/lis3lv02d.c231
-rw-r--r--drivers/hwmon/lis3lv02d.h51
-rw-r--r--drivers/hwmon/lm63.c15
-rw-r--r--drivers/hwmon/lm73.c9
-rw-r--r--drivers/hwmon/lm75.c14
-rw-r--r--drivers/hwmon/lm77.c12
-rw-r--r--drivers/hwmon/lm78.c9
-rw-r--r--drivers/hwmon/lm80.c13
-rw-r--r--drivers/hwmon/lm83.c12
-rw-r--r--drivers/hwmon/lm85.c16
-rw-r--r--drivers/hwmon/lm87.c12
-rw-r--r--drivers/hwmon/lm90.c14
-rw-r--r--drivers/hwmon/lm92.c9
-rw-r--r--drivers/hwmon/lm93.c8
-rw-r--r--drivers/hwmon/lm95241.c9
-rw-r--r--drivers/hwmon/max1619.c14
-rw-r--r--drivers/hwmon/max6650.c10
-rw-r--r--drivers/hwmon/pcf8591.c5
-rw-r--r--drivers/hwmon/smsc47m192.c11
-rw-r--r--drivers/hwmon/thmc50.c8
-rw-r--r--drivers/hwmon/tmp401.c9
-rw-r--r--drivers/hwmon/tmp421.c7
-rw-r--r--drivers/hwmon/w83627ehf.c72
-rw-r--r--drivers/hwmon/w83781d.c10
-rw-r--r--drivers/hwmon/w83791d.c9
-rw-r--r--drivers/hwmon/w83792d.c9
-rw-r--r--drivers/hwmon/w83793.c9
-rw-r--r--drivers/hwmon/w83l785ts.c14
-rw-r--r--drivers/hwmon/w83l786ng.c10
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/i2c-core.c52
-rw-r--r--drivers/infiniband/core/addr.c275
-rw-r--r--drivers/infiniband/core/cma.c133
-rw-r--r--drivers/infiniband/core/sa_query.c6
-rw-r--r--drivers/infiniband/core/ucma.c57
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c14
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c32
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c67
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c10
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c25
-rw-r--r--drivers/infiniband/hw/nes/Kconfig9
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c201
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h7
-rw-r--r--drivers/infiniband/hw/nes/nes_context.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c40
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h29
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h3
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c817
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c122
-rw-r--r--drivers/input/input.c10
-rw-r--r--drivers/input/keyboard/adp5588-keys.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c150
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/misc/bfin_rotary.c2
-rw-r--r--drivers/input/misc/pcspkr.c2
-rw-r--r--drivers/input/mouse/alps.c265
-rw-r--r--drivers/input/mouse/alps.h1
-rw-r--r--drivers/input/serio/altera_ps2.c15
-rw-r--r--drivers/input/serio/ambakmi.c9
-rw-r--r--drivers/input/serio/at32psif.c3
-rw-r--r--drivers/input/serio/gscps2.c6
-rw-r--r--drivers/input/serio/hil_mlc.c8
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h8
-rw-r--r--drivers/input/serio/i8042.c88
-rw-r--r--drivers/input/serio/sa1111ps2.c10
-rw-r--r--drivers/input/tablet/wacom.h11
-rw-r--r--drivers/input/tablet/wacom_sys.c231
-rw-r--r--drivers/input/tablet/wacom_wac.c368
-rw-r--r--drivers/input/tablet/wacom_wac.h29
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c258
-rw-r--r--drivers/input/touchscreen/pcap_ts.c2
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/ledtrig-timer.c4
-rw-r--r--drivers/md/dm-crypt.c207
-rw-r--r--drivers/md/dm-exception-store.c33
-rw-r--r--drivers/md/dm-exception-store.h62
-rw-r--r--drivers/md/dm-io.c120
-rw-r--r--drivers/md/dm-ioctl.c123
-rw-r--r--drivers/md/dm-kcopyd.c5
-rw-r--r--drivers/md/dm-log.c77
-rw-r--r--drivers/md/dm-mpath.c95
-rw-r--r--drivers/md/dm-raid1.c219
-rw-r--r--drivers/md/dm-region-hash.c31
-rw-r--r--drivers/md/dm-snap-persistent.c195
-rw-r--r--drivers/md/dm-snap-transient.c24
-rw-r--r--drivers/md/dm-snap.c1279
-rw-r--r--drivers/md/dm-sysfs.c10
-rw-r--r--drivers/md/dm-table.c9
-rw-r--r--drivers/md/dm-uevent.c9
-rw-r--r--drivers/md/dm.c643
-rw-r--r--drivers/md/dm.h13
-rw-r--r--drivers/md/md.c20
-rw-r--r--drivers/media/IR/Kconfig9
-rw-r--r--drivers/media/IR/Makefile5
-rw-r--r--drivers/media/IR/ir-functions.c (renamed from drivers/media/common/ir-functions.c)20
-rw-r--r--drivers/media/IR/ir-keymaps.c (renamed from drivers/media/common/ir-keymaps.c)219
-rw-r--r--drivers/media/IR/ir-keytable.c (renamed from drivers/media/common/ir-keytable.c)90
-rw-r--r--drivers/media/Kconfig1
-rw-r--r--drivers/media/Makefile2
-rw-r--r--drivers/media/common/Makefile2
-rw-r--r--drivers/media/common/saa7146_fops.c60
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c14
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig8
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700.h26
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c101
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c725
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c15
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c456
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c10
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk-fe.c4
-rw-r--r--drivers/media/dvb/frontends/Kconfig9
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c4
-rw-r--r--drivers/media/dvb/frontends/dib0070.c674
-rw-r--r--drivers/media/dvb/frontends/dib0070.h4
-rw-r--r--drivers/media/dvb/frontends/dib0090.c1522
-rw-r--r--drivers/media/dvb/frontends/dib0090.h108
-rw-r--r--drivers/media/dvb/frontends/dib8000.c137
-rw-r--r--drivers/media/dvb/frontends/dib8000.h32
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.c15
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.h71
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c4
-rw-r--r--drivers/media/dvb/frontends/lnbp21.c28
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c13
-rw-r--r--drivers/media/dvb/frontends/stv090x.c7
-rw-r--r--drivers/media/dvb/frontends/stv090x.h2
-rw-r--r--drivers/media/dvb/siano/smsdvb.c4
-rw-r--r--drivers/media/dvb/siano/smssdio.c8
-rw-r--r--drivers/media/dvb/siano/smsusb.c18
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c52
-rw-r--r--drivers/media/radio/Kconfig7
-rw-r--r--drivers/media/radio/radio-aimslab.c4
-rw-r--r--drivers/media/radio/radio-aztech.c4
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c4
-rw-r--r--drivers/media/radio/radio-maestro.c4
-rw-r--r--drivers/media/radio/radio-maxiradio.c4
-rw-r--r--drivers/media/radio/radio-mr800.c4
-rw-r--r--drivers/media/radio/radio-rtrack2.c4
-rw-r--r--drivers/media/radio/radio-sf16fmi.c82
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c12
-rw-r--r--drivers/media/radio/radio-terratec.c4
-rw-r--r--drivers/media/radio/radio-trust.c4
-rw-r--r--drivers/media/radio/radio-typhoon.c4
-rw-r--r--drivers/media/radio/radio-zoltrix.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c98
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c219
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c97
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h5
-rw-r--r--drivers/media/video/Kconfig10
-rw-r--r--drivers/media/video/Makefile3
-rw-r--r--drivers/media/video/arv.c5
-rw-r--r--drivers/media/video/au0828/au0828-video.c36
-rw-r--r--drivers/media/video/au0828/au0828.h1
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c41
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c9
-rw-r--r--drivers/media/video/c-qcam.c4
-rw-r--r--drivers/media/video/cafe_ccic.c1
-rw-r--r--drivers/media/video/cpia.c221
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c34
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c4
-rw-r--r--drivers/media/video/cx18/cx18-streams.c20
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c23
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c26
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c10
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c58
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx23885/cimax2.c107
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c29
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c11
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c9
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c57
-rw-r--r--drivers/media/video/cx23885/cx23885.h3
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c26
-rw-r--r--drivers/media/video/cx88/cx88-input.c9
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c15
-rw-r--r--drivers/media/video/cx88/cx88-video.c68
-rw-r--r--drivers/media/video/cx88/cx88.h2
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c8
-rw-r--r--drivers/media/video/davinci/vpif.c2
-rw-r--r--drivers/media/video/davinci/vpif_capture.c2
-rw-r--r--drivers/media/video/davinci/vpif_display.c1
-rw-r--r--drivers/media/video/davinci/vpss.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c7
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c30
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c68
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c59
-rw-r--r--drivers/media/video/em28xx/em28xx.h5
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c45
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/etoms.c4
-rw-r--r--drivers/media/video/gspca/gl860/gl860-mi1320.c2
-rw-r--r--drivers/media/video/gspca/gl860/gl860-mi2020.c2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c14
-rw-r--r--drivers/media/video/gspca/gspca.c67
-rw-r--r--drivers/media/video/gspca/gspca.h10
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c4
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c10
-rw-r--r--drivers/media/video/gspca/mr97310a.c2
-rw-r--r--drivers/media/video/gspca/ov519.c14
-rw-r--r--drivers/media/video/gspca/pac7302.c25
-rw-r--r--drivers/media/video/gspca/pac7311.c4
-rw-r--r--drivers/media/video/gspca/sn9c20x.c12
-rw-r--r--drivers/media/video/gspca/sonixb.c4
-rw-r--r--drivers/media/video/gspca/spca506.c4
-rw-r--r--drivers/media/video/gspca/stk014.c106
-rw-r--r--drivers/media/video/gspca/sunplus.c237
-rw-r--r--drivers/media/video/gspca/zc3xx.c36
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c16
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c12
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c19
-rw-r--r--drivers/media/video/meye.c1
-rw-r--r--drivers/media/video/mt9m001.c205
-rw-r--r--drivers/media/video/mt9m111.c189
-rw-r--r--drivers/media/video/mt9t031.c281
-rw-r--r--drivers/media/video/mt9t112.c1177
-rw-r--r--drivers/media/video/mt9v022.c228
-rw-r--r--drivers/media/video/mx1_camera.c126
-rw-r--r--drivers/media/video/mx3_camera.c296
-rw-r--r--drivers/media/video/omap24xxcam.c10
-rw-r--r--drivers/media/video/ov511.c9
-rw-r--r--drivers/media/video/ov772x.c233
-rw-r--r--drivers/media/video/ov9640.c107
-rw-r--r--drivers/media/video/pms.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c21
-rw-r--r--drivers/media/video/pwc/pwc-if.c5
-rw-r--r--drivers/media/video/pxa_camera.c335
-rw-r--r--drivers/media/video/rj54n1cb0c.c474
-rw-r--r--drivers/media/video/s2255drv.c55
-rw-r--r--drivers/media/video/saa5246a.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c22
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c19
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c20
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c71
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c39
-rw-r--r--drivers/media/video/se401.c4
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c522
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c47
-rw-r--r--drivers/media/video/soc_camera.c106
-rw-r--r--drivers/media/video/soc_camera_platform.c40
-rw-r--r--drivers/media/video/soc_mediabus.c157
-rw-r--r--drivers/media/video/stk-webcam.c9
-rw-r--r--drivers/media/video/stradis.c4
-rw-r--r--drivers/media/video/stv680.c5
-rw-r--r--drivers/media/video/tw9910.c337
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c4
-rw-r--r--drivers/media/video/usbvideo/vicam.c5
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c4
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c34
-rw-r--r--drivers/media/video/uvc/uvc_driver.c1
-rw-r--r--drivers/media/video/uvc/uvc_video.c2
-rw-r--r--drivers/media/video/v4l2-common.c47
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c6
-rw-r--r--drivers/media/video/v4l2-dev.c22
-rw-r--r--drivers/media/video/v4l2-ioctl.c147
-rw-r--r--drivers/media/video/videobuf-dma-contig.c6
-rw-r--r--drivers/media/video/vino.c1
-rw-r--r--drivers/media/video/vivi.c22
-rw-r--r--drivers/media/video/w9968cf.c34
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c44
-rw-r--r--drivers/media/video/zoran/zoran_driver.c1
-rw-r--r--drivers/media/video/zr364xx.c5
-rw-r--r--drivers/mfd/twl4030-codec.c10
-rw-r--r--drivers/mfd/wm831x-core.c9
-rw-r--r--drivers/misc/Kconfig48
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot.c666
-rw-r--r--drivers/misc/cs5535-mfgpt.c370
-rw-r--r--drivers/misc/eeprom/eeprom.c8
-rw-r--r--drivers/misc/ics932s401.c11
-rw-r--r--drivers/misc/ioc4.c16
-rw-r--r--drivers/misc/sgi-gru/gru.h11
-rw-r--r--drivers/misc/sgi-gru/gru_instructions.h144
-rw-r--r--drivers/misc/sgi-gru/grufault.c311
-rw-r--r--drivers/misc/sgi-gru/grufile.c290
-rw-r--r--drivers/misc/sgi-gru/gruhandles.c70
-rw-r--r--drivers/misc/sgi-gru/gruhandles.h37
-rw-r--r--drivers/misc/sgi-gru/grukdump.c13
-rw-r--r--drivers/misc/sgi-gru/grukservices.c211
-rw-r--r--drivers/misc/sgi-gru/grukservices.h14
-rw-r--r--drivers/misc/sgi-gru/grulib.h21
-rw-r--r--drivers/misc/sgi-gru/grumain.c228
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c42
-rw-r--r--drivers/misc/sgi-gru/grutables.h75
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c14
-rw-r--r--drivers/misc/sgi-xp/xp.h1
-rw-r--r--drivers/misc/sgi-xp/xp_main.c3
-rw-r--r--drivers/misc/sgi-xp/xp_sn2.c10
-rw-r--r--drivers/misc/sgi-xp/xp_uv.c33
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c13
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c46
-rw-r--r--drivers/misc/ti_dac7512.c101
-rw-r--r--drivers/mmc/core/Kconfig4
-rw-r--r--drivers/mmc/core/core.c16
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c23
-rw-r--r--drivers/mmc/core/sd.c21
-rw-r--r--drivers/mmc/core/sdio_cis.c167
-rw-r--r--drivers/mmc/host/Kconfig27
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c141
-rw-r--r--drivers/mmc/host/bfin_sdh.c639
-rw-r--r--drivers/mmc/host/davinci_mmc.c1349
-rw-r--r--drivers/mmc/host/mxcmmc.c10
-rw-r--r--drivers/mmc/host/omap.c10
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/s3cmci.c13
-rw-r--r--drivers/mmc/host/sdhci-pci.c75
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c35
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c17
-rwxr-xr-xdrivers/mtd/chips/cfi_util.c7
-rw-r--r--drivers/mtd/chips/jedec_probe.c8
-rw-r--r--drivers/mtd/devices/m25p80.c334
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c19
-rw-r--r--drivers/mtd/maps/Kconfig6
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/ipaq-flash.c460
-rw-r--r--drivers/mtd/maps/ixp4xx.c6
-rw-r--r--drivers/mtd/maps/physmap.c21
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/vmu-flash.c9
-rw-r--r--drivers/mtd/mtd_blkdevs.c5
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdoops.c389
-rw-r--r--drivers/mtd/nand/Kconfig16
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/alauda.c11
-rw-r--r--drivers/mtd/nand/atmel_nand.c5
-rw-r--r--drivers/mtd/nand/bcm_umi_bch.c213
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c581
-rw-r--r--drivers/mtd/nand/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/excite_nandflash.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c86
-rw-r--r--drivers/mtd/nand/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/mxc_nand.c783
-rw-r--r--drivers/mtd/nand/nand_base.c141
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.c149
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h358
-rw-r--r--drivers/mtd/nand/nand_ecc.c25
-rw-r--r--drivers/mtd/nand/nandsim.c7
-rw-r--r--drivers/mtd/nand/nomadik_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c50
-rw-r--r--drivers/mtd/nand/s3c2410.c2
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c3
-rw-r--r--drivers/mtd/onenand/omap2.c22
-rw-r--r--drivers/mtd/onenand/onenand_base.c745
-rw-r--r--drivers/mtd/tests/Makefile1
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c87
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c18
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c1
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/bcm63xx_enet.c12
-rw-r--r--drivers/net/bonding/bond_3ad.c171
-rw-r--r--drivers/net/bonding/bond_alb.c38
-rw-r--r--drivers/net/bonding/bond_ipv6.c12
-rw-r--r--drivers/net/bonding/bond_main.c607
-rw-r--r--drivers/net/bonding/bond_sysfs.c327
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/mcp251x.c13
-rw-r--r--drivers/net/can/mscan/mscan.c3
-rw-r--r--drivers/net/can/sja1000/sja1000.c18
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c38
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c20
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/mlx4/alloc.c37
-rw-r--r--drivers/net/mlx4/fw.c3
-rw-r--r--drivers/net/mlx4/sense.c2
-rw-r--r--drivers/net/pcmcia/3c574_cs.c4
-rw-r--r--drivers/net/pcmcia/3c589_cs.c4
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/sfc/selftest.c2
-rw-r--r--drivers/net/sh_eth.c7
-rw-r--r--drivers/net/sky2.c15
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/gsc.c2
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/led.c59
-rw-r--r--drivers/parisc/pdc_stable.c9
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/pci/dmar.c110
-rw-r--r--drivers/pci/intel-iommu.c78
-rw-r--r--drivers/pci/intr_remapping.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c2
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c7
-rw-r--r--drivers/pnp/interface.c36
-rw-r--r--drivers/pnp/pnpbios/proc.c204
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/pcf50633-charger.c228
-rw-r--r--drivers/power/power_supply_sysfs.c5
-rw-r--r--drivers/power/wm831x_backup.c233
-rw-r--r--drivers/power/wm831x_power.c144
-rw-r--r--drivers/power/wm97xx_battery.c2
-rw-r--r--drivers/rtc/Kconfig28
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/rtc-at32ap700x.c4
-rw-r--r--drivers/rtc/rtc-bq32k.c204
-rw-r--r--drivers/rtc/rtc-bq4802.c3
-rw-r--r--drivers/rtc/rtc-cmos.c78
-rw-r--r--drivers/rtc/rtc-ds1302.c1
-rw-r--r--drivers/rtc/rtc-ds1305.c14
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c148
-rw-r--r--drivers/rtc/rtc-ds1553.c149
-rw-r--r--drivers/rtc/rtc-ds1742.c59
-rw-r--r--drivers/rtc/rtc-m48t35.c16
-rw-r--r--drivers/rtc/rtc-m48t59.c11
-rw-r--r--drivers/rtc/rtc-mc13783.c262
-rw-r--r--drivers/rtc/rtc-mv.c157
-rw-r--r--drivers/rtc/rtc-nuc900.c342
-rw-r--r--drivers/rtc/rtc-omap.c47
-rw-r--r--drivers/rtc/rtc-pcf50633.c5
-rw-r--r--drivers/rtc/rtc-pcf8563.c4
-rw-r--r--drivers/rtc/rtc-pcf8583.c3
-rw-r--r--drivers/rtc/rtc-pl031.c23
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-stk17ta8.c124
-rw-r--r--drivers/rtc/rtc-tx4939.c51
-rw-r--r--drivers/rtc/rtc-v3020.c8
-rw-r--r--drivers/rtc/rtc-vr41xx.c4
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-wm8350.c14
-rw-r--r--drivers/rtc/rtc-x1205.c53
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/sclp.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/serial/ioc3_serial.c4
-rw-r--r--drivers/serial/ioc4_serial.c20
-rw-r--r--drivers/serial/pxa.c2
-rw-r--r--drivers/serial/sh-sci.c56
-rw-r--r--drivers/sh/intc.c2
-rw-r--r--drivers/sh/pfc.c2
-rw-r--r--drivers/sn/ioc3.c17
-rw-r--r--drivers/spi/pxa2xx_spi.c2
-rw-r--r--drivers/spi/spi_s3c24xx.c2
-rw-r--r--drivers/staging/cx25821/cx25821-audups11.c33
-rw-r--r--drivers/staging/cx25821/cx25821-video.c6
-rw-r--r--drivers/staging/cx25821/cx25821-video0.c33
-rw-r--r--drivers/staging/cx25821/cx25821-video1.c33
-rw-r--r--drivers/staging/cx25821/cx25821-video2.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video3.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video4.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video5.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video6.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video7.c34
-rw-r--r--drivers/staging/cx25821/cx25821-videoioctl.c32
-rw-r--r--drivers/staging/cx25821/cx25821-vidups10.c33
-rw-r--r--drivers/staging/cx25821/cx25821-vidups9.c33
-rw-r--r--drivers/staging/go7007/go7007-v4l2.c5
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.h2
-rw-r--r--drivers/usb/core/usb.c25
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c26
-rw-r--r--drivers/usb/host/ohci-au1xxx.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/r8a66597-hcd.c2
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/video/Kconfig15
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/atafb.c3
-rw-r--r--drivers/video/backlight/da903x_bl.c2
-rw-r--r--drivers/video/backlight/lcd.c4
-rw-r--r--drivers/video/bfin-lq035q1-fb.c826
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c32
-rw-r--r--drivers/video/clps711xfb.c50
-rw-r--r--drivers/video/da8xx-fb.c175
-rw-r--r--drivers/video/display/display-sysfs.c2
-rw-r--r--drivers/video/ep93xx-fb.c2
-rw-r--r--drivers/video/geode/display_gx.c4
-rw-r--r--drivers/video/geode/gxfb.h2
-rw-r--r--drivers/video/geode/gxfb_core.c2
-rw-r--r--drivers/video/geode/lxfb.h12
-rw-r--r--drivers/video/geode/lxfb_ops.c4
-rw-r--r--drivers/video/geode/suspend_gx.c2
-rw-r--r--drivers/video/geode/video_gx.c2
-rw-r--r--drivers/video/hitfb.c2
-rw-r--r--drivers/video/i810/i810_dvt.c53
-rw-r--r--drivers/video/intelfb/intelfbdrv.c3
-rw-r--r--drivers/video/intelfb/intelfbhw.c47
-rw-r--r--drivers/video/intelfb/intelfbhw.h1
-rw-r--r--drivers/video/matrox/g450_pll.c3
-rw-r--r--drivers/video/maxinefb.c3
-rw-r--r--drivers/video/mb862xx/Makefile2
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c14
-rw-r--r--drivers/video/mb862xx/mb862xxfb.h2
-rw-r--r--drivers/video/mb862xx/mb862xxfb_accel.c331
-rw-r--r--drivers/video/mb862xx/mb862xxfb_accel.h203
-rw-r--r--drivers/video/modedb.c24
-rw-r--r--drivers/video/output.c2
-rw-r--r--drivers/video/pmag-ba-fb.c3
-rw-r--r--drivers/video/pmagb-b-fb.c3
-rw-r--r--drivers/video/pxafb.c7
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c12
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/sm501fb.c249
-rw-r--r--drivers/video/via/lcd.c40
-rw-r--r--drivers/video/via/viafbdev.c2
-rw-r--r--drivers/watchdog/adx_wdt.c2
-rw-r--r--fs/Kconfig4
-rw-r--r--fs/aio.c40
-rw-r--r--fs/autofs4/autofs_i.h38
-rw-r--r--fs/autofs4/expire.c8
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/autofs4/root.c616
-rw-r--r--fs/binfmt_elf.c11
-rw-r--r--fs/binfmt_elf_fdpic.c11
-rw-r--r--fs/btrfs/Kconfig1
-rw-r--r--fs/cachefiles/daemon.c4
-rw-r--r--fs/cifs/export.c2
-rw-r--r--fs/compat.c2
-rw-r--r--fs/direct-io.c165
-rw-r--r--fs/exec.c9
-rw-r--r--fs/exportfs/expfs.c2
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h1
-rw-r--r--fs/ext2/file.c21
-rw-r--r--fs/ext2/super.c22
-rw-r--r--fs/ext4/Kconfig1
-rw-r--r--fs/ext4/super.c7
-rw-r--r--fs/fat/fat.h3
-rw-r--r--fs/fat/fatent.c25
-rw-r--r--fs/fat/inode.c8
-rw-r--r--fs/fat/misc.c57
-rw-r--r--fs/fscache/object-list.c2
-rw-r--r--fs/gfs2/Kconfig1
-rw-r--r--fs/gfs2/sys.c16
-rw-r--r--fs/hfs/catalog.c4
-rw-r--r--fs/hfs/dir.c11
-rw-r--r--fs/hfs/super.c7
-rw-r--r--fs/hpfs/super.c17
-rw-r--r--fs/isofs/export.c2
-rw-r--r--fs/jbd/Kconfig1
-rw-r--r--fs/jbd2/Kconfig1
-rw-r--r--fs/jffs2/gc.c3
-rw-r--r--fs/jffs2/readinode.c2
-rw-r--r--fs/jffs2/summary.c2
-rw-r--r--fs/lockd/svc4proc.c4
-rw-r--r--fs/lockd/svcproc.c4
-rw-r--r--fs/nfs/Kconfig2
-rw-r--r--fs/nfsctl.c2
-rw-r--r--fs/nfsd/auth.c12
-rw-r--r--fs/nfsd/cache.h (renamed from include/linux/nfsd/cache.h)5
-rw-r--r--fs/nfsd/export.c65
-rw-r--r--fs/nfsd/lockd.c10
-rw-r--r--fs/nfsd/nfs2acl.c27
-rw-r--r--fs/nfsd/nfs3acl.c15
-rw-r--r--fs/nfsd/nfs3proc.c20
-rw-r--r--fs/nfsd/nfs3xdr.c15
-rw-r--r--fs/nfsd/nfs4acl.c12
-rw-r--r--fs/nfsd/nfs4callback.c19
-rw-r--r--fs/nfsd/nfs4idmap.c17
-rw-r--r--fs/nfsd/nfs4proc.c19
-rw-r--r--fs/nfsd/nfs4recover.c16
-rw-r--r--fs/nfsd/nfs4state.c84
-rw-r--r--fs/nfsd/nfs4xdr.c26
-rw-r--r--fs/nfsd/nfscache.c14
-rw-r--r--fs/nfsd/nfsctl.c51
-rw-r--r--fs/nfsd/nfsd.h (renamed from include/linux/nfsd/nfsd.h)98
-rw-r--r--fs/nfsd/nfsfh.c102
-rw-r--r--fs/nfsd/nfsfh.h208
-rw-r--r--fs/nfsd/nfsproc.c22
-rw-r--r--fs/nfsd/nfssvc.c22
-rw-r--r--fs/nfsd/nfsxdr.c12
-rw-r--r--fs/nfsd/state.h (renamed from include/linux/nfsd/state.h)14
-rw-r--r--fs/nfsd/stats.c11
-rw-r--r--fs/nfsd/vfs.c134
-rw-r--r--fs/nfsd/vfs.h101
-rw-r--r--fs/nfsd/xdr.h (renamed from include/linux/nfsd/xdr.h)10
-rw-r--r--fs/nfsd/xdr3.h (renamed from include/linux/nfsd/xdr3.h)4
-rw-r--r--fs/nfsd/xdr4.h (renamed from include/linux/nfsd/xdr4.h)5
-rw-r--r--fs/nilfs2/Kconfig1
-rw-r--r--fs/ocfs2/aops.c34
-rw-r--r--fs/proc/base.c72
-rw-r--r--fs/proc/generic.c21
-rw-r--r--fs/proc/inode.c31
-rw-r--r--fs/proc/internal.h10
-rw-r--r--fs/proc/task_mmu.c45
-rw-r--r--fs/proc/task_nommu.c8
-rw-r--r--fs/qnx4/bitmap.c24
-rw-r--r--fs/qnx4/inode.c22
-rw-r--r--fs/reiserfs/Kconfig1
-rw-r--r--fs/reiserfs/Makefile6
-rw-r--r--fs/reiserfs/procfs.c65
-rw-r--r--fs/reiserfs/super.c4
-rw-r--r--fs/ubifs/debug.c9
-rw-r--r--fs/ubifs/super.c7
-rw-r--r--fs/ufs/dir.c10
-rw-r--r--fs/ufs/namei.c8
-rw-r--r--fs/ufs/super.c52
-rw-r--r--fs/ufs/ufs.h4
-rw-r--r--fs/xfs/Makefile8
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c72
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c117
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h33
-rw-r--r--fs/xfs/linux-2.6/xfs_fs_subr.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c87
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.h45
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c104
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h7
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.c75
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h1369
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h4
-rw-r--r--fs/xfs/quota/xfs_dquot.c110
-rw-r--r--fs/xfs/quota/xfs_dquot.h21
-rw-r--r--fs/xfs/quota/xfs_qm.c40
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c4
-rw-r--r--fs/xfs/support/ktrace.c323
-rw-r--r--fs/xfs/support/ktrace.h85
-rw-r--r--fs/xfs/xfs.h16
-rw-r--r--fs/xfs/xfs_ag.h14
-rw-r--r--fs/xfs/xfs_alloc.c230
-rw-r--r--fs/xfs/xfs_alloc.h27
-rw-r--r--fs/xfs/xfs_alloc_btree.c1
-rw-r--r--fs/xfs/xfs_attr.c107
-rw-r--r--fs/xfs/xfs_attr.h10
-rw-r--r--fs/xfs/xfs_attr_leaf.c14
-rw-r--r--fs/xfs/xfs_attr_sf.h40
-rw-r--r--fs/xfs/xfs_bmap.c942
-rw-r--r--fs/xfs/xfs_bmap.h58
-rw-r--r--fs/xfs/xfs_bmap_btree.c6
-rw-r--r--fs/xfs/xfs_btree.c5
-rw-r--r--fs/xfs/xfs_btree_trace.h17
-rw-r--r--fs/xfs/xfs_buf_item.c87
-rw-r--r--fs/xfs/xfs_buf_item.h20
-rw-r--r--fs/xfs/xfs_da_btree.c3
-rw-r--r--fs/xfs/xfs_da_btree.h7
-rw-r--r--fs/xfs/xfs_dfrag.c2
-rw-r--r--fs/xfs/xfs_dir2.c8
-rw-r--r--fs/xfs/xfs_dir2_block.c20
-rw-r--r--fs/xfs/xfs_dir2_leaf.c21
-rw-r--r--fs/xfs/xfs_dir2_node.c27
-rw-r--r--fs/xfs/xfs_dir2_sf.c26
-rw-r--r--fs/xfs/xfs_dir2_trace.c216
-rw-r--r--fs/xfs/xfs_dir2_trace.h72
-rw-r--r--fs/xfs/xfs_filestream.c8
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_iget.c111
-rw-r--r--fs/xfs/xfs_inode.c79
-rw-r--r--fs/xfs/xfs_inode.h82
-rw-r--r--fs/xfs/xfs_inode_item.c5
-rw-r--r--fs/xfs/xfs_iomap.c85
-rw-r--r--fs/xfs/xfs_iomap.h8
-rw-r--r--fs/xfs/xfs_log.c181
-rw-r--r--fs/xfs/xfs_log_priv.h20
-rw-r--r--fs/xfs/xfs_log_recover.c15
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_quota.h8
-rw-r--r--fs/xfs/xfs_rename.c1
-rw-r--r--fs/xfs/xfs_rtalloc.c1
-rw-r--r--fs/xfs/xfs_rw.c3
-rw-r--r--fs/xfs/xfs_trans.h47
-rw-r--r--fs/xfs/xfs_trans_buf.c62
-rw-r--r--fs/xfs/xfs_vnodeops.c8
-rw-r--r--include/asm-generic/bitops/atomic.h10
-rw-r--r--include/asm-generic/bug.h8
-rw-r--r--include/asm-generic/gpio.h6
-rw-r--r--include/asm-generic/mman-common.h5
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/atmel-mci.h4
-rw-r--r--include/linux/bitmap.h11
-rw-r--r--include/linux/can/dev.h9
-rw-r--r--include/linux/cs5535.h172
-rw-r--r--include/linux/ctype.h3
-rw-r--r--include/linux/device-mapper.h8
-rw-r--r--include/linux/dm-dirty-log.h6
-rw-r--r--include/linux/dm-ioctl.h13
-rw-r--r--include/linux/dm-region-hash.h3
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/dynamic_debug.h13
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/err.h5
-rw-r--r--include/linux/exportfs.h2
-rw-r--r--include/linux/fs.h22
-rw-r--r--include/linux/gpio.h6
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/i2c.h92
-rw-r--r--include/linux/i8042.h18
-rw-r--r--include/linux/init_task.h10
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/iommu-helper.h3
-rw-r--r--include/linux/ioport.h4
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/kallsyms.h12
-rw-r--r--include/linux/kernel.h55
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/kmsg_dump.h60
-rw-r--r--include/linux/ksm.h96
-rw-r--r--include/linux/lis3lv02d.h15
-rw-r--r--include/linux/memcontrol.h17
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h3
-rw-r--r--include/linux/mfd/pcf50633/core.h7
-rw-r--r--include/linux/mfd/pcf50633/mbc.h1
-rw-r--r--include/linux/migrate.h8
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mm.h25
-rw-r--r--include/linux/mtd/bbm.h35
-rw-r--r--include/linux/mtd/cfi.h9
-rw-r--r--include/linux/mtd/flashchip.h9
-rw-r--r--include/linux/mtd/nand.h97
-rw-r--r--include/linux/mtd/nand_ecc.h10
-rw-r--r--include/linux/mtd/onenand.h23
-rw-r--r--include/linux/mtd/onenand_regs.h2
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/nfsacl.h1
-rw-r--r--include/linux/nfsd/export.h19
-rw-r--r--include/linux/nfsd/nfsfh.h206
-rw-r--r--include/linux/nfsd/syscall.h8
-rw-r--r--include/linux/node.h16
-rw-r--r--include/linux/nodemask.h33
-rw-r--r--include/linux/numa.h2
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/page-flags.h8
-rw-r--r--include/linux/page_cgroup.h7
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/plist.h43
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/ptrace.h23
-rw-r--r--include/linux/reiserfs_fs.h35
-rw-r--r--include/linux/rmap.h43
-rw-r--r--include/linux/rtmutex.h6
-rw-r--r--include/linux/rtnetlink.h6
-rw-r--r--include/linux/rwlock.h125
-rw-r--r--include/linux/rwlock_api_smp.h282
-rw-r--r--include/linux/rwlock_types.h56
-rw-r--r--include/linux/rwsem-spinlock.h6
-rw-r--r--include/linux/sched.h18
-rw-r--r--include/linux/sem.h5
-rw-r--r--include/linux/sm501-regs.h2
-rw-r--r--include/linux/spinlock.h377
-rw-r--r--include/linux/spinlock_api_smp.h360
-rw-r--r--include/linux/spinlock_api_up.h66
-rw-r--r--include/linux/spinlock_types.h92
-rw-r--r--include/linux/spinlock_types_up.h12
-rw-r--r--include/linux/spinlock_up.h42
-rw-r--r--include/linux/string.h10
-rw-r--r--include/linux/sunrpc/debug.h3
-rw-r--r--include/linux/sunrpc/rpc_rdma.h2
-rw-r--r--include/linux/sunrpc/svc.h7
-rw-r--r--include/linux/swap.h67
-rw-r--r--include/linux/timb_gpio.h37
-rw-r--r--include/linux/tracehook.h7
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/videodev2.h123
-rw-r--r--include/linux/vmstat.h2
-rw-r--r--include/linux/vt.h15
-rw-r--r--include/media/ir-common.h39
-rw-r--r--include/media/ir-core.h62
-rw-r--r--include/media/mt9t112.h30
-rw-r--r--include/media/ov772x.h4
-rw-r--r--include/media/rj54n1cb0c.h19
-rw-r--r--include/media/saa7146_vv.h4
-rw-r--r--include/media/sh_mobile_ceu.h2
-rw-r--r--include/media/soc_camera.h30
-rw-r--r--include/media/soc_camera_platform.h3
-rw-r--r--include/media/soc_mediabus.h65
-rw-r--r--include/media/tw9910.h1
-rw-r--r--include/media/v4l2-chip-ident.h2
-rw-r--r--include/media/v4l2-common.h2
-rw-r--r--include/media/v4l2-dev.h23
-rw-r--r--include/media/v4l2-ioctl.h15
-rw-r--r--include/media/v4l2-mediabus.h61
-rw-r--r--include/media/v4l2-subdev.h61
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ipv6.h8
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h2
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/rdma/ib_addr.h36
-rw-r--r--include/rdma/ib_sa.h6
-rw-r--r--include/rdma/ib_user_sa.h16
-rw-r--r--include/rdma/ib_verbs.h5
-rw-r--r--include/rdma/rdma_user_cm.h6
-rw-r--r--include/video/da8xx-fb.h1
-rw-r--r--include/video/sh_mobile_lcdc.h2
-rw-r--r--init/Kconfig22
-rw-r--r--init/main.c18
-rw-r--r--ipc/msg.c1
-rw-r--r--ipc/sem.c214
-rw-r--r--ipc/shm.c1
-rw-r--r--kernel/acct.c3
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c13
-rw-r--r--kernel/futex.c50
-rw-r--r--kernel/hrtimer.c50
-rw-r--r--kernel/hw_breakpoint.c4
-rw-r--r--kernel/irq/autoprobe.c20
-rw-r--r--kernel/irq/chip.c86
-rw-r--r--kernel/irq/handle.c22
-rw-r--r--kernel/irq/internals.h2
-rw-r--r--kernel/irq/manage.c50
-rw-r--r--kernel/irq/migration.c2
-rw-r--r--kernel/irq/numa_migrate.c8
-rw-r--r--kernel/irq/pm.c8
-rw-r--r--kernel/irq/proc.c4
-rw-r--r--kernel/irq/spurious.c14
-rw-r--r--kernel/kexec.c59
-rw-r--r--kernel/ksysfs.c21
-rw-r--r--kernel/lockdep.c20
-rw-r--r--kernel/mutex-debug.h12
-rw-r--r--kernel/panic.c3
-rw-r--r--kernel/params.c8
-rw-r--r--kernel/perf_event.c106
-rw-r--r--kernel/pid.c12
-rw-r--r--kernel/power/console.c7
-rw-r--r--kernel/printk.c119
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/rtmutex-debug.c4
-rw-r--r--kernel/rtmutex.c106
-rw-r--r--kernel/sched.c223
-rw-r--r--kernel/sched_cpupri.c10
-rw-r--r--kernel/sched_cpupri.h2
-rw-r--r--kernel/sched_debug.c4
-rw-r--r--kernel/sched_fair.c4
-rw-r--r--kernel/sched_idletask.c4
-rw-r--r--kernel/sched_rt.c60
-rw-r--r--kernel/signal.c38
-rw-r--r--kernel/smp.c35
-rw-r--r--kernel/spinlock.c306
-rw-r--r--kernel/sys.c8
-rw-r--r--kernel/sysctl.c18
-rw-r--r--kernel/time/clockevents.c14
-rw-r--r--kernel/time/tick-broadcast.c42
-rw-r--r--kernel/time/tick-common.c20
-rw-r--r--kernel/time/tick-internal.h1
-rw-r--r--kernel/time/timecompare.c2
-rw-r--r--kernel/time/timer_list.c6
-rw-r--r--kernel/time/timer_stats.c17
-rw-r--r--kernel/trace/ring_buffer.c16
-rw-r--r--kernel/trace/trace.c54
-rw-r--r--kernel/trace/trace_clock.c8
-rw-r--r--kernel/trace/trace_sched_wakeup.c16
-rw-r--r--kernel/trace/trace_selftest.c4
-rw-r--r--kernel/trace/trace_stack.c16
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/argv_split.c13
-rw-r--r--lib/bitmap.c81
-rw-r--r--lib/crc32.c121
-rw-r--r--lib/ctype.c50
-rw-r--r--lib/debugobjects.c74
-rw-r--r--lib/dynamic_debug.c4
-rw-r--r--lib/genalloc.c33
-rw-r--r--lib/iommu-helper.c59
-rw-r--r--lib/kernel_lock.c22
-rw-r--r--lib/parser.c11
-rw-r--r--lib/plist.c8
-rw-r--r--lib/rwsem-spinlock.c23
-rw-r--r--lib/spinlock_debug.c64
-rw-r--r--lib/string.c25
-rw-r--r--lib/swiotlb.c4
-rw-r--r--lib/vsprintf.c395
-rw-r--r--mm/Kconfig16
-rw-r--r--mm/bootmem.c8
-rw-r--r--mm/hugetlb.c551
-rw-r--r--mm/internal.h23
-rw-r--r--mm/ksm.c953
-rw-r--r--mm/memcontrol.c426
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c31
-rw-r--r--mm/memory_hotplug.c16
-rw-r--r--mm/mempolicy.c69
-rw-r--r--mm/migrate.c131
-rw-r--r--mm/mincore.c37
-rw-r--r--mm/mlock.c45
-rw-r--r--mm/mmap.c50
-rw-r--r--mm/nommu.c8
-rw-r--r--mm/oom_kill.c103
-rw-r--r--mm/page_alloc.c26
-rw-r--r--mm/page_io.c17
-rw-r--r--mm/pagewalk.c32
-rw-r--r--mm/rmap.c354
-rw-r--r--mm/shmem.c11
-rw-r--r--mm/swapfile.c847
-rw-r--r--mm/truncate.c6
-rw-r--r--mm/vmalloc.c7
-rw-r--r--mm/vmscan.c321
-rw-r--r--mm/vmstat.c3
-rw-r--r--net/core/dev.c11
-rw-r--r--net/core/rtnetlink.c6
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/ipv4/Kconfig6
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c21
-rw-r--r--net/ipv4/syncookies.c27
-rw-r--r--net/ipv4/tcp_input.c24
-rw-r--r--net/ipv4/tcp_ipv4.c21
-rw-r--r--net/ipv4/tcp_minisocks.c10
-rw-r--r--net/ipv4/tcp_output.c18
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c19
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c7
-rw-r--r--net/ipv6/reassembly.c5
-rw-r--r--net/ipv6/syncookies.c28
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/irda/irnet/irnet.h1
-rw-r--r--net/irda/irnet/irnet_ppp.c8
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/xt_recent.c3
-rw-r--r--net/packet/af_packet.c71
-rw-r--r--net/rds/ib.c4
-rw-r--r--net/rds/iw.c4
-rw-r--r--net/sunrpc/svc_xprt.c31
-rw-r--r--net/sunrpc/svcauth_unix.c53
-rwxr-xr-xscripts/get_maintainer.pl499
-rw-r--r--sound/arm/pxa2xx-ac97.c2
-rw-r--r--sound/isa/gus/gus_mem.c3
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/cs5535audio/Makefile2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c1
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h4
-rw-r--r--sound/pci/cs5535audio/cs5535audio_olpc.c26
-rw-r--r--sound/pci/hda/hda_hwdep.c7
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_analog.c8
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/soc/codecs/wm8900.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.h2
-rw-r--r--sound/soc/soc-core.c2
1408 files changed, 59660 insertions, 24591 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index 9fe91c02ee40..bf1627b02a03 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -60,6 +60,19 @@ Description:
60Users: hotplug memory remove tools 60Users: hotplug memory remove tools
61 https://w3.opensource.ibm.com/projects/powerpc-utils/ 61 https://w3.opensource.ibm.com/projects/powerpc-utils/
62 62
63
64What: /sys/devices/system/memoryX/nodeY
65Date: October 2009
66Contact: Linux Memory Management list <linux-mm@kvack.org>
67Description:
68 When CONFIG_NUMA is enabled, a symbolic link that
69 points to the corresponding NUMA node directory.
70
71 For example, the following symbolic link is created for
72 memory section 9 on node0:
73 /sys/devices/system/memory/memory9/node0 -> ../../node/node0
74
75
63What: /sys/devices/system/node/nodeX/memoryY 76What: /sys/devices/system/node/nodeX/memoryY
64Date: September 2008 77Date: September 2008
65Contact: Gary Hade <garyhade@us.ibm.com> 78Contact: Gary Hade <garyhade@us.ibm.com>
@@ -70,4 +83,3 @@ Description:
70 memory section directory. For example, the following symbolic 83 memory section directory. For example, the following symbolic
71 link is created for memory section 9 on node0. 84 link is created for memory section 9 on node0.
72 /sys/devices/system/node/node0/memory9 -> ../../memory/memory9 85 /sys/devices/system/node/node0/memory9 -> ../../memory/memory9
73
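
The nodeY link added above is discovered by name rather than through a fixed filename, so user space typically scans the memoryX directory for an entry that begins with "node". A minimal sketch of that lookup (illustrative only, not part of this patch; the helper name, paths and error handling are assumptions):

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the NUMA node a memory section's "nodeY" symlink points to,
 * or -1 if none is found (e.g. CONFIG_NUMA disabled). */
static int memory_section_node(unsigned int section)
{
	char path[64];
	struct dirent *de;
	DIR *dir;
	int node = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/memory/memory%u", section);
	dir = opendir(path);
	if (!dir)
		return -1;
	while ((de = readdir(dir)) != NULL) {
		if (strncmp(de->d_name, "node", 4) == 0) {
			node = atoi(de->d_name + 4);	/* "node3" -> 3 */
			break;
		}
	}
	closedir(dir);
	return node;
}

int main(void)
{
	int node = memory_section_node(9);

	if (node >= 0)
		printf("memory9 is on node%d\n", node);
	return 0;
}
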
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 2aae06fcbed7..84a710f87c64 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -92,6 +92,20 @@ Description: Discover NUMA node a CPU belongs to
92 /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2 92 /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2
93 93
94 94
95What: /sys/devices/system/cpu/cpu#/node
96Date: October 2009
97Contact: Linux memory management mailing list <linux-mm@kvack.org>
98Description: Discover NUMA node a CPU belongs to
99
100 When CONFIG_NUMA is enabled, a symbolic link that points
101 to the corresponding NUMA node directory.
102
103 For example, the following symlink is created for cpu42
104 in NUMA node 2:
105
106 /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2
107
108
95What: /sys/devices/system/cpu/cpu#/topology/core_id 109What: /sys/devices/system/cpu/cpu#/topology/core_id
96 /sys/devices/system/cpu/cpu#/topology/core_siblings 110 /sys/devices/system/cpu/cpu#/topology/core_siblings
97 /sys/devices/system/cpu/cpu#/topology/core_siblings_list 111 /sys/devices/system/cpu/cpu#/topology/core_siblings_list
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d0f1efc5bf6..f08b313cd235 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -49,6 +49,8 @@ o oprofile 0.9 # oprofiled --version
49o udev 081 # udevinfo -V 49o udev 081 # udevinfo -V
50o grub 0.93 # grub --version 50o grub 0.93 # grub --version
51o mcelog 0.6 51o mcelog 0.6
52o iptables 1.4.1 # iptables -V
53
52 54
53Kernel compilation 55Kernel compilation
54================== 56==================
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index ab8300f67182..325cfd1d6d99 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -8,7 +8,7 @@
8 8
9DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \ 9DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
10 kernel-hacking.xml kernel-locking.xml deviceiobook.xml \ 10 kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
11 procfs-guide.xml writing_usb_driver.xml networking.xml \ 11 writing_usb_driver.xml networking.xml \
12 kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \ 12 kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
13 gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ 13 gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
14 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ 14 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
@@ -32,10 +32,10 @@ PS_METHOD = $(prefer-db2x)
32 32
33### 33###
34# The targets that may be used. 34# The targets that may be used.
35PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs media 35PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs xmldoclinks
36 36
37BOOKS := $(addprefix $(obj)/,$(DOCBOOKS)) 37BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
38xmldocs: $(BOOKS) 38xmldocs: $(BOOKS) xmldoclinks
39sgmldocs: xmldocs 39sgmldocs: xmldocs
40 40
41PS := $(patsubst %.xml, %.ps, $(BOOKS)) 41PS := $(patsubst %.xml, %.ps, $(BOOKS))
@@ -45,15 +45,24 @@ PDF := $(patsubst %.xml, %.pdf, $(BOOKS))
45pdfdocs: $(PDF) 45pdfdocs: $(PDF)
46 46
47HTML := $(sort $(patsubst %.xml, %.html, $(BOOKS))) 47HTML := $(sort $(patsubst %.xml, %.html, $(BOOKS)))
48htmldocs: media $(HTML) 48htmldocs: $(HTML)
49 $(call build_main_index) 49 $(call build_main_index)
50 $(call build_images)
50 51
51MAN := $(patsubst %.xml, %.9, $(BOOKS)) 52MAN := $(patsubst %.xml, %.9, $(BOOKS))
52mandocs: $(MAN) 53mandocs: $(MAN)
53 54
54media: 55build_images = mkdir -p $(objtree)/Documentation/DocBook/media/ && \
55 mkdir -p $(srctree)/Documentation/DocBook/media/ 56 cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(objtree)/Documentation/DocBook/media/
56 cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(srctree)/Documentation/DocBook/media/ 57
58xmldoclinks:
59ifneq ($(objtree),$(srctree))
60 for dep in dvb media-entities.tmpl media-indices.tmpl v4l; do \
61 rm -f $(objtree)/Documentation/DocBook/$$dep \
62 && ln -s $(srctree)/Documentation/DocBook/$$dep $(objtree)/Documentation/DocBook/ \
63 || exit; \
64 done
65endif
57 66
58installmandocs: mandocs 67installmandocs: mandocs
59 mkdir -p /usr/local/man/man9/ 68 mkdir -p /usr/local/man/man9/
@@ -65,7 +74,7 @@ KERNELDOC = $(srctree)/scripts/kernel-doc
65DOCPROC = $(objtree)/scripts/basic/docproc 74DOCPROC = $(objtree)/scripts/basic/docproc
66 75
67XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl 76XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
68#XMLTOFLAGS += --skip-validation 77XMLTOFLAGS += --skip-validation
69 78
70### 79###
71# DOCPROC is used for two purposes: 80# DOCPROC is used for two purposes:
@@ -101,17 +110,6 @@ endif
101# Changes in kernel-doc force a rebuild of all documentation 110# Changes in kernel-doc force a rebuild of all documentation
102$(BOOKS): $(KERNELDOC) 111$(BOOKS): $(KERNELDOC)
103 112
104###
105# procfs guide uses a .c file as example code.
106# This requires an explicit dependency
107C-procfs-example = procfs_example.xml
108C-procfs-example2 = $(addprefix $(obj)/,$(C-procfs-example))
109$(obj)/procfs-guide.xml: $(C-procfs-example2)
110
111# List of programs to build
112##oops, this is a kernel module::hostprogs-y := procfs_example
113obj-m += procfs_example.o
114
115# Tell kbuild to always build the programs 113# Tell kbuild to always build the programs
116always := $(hostprogs-y) 114always := $(hostprogs-y)
117 115
@@ -238,7 +236,7 @@ clean-files := $(DOCBOOKS) \
238 $(patsubst %.xml, %.pdf, $(DOCBOOKS)) \ 236 $(patsubst %.xml, %.pdf, $(DOCBOOKS)) \
239 $(patsubst %.xml, %.html, $(DOCBOOKS)) \ 237 $(patsubst %.xml, %.html, $(DOCBOOKS)) \
240 $(patsubst %.xml, %.9, $(DOCBOOKS)) \ 238 $(patsubst %.xml, %.9, $(DOCBOOKS)) \
241 $(C-procfs-example) $(index) 239 $(index)
242 240
243clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man 241clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
244 242
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index bb5ab741220e..c725cb852c54 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -23,6 +23,7 @@
23<!ENTITY VIDIOC-ENUMINPUT "<link linkend='vidioc-enuminput'><constant>VIDIOC_ENUMINPUT</constant></link>"> 23<!ENTITY VIDIOC-ENUMINPUT "<link linkend='vidioc-enuminput'><constant>VIDIOC_ENUMINPUT</constant></link>">
24<!ENTITY VIDIOC-ENUMOUTPUT "<link linkend='vidioc-enumoutput'><constant>VIDIOC_ENUMOUTPUT</constant></link>"> 24<!ENTITY VIDIOC-ENUMOUTPUT "<link linkend='vidioc-enumoutput'><constant>VIDIOC_ENUMOUTPUT</constant></link>">
25<!ENTITY VIDIOC-ENUMSTD "<link linkend='vidioc-enumstd'><constant>VIDIOC_ENUMSTD</constant></link>"> 25<!ENTITY VIDIOC-ENUMSTD "<link linkend='vidioc-enumstd'><constant>VIDIOC_ENUMSTD</constant></link>">
26<!ENTITY VIDIOC-ENUM-DV-PRESETS "<link linkend='vidioc-enum-dv-presets'><constant>VIDIOC_ENUM_DV_PRESETS</constant></link>">
26<!ENTITY VIDIOC-ENUM-FMT "<link linkend='vidioc-enum-fmt'><constant>VIDIOC_ENUM_FMT</constant></link>"> 27<!ENTITY VIDIOC-ENUM-FMT "<link linkend='vidioc-enum-fmt'><constant>VIDIOC_ENUM_FMT</constant></link>">
27<!ENTITY VIDIOC-ENUM-FRAMEINTERVALS "<link linkend='vidioc-enum-frameintervals'><constant>VIDIOC_ENUM_FRAMEINTERVALS</constant></link>"> 28<!ENTITY VIDIOC-ENUM-FRAMEINTERVALS "<link linkend='vidioc-enum-frameintervals'><constant>VIDIOC_ENUM_FRAMEINTERVALS</constant></link>">
28<!ENTITY VIDIOC-ENUM-FRAMESIZES "<link linkend='vidioc-enum-framesizes'><constant>VIDIOC_ENUM_FRAMESIZES</constant></link>"> 29<!ENTITY VIDIOC-ENUM-FRAMESIZES "<link linkend='vidioc-enum-framesizes'><constant>VIDIOC_ENUM_FRAMESIZES</constant></link>">
@@ -30,6 +31,8 @@
30<!ENTITY VIDIOC-G-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_G_AUDOUT</constant></link>"> 31<!ENTITY VIDIOC-G-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_G_AUDOUT</constant></link>">
31<!ENTITY VIDIOC-G-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_G_CROP</constant></link>"> 32<!ENTITY VIDIOC-G-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_G_CROP</constant></link>">
32<!ENTITY VIDIOC-G-CTRL "<link linkend='vidioc-g-ctrl'><constant>VIDIOC_G_CTRL</constant></link>"> 33<!ENTITY VIDIOC-G-CTRL "<link linkend='vidioc-g-ctrl'><constant>VIDIOC_G_CTRL</constant></link>">
34<!ENTITY VIDIOC-G-DV-PRESET "<link linkend='vidioc-g-dv-preset'><constant>VIDIOC_G_DV_PRESET</constant></link>">
35<!ENTITY VIDIOC-G-DV-TIMINGS "<link linkend='vidioc-g-dv-timings'><constant>VIDIOC_G_DV_TIMINGS</constant></link>">
33<!ENTITY VIDIOC-G-ENC-INDEX "<link linkend='vidioc-g-enc-index'><constant>VIDIOC_G_ENC_INDEX</constant></link>"> 36<!ENTITY VIDIOC-G-ENC-INDEX "<link linkend='vidioc-g-enc-index'><constant>VIDIOC_G_ENC_INDEX</constant></link>">
34<!ENTITY VIDIOC-G-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_G_EXT_CTRLS</constant></link>"> 37<!ENTITY VIDIOC-G-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_G_EXT_CTRLS</constant></link>">
35<!ENTITY VIDIOC-G-FBUF "<link linkend='vidioc-g-fbuf'><constant>VIDIOC_G_FBUF</constant></link>"> 38<!ENTITY VIDIOC-G-FBUF "<link linkend='vidioc-g-fbuf'><constant>VIDIOC_G_FBUF</constant></link>">
@@ -53,6 +56,7 @@
53<!ENTITY VIDIOC-QUERYCTRL "<link linkend='vidioc-queryctrl'><constant>VIDIOC_QUERYCTRL</constant></link>"> 56<!ENTITY VIDIOC-QUERYCTRL "<link linkend='vidioc-queryctrl'><constant>VIDIOC_QUERYCTRL</constant></link>">
54<!ENTITY VIDIOC-QUERYMENU "<link linkend='vidioc-queryctrl'><constant>VIDIOC_QUERYMENU</constant></link>"> 57<!ENTITY VIDIOC-QUERYMENU "<link linkend='vidioc-queryctrl'><constant>VIDIOC_QUERYMENU</constant></link>">
55<!ENTITY VIDIOC-QUERYSTD "<link linkend='vidioc-querystd'><constant>VIDIOC_QUERYSTD</constant></link>"> 58<!ENTITY VIDIOC-QUERYSTD "<link linkend='vidioc-querystd'><constant>VIDIOC_QUERYSTD</constant></link>">
59<!ENTITY VIDIOC-QUERY-DV-PRESET "<link linkend='vidioc-query-dv-preset'><constant>VIDIOC_QUERY_DV_PRESET</constant></link>">
56<!ENTITY VIDIOC-REQBUFS "<link linkend='vidioc-reqbufs'><constant>VIDIOC_REQBUFS</constant></link>"> 60<!ENTITY VIDIOC-REQBUFS "<link linkend='vidioc-reqbufs'><constant>VIDIOC_REQBUFS</constant></link>">
57<!ENTITY VIDIOC-STREAMOFF "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMOFF</constant></link>"> 61<!ENTITY VIDIOC-STREAMOFF "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMOFF</constant></link>">
58<!ENTITY VIDIOC-STREAMON "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMON</constant></link>"> 62<!ENTITY VIDIOC-STREAMON "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMON</constant></link>">
@@ -60,6 +64,8 @@
60<!ENTITY VIDIOC-S-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_S_AUDOUT</constant></link>"> 64<!ENTITY VIDIOC-S-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_S_AUDOUT</constant></link>">
61<!ENTITY VIDIOC-S-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_S_CROP</constant></link>"> 65<!ENTITY VIDIOC-S-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_S_CROP</constant></link>">
62<!ENTITY VIDIOC-S-CTRL "<link linkend='vidioc-g-ctrl'><constant>VIDIOC_S_CTRL</constant></link>"> 66<!ENTITY VIDIOC-S-CTRL "<link linkend='vidioc-g-ctrl'><constant>VIDIOC_S_CTRL</constant></link>">
67<!ENTITY VIDIOC-S-DV-PRESET "<link linkend='vidioc-g-dv-preset'><constant>VIDIOC_S_DV_PRESET</constant></link>">
68<!ENTITY VIDIOC-S-DV-TIMINGS "<link linkend='vidioc-g-dv-timings'><constant>VIDIOC_S_DV_TIMINGS</constant></link>">
63<!ENTITY VIDIOC-S-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_S_EXT_CTRLS</constant></link>"> 69<!ENTITY VIDIOC-S-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_S_EXT_CTRLS</constant></link>">
64<!ENTITY VIDIOC-S-FBUF "<link linkend='vidioc-g-fbuf'><constant>VIDIOC_S_FBUF</constant></link>"> 70<!ENTITY VIDIOC-S-FBUF "<link linkend='vidioc-g-fbuf'><constant>VIDIOC_S_FBUF</constant></link>">
65<!ENTITY VIDIOC-S-FMT "<link linkend='vidioc-g-fmt'><constant>VIDIOC_S_FMT</constant></link>"> 71<!ENTITY VIDIOC-S-FMT "<link linkend='vidioc-g-fmt'><constant>VIDIOC_S_FMT</constant></link>">
@@ -118,6 +124,7 @@
118<!-- Structures --> 124<!-- Structures -->
119<!ENTITY v4l2-audio "struct&nbsp;<link linkend='v4l2-audio'>v4l2_audio</link>"> 125<!ENTITY v4l2-audio "struct&nbsp;<link linkend='v4l2-audio'>v4l2_audio</link>">
120<!ENTITY v4l2-audioout "struct&nbsp;<link linkend='v4l2-audioout'>v4l2_audioout</link>"> 126<!ENTITY v4l2-audioout "struct&nbsp;<link linkend='v4l2-audioout'>v4l2_audioout</link>">
127<!ENTITY v4l2-bt-timings "struct&nbsp;<link linkend='v4l2-bt-timings'>v4l2_bt_timings</link>">
121<!ENTITY v4l2-buffer "struct&nbsp;<link linkend='v4l2-buffer'>v4l2_buffer</link>"> 128<!ENTITY v4l2-buffer "struct&nbsp;<link linkend='v4l2-buffer'>v4l2_buffer</link>">
122<!ENTITY v4l2-capability "struct&nbsp;<link linkend='v4l2-capability'>v4l2_capability</link>"> 129<!ENTITY v4l2-capability "struct&nbsp;<link linkend='v4l2-capability'>v4l2_capability</link>">
123<!ENTITY v4l2-captureparm "struct&nbsp;<link linkend='v4l2-captureparm'>v4l2_captureparm</link>"> 130<!ENTITY v4l2-captureparm "struct&nbsp;<link linkend='v4l2-captureparm'>v4l2_captureparm</link>">
@@ -128,6 +135,9 @@
128<!ENTITY v4l2-dbg-chip-ident "struct&nbsp;<link linkend='v4l2-dbg-chip-ident'>v4l2_dbg_chip_ident</link>"> 135<!ENTITY v4l2-dbg-chip-ident "struct&nbsp;<link linkend='v4l2-dbg-chip-ident'>v4l2_dbg_chip_ident</link>">
129<!ENTITY v4l2-dbg-match "struct&nbsp;<link linkend='v4l2-dbg-match'>v4l2_dbg_match</link>"> 136<!ENTITY v4l2-dbg-match "struct&nbsp;<link linkend='v4l2-dbg-match'>v4l2_dbg_match</link>">
130<!ENTITY v4l2-dbg-register "struct&nbsp;<link linkend='v4l2-dbg-register'>v4l2_dbg_register</link>"> 137<!ENTITY v4l2-dbg-register "struct&nbsp;<link linkend='v4l2-dbg-register'>v4l2_dbg_register</link>">
138<!ENTITY v4l2-dv-enum-preset "struct&nbsp;<link linkend='v4l2-dv-enum-preset'>v4l2_dv_enum_preset</link>">
139<!ENTITY v4l2-dv-preset "struct&nbsp;<link linkend='v4l2-dv-preset'>v4l2_dv_preset</link>">
140<!ENTITY v4l2-dv-timings "struct&nbsp;<link linkend='v4l2-dv-timings'>v4l2_dv_timings</link>">
131<!ENTITY v4l2-enc-idx "struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link>"> 141<!ENTITY v4l2-enc-idx "struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link>">
132<!ENTITY v4l2-enc-idx-entry "struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link>"> 142<!ENTITY v4l2-enc-idx-entry "struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link>">
133<!ENTITY v4l2-encoder-cmd "struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link>"> 143<!ENTITY v4l2-encoder-cmd "struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link>">
@@ -243,6 +253,10 @@
243<!ENTITY sub-enumaudioout SYSTEM "v4l/vidioc-enumaudioout.xml"> 253<!ENTITY sub-enumaudioout SYSTEM "v4l/vidioc-enumaudioout.xml">
244<!ENTITY sub-enuminput SYSTEM "v4l/vidioc-enuminput.xml"> 254<!ENTITY sub-enuminput SYSTEM "v4l/vidioc-enuminput.xml">
245<!ENTITY sub-enumoutput SYSTEM "v4l/vidioc-enumoutput.xml"> 255<!ENTITY sub-enumoutput SYSTEM "v4l/vidioc-enumoutput.xml">
256<!ENTITY sub-enum-dv-presets SYSTEM "v4l/vidioc-enum-dv-presets.xml">
257<!ENTITY sub-g-dv-preset SYSTEM "v4l/vidioc-g-dv-preset.xml">
258<!ENTITY sub-query-dv-preset SYSTEM "v4l/vidioc-query-dv-preset.xml">
259<!ENTITY sub-g-dv-timings SYSTEM "v4l/vidioc-g-dv-timings.xml">
246<!ENTITY sub-enumstd SYSTEM "v4l/vidioc-enumstd.xml"> 260<!ENTITY sub-enumstd SYSTEM "v4l/vidioc-enumstd.xml">
247<!ENTITY sub-g-audio SYSTEM "v4l/vidioc-g-audio.xml"> 261<!ENTITY sub-g-audio SYSTEM "v4l/vidioc-g-audio.xml">
248<!ENTITY sub-g-audioout SYSTEM "v4l/vidioc-g-audioout.xml"> 262<!ENTITY sub-g-audioout SYSTEM "v4l/vidioc-g-audioout.xml">
@@ -333,6 +347,10 @@
333<!ENTITY enumaudioout SYSTEM "v4l/vidioc-enumaudioout.xml"> 347<!ENTITY enumaudioout SYSTEM "v4l/vidioc-enumaudioout.xml">
334<!ENTITY enuminput SYSTEM "v4l/vidioc-enuminput.xml"> 348<!ENTITY enuminput SYSTEM "v4l/vidioc-enuminput.xml">
335<!ENTITY enumoutput SYSTEM "v4l/vidioc-enumoutput.xml"> 349<!ENTITY enumoutput SYSTEM "v4l/vidioc-enumoutput.xml">
350<!ENTITY enum-dv-presets SYSTEM "v4l/vidioc-enum-dv-presets.xml">
351<!ENTITY g-dv-preset SYSTEM "v4l/vidioc-g-dv-preset.xml">
352<!ENTITY query-dv-preset SYSTEM "v4l/vidioc-query-dv-preset.xml">
353<!ENTITY g-dv-timings SYSTEM "v4l/vidioc-g-dv-timings.xml">
336<!ENTITY enumstd SYSTEM "v4l/vidioc-enumstd.xml"> 354<!ENTITY enumstd SYSTEM "v4l/vidioc-enumstd.xml">
337<!ENTITY g-audio SYSTEM "v4l/vidioc-g-audio.xml"> 355<!ENTITY g-audio SYSTEM "v4l/vidioc-g-audio.xml">
338<!ENTITY g-audioout SYSTEM "v4l/vidioc-g-audioout.xml"> 356<!ENTITY g-audioout SYSTEM "v4l/vidioc-g-audioout.xml">
diff --git a/Documentation/DocBook/media-indices.tmpl b/Documentation/DocBook/media-indices.tmpl
index 9e30a236d74f..78d6031de001 100644
--- a/Documentation/DocBook/media-indices.tmpl
+++ b/Documentation/DocBook/media-indices.tmpl
@@ -36,6 +36,7 @@
36<indexentry><primaryie>enum&nbsp;<link linkend='v4l2-preemphasis'>v4l2_preemphasis</link></primaryie></indexentry> 36<indexentry><primaryie>enum&nbsp;<link linkend='v4l2-preemphasis'>v4l2_preemphasis</link></primaryie></indexentry>
37<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-audio'>v4l2_audio</link></primaryie></indexentry> 37<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-audio'>v4l2_audio</link></primaryie></indexentry>
38<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-audioout'>v4l2_audioout</link></primaryie></indexentry> 38<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-audioout'>v4l2_audioout</link></primaryie></indexentry>
39<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-bt-timings'>v4l2_bt_timings</link></primaryie></indexentry>
39<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-buffer'>v4l2_buffer</link></primaryie></indexentry> 40<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-buffer'>v4l2_buffer</link></primaryie></indexentry>
40<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-capability'>v4l2_capability</link></primaryie></indexentry> 41<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-capability'>v4l2_capability</link></primaryie></indexentry>
41<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-captureparm'>v4l2_captureparm</link></primaryie></indexentry> 42<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-captureparm'>v4l2_captureparm</link></primaryie></indexentry>
@@ -46,6 +47,9 @@
46<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-chip-ident'>v4l2_dbg_chip_ident</link></primaryie></indexentry> 47<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-chip-ident'>v4l2_dbg_chip_ident</link></primaryie></indexentry>
47<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-match'>v4l2_dbg_match</link></primaryie></indexentry> 48<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-match'>v4l2_dbg_match</link></primaryie></indexentry>
48<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-register'>v4l2_dbg_register</link></primaryie></indexentry> 49<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-register'>v4l2_dbg_register</link></primaryie></indexentry>
50<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dv-enum-preset'>v4l2_dv_enum_preset</link></primaryie></indexentry>
51<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dv-preset'>v4l2_dv_preset</link></primaryie></indexentry>
52<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dv-timings'>v4l2_dv_timings</link></primaryie></indexentry>
49<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link></primaryie></indexentry> 53<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link></primaryie></indexentry>
50<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link></primaryie></indexentry> 54<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link></primaryie></indexentry>
51<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link></primaryie></indexentry> 55<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link></primaryie></indexentry>
diff --git a/Documentation/DocBook/procfs-guide.tmpl b/Documentation/DocBook/procfs-guide.tmpl
deleted file mode 100644
index 9eba4b7af73d..000000000000
--- a/Documentation/DocBook/procfs-guide.tmpl
+++ /dev/null
@@ -1,626 +0,0 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
4<!ENTITY procfsexample SYSTEM "procfs_example.xml">
5]>
6
7<book id="LKProcfsGuide">
8 <bookinfo>
9 <title>Linux Kernel Procfs Guide</title>
10
11 <authorgroup>
12 <author>
13 <firstname>Erik</firstname>
14 <othername>(J.A.K.)</othername>
15 <surname>Mouw</surname>
16 <affiliation>
17 <address>
18 <email>mouw@nl.linux.org</email>
19 </address>
20 </affiliation>
21 </author>
22 <othercredit>
23 <contrib>
24 This software and documentation were written while working on the
25 LART computing board
26 (<ulink url="http://www.lartmaker.nl/">http://www.lartmaker.nl/</ulink>),
 27 which was sponsored by the Delft University of Technology projects
28 Mobile Multi-media Communications and Ubiquitous Communications.
29 </contrib>
30 </othercredit>
31 </authorgroup>
32
33 <revhistory>
34 <revision>
35 <revnumber>1.0</revnumber>
36 <date>May 30, 2001</date>
37 <revremark>Initial revision posted to linux-kernel</revremark>
38 </revision>
39 <revision>
40 <revnumber>1.1</revnumber>
41 <date>June 3, 2001</date>
42 <revremark>Revised after comments from linux-kernel</revremark>
43 </revision>
44 </revhistory>
45
46 <copyright>
47 <year>2001</year>
48 <holder>Erik Mouw</holder>
49 </copyright>
50
51
52 <legalnotice>
53 <para>
54 This documentation is free software; you can redistribute it
55 and/or modify it under the terms of the GNU General Public
56 License as published by the Free Software Foundation; either
57 version 2 of the License, or (at your option) any later
58 version.
59 </para>
60
61 <para>
62 This documentation is distributed in the hope that it will be
63 useful, but WITHOUT ANY WARRANTY; without even the implied
64 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
65 PURPOSE. See the GNU General Public License for more details.
66 </para>
67
68 <para>
69 You should have received a copy of the GNU General Public
70 License along with this program; if not, write to the Free
71 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
72 MA 02111-1307 USA
73 </para>
74
75 <para>
76 For more details see the file COPYING in the source
77 distribution of Linux.
78 </para>
79 </legalnotice>
80 </bookinfo>
81
82
83
84
85 <toc>
86 </toc>
87
88
89
90
91 <preface id="Preface">
92 <title>Preface</title>
93
94 <para>
95 This guide describes the use of the procfs file system from
96 within the Linux kernel. The idea to write this guide came up on
97 the #kernelnewbies IRC channel (see <ulink
98 url="http://www.kernelnewbies.org/">http://www.kernelnewbies.org/</ulink>),
99 when Jeff Garzik explained the use of procfs and forwarded me a
100 message Alexander Viro wrote to the linux-kernel mailing list. I
101 agreed to write it up nicely, so here it is.
102 </para>
103
104 <para>
105 I'd like to thank Jeff Garzik
106 <email>jgarzik@pobox.com</email> and Alexander Viro
107 <email>viro@parcelfarce.linux.theplanet.co.uk</email> for their input,
108 Tim Waugh <email>twaugh@redhat.com</email> for his <ulink
109 url="http://people.redhat.com/twaugh/docbook/selfdocbook/">Selfdocbook</ulink>,
110 and Marc Joosen <email>marcj@historia.et.tudelft.nl</email> for
111 proofreading.
112 </para>
113
114 <para>
115 Erik
116 </para>
117 </preface>
118
119
120
121
122 <chapter id="intro">
123 <title>Introduction</title>
124
125 <para>
126 The <filename class="directory">/proc</filename> file system
127 (procfs) is a special file system in the linux kernel. It's a
128 virtual file system: it is not associated with a block device
129 but exists only in memory. The files in the procfs are there to
130 allow userland programs access to certain information from the
131 kernel (like process information in <filename
132 class="directory">/proc/[0-9]+/</filename>), but also for debug
133 purposes (like <filename>/proc/ksyms</filename>).
134 </para>
135
136 <para>
137 This guide describes the use of the procfs file system from
138 within the Linux kernel. It starts by introducing all relevant
139 functions to manage the files within the file system. After that
140 it shows how to communicate with userland, and some tips and
141 tricks will be pointed out. Finally a complete example will be
142 shown.
143 </para>
144
145 <para>
146 Note that the files in <filename
147 class="directory">/proc/sys</filename> are sysctl files: they
148 don't belong to procfs and are governed by a completely
149 different API described in the Kernel API book.
150 </para>
151 </chapter>
152
153
154
155
156 <chapter id="managing">
157 <title>Managing procfs entries</title>
158
159 <para>
160 This chapter describes the functions that various kernel
161 components use to populate the procfs with files, symlinks,
162 device nodes, and directories.
163 </para>
164
165 <para>
166 A minor note before we start: if you want to use any of the
167 procfs functions, be sure to include the correct header file!
168 This should be one of the first lines in your code:
169 </para>
170
171 <programlisting>
172#include &lt;linux/proc_fs.h&gt;
173 </programlisting>
174
175
176
177
178 <sect1 id="regularfile">
179 <title>Creating a regular file</title>
180
181 <funcsynopsis>
182 <funcprototype>
183 <funcdef>struct proc_dir_entry* <function>create_proc_entry</function></funcdef>
184 <paramdef>const char* <parameter>name</parameter></paramdef>
185 <paramdef>mode_t <parameter>mode</parameter></paramdef>
186 <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
187 </funcprototype>
188 </funcsynopsis>
189
190 <para>
191 This function creates a regular file with the name
192 <parameter>name</parameter>, file mode
193 <parameter>mode</parameter> in the directory
194 <parameter>parent</parameter>. To create a file in the root of
195 the procfs, use <constant>NULL</constant> as
196 <parameter>parent</parameter> parameter. When successful, the
197 function will return a pointer to the freshly created
198 <structname>struct proc_dir_entry</structname>; otherwise it
199 will return <constant>NULL</constant>. <xref
200 linkend="userland"/> describes how to do something useful with
201 regular files.
202 </para>
203
204 <para>
205 Note that it is specifically supported that you can pass a
206 path that spans multiple directories. For example
207 <function>create_proc_entry</function>(<parameter>"drivers/via0/info"</parameter>)
208 will create the <filename class="directory">via0</filename>
209 directory if necessary, with standard
210 <constant>0755</constant> permissions.
211 </para>
212
213 <para>
214 If you only want to be able to read the file, the function
215 <function>create_proc_read_entry</function> described in <xref
216 linkend="convenience"/> may be used to create and initialise
217 the procfs entry in one single call.
218 </para>
219 </sect1>
220
221
222
223
224 <sect1 id="Creating_a_symlink">
225 <title>Creating a symlink</title>
226
227 <funcsynopsis>
228 <funcprototype>
229 <funcdef>struct proc_dir_entry*
230 <function>proc_symlink</function></funcdef> <paramdef>const
231 char* <parameter>name</parameter></paramdef>
232 <paramdef>struct proc_dir_entry*
233 <parameter>parent</parameter></paramdef> <paramdef>const
234 char* <parameter>dest</parameter></paramdef>
235 </funcprototype>
236 </funcsynopsis>
237
238 <para>
239 This creates a symlink in the procfs directory
240 <parameter>parent</parameter> that points from
241 <parameter>name</parameter> to
242 <parameter>dest</parameter>. This translates in userland to
243 <literal>ln -s</literal> <parameter>dest</parameter>
244 <parameter>name</parameter>.
245 </para>
246 </sect1>
247
248 <sect1 id="Creating_a_directory">
249 <title>Creating a directory</title>
250
251 <funcsynopsis>
252 <funcprototype>
253 <funcdef>struct proc_dir_entry* <function>proc_mkdir</function></funcdef>
254 <paramdef>const char* <parameter>name</parameter></paramdef>
255 <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
256 </funcprototype>
257 </funcsynopsis>
258
259 <para>
260 Create a directory <parameter>name</parameter> in the procfs
261 directory <parameter>parent</parameter>.
262 </para>
263 </sect1>
264
265
266
267
268 <sect1 id="Removing_an_entry">
269 <title>Removing an entry</title>
270
271 <funcsynopsis>
272 <funcprototype>
273 <funcdef>void <function>remove_proc_entry</function></funcdef>
274 <paramdef>const char* <parameter>name</parameter></paramdef>
275 <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
276 </funcprototype>
277 </funcsynopsis>
278
279 <para>
280 Removes the entry <parameter>name</parameter> in the directory
281 <parameter>parent</parameter> from the procfs. Entries are
282 removed by their <emphasis>name</emphasis>, not by the
283 <structname>struct proc_dir_entry</structname> returned by the
284 various create functions. Note that this function doesn't
285 recursively remove entries.
286 </para>
287
288 <para>
289 Be sure to free the <structfield>data</structfield> entry from
290 the <structname>struct proc_dir_entry</structname> before
291 <function>remove_proc_entry</function> is called (that is: if
292 there was some <structfield>data</structfield> allocated, of
293 course). See <xref linkend="usingdata"/> for more information
294 on using the <structfield>data</structfield> entry.
295 </para>
296 </sect1>
297 </chapter>
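
Taken together, the calls described in this chapter amount to a very small module. A sketch against the procfs API as the guide documents it (the names "mydir"/"myfile" are illustrative and error handling is trimmed; the full worked example follows later in procfs_example.c):

#include <linux/module.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *mydir, *myfile;

static int __init myproc_init(void)
{
	/* directory first, then a regular file inside it */
	mydir = proc_mkdir("mydir", NULL);
	if (!mydir)
		return -ENOMEM;
	myfile = create_proc_entry("myfile", 0644, mydir);
	if (!myfile) {
		remove_proc_entry("mydir", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit myproc_exit(void)
{
	/* entries are removed by name, innermost first */
	remove_proc_entry("myfile", mydir);
	remove_proc_entry("mydir", NULL);
}

module_init(myproc_init);
module_exit(myproc_exit);
MODULE_LICENSE("GPL");
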
298
299
300
301
302 <chapter id="userland">
303 <title>Communicating with userland</title>
304
305 <para>
306 Instead of reading (or writing) information directly from
307 kernel memory, procfs works with <emphasis>call back
308 functions</emphasis> for files: functions that are called when
309 a specific file is being read or written. Such functions have
310 to be initialised after the procfs file is created by setting
311 the <structfield>read_proc</structfield> and/or
312 <structfield>write_proc</structfield> fields in the
313 <structname>struct proc_dir_entry*</structname> that the
314 function <function>create_proc_entry</function> returned:
315 </para>
316
317 <programlisting>
318struct proc_dir_entry* entry;
319
320entry->read_proc = read_proc_foo;
321entry->write_proc = write_proc_foo;
322 </programlisting>
323
324 <para>
 325 If you only want to use the
326 <structfield>read_proc</structfield>, the function
327 <function>create_proc_read_entry</function> described in <xref
328 linkend="convenience"/> may be used to create and initialise the
329 procfs entry in one single call.
330 </para>
331
332
333
334 <sect1 id="Reading_data">
335 <title>Reading data</title>
336
337 <para>
338 The read function is a call back function that allows userland
339 processes to read data from the kernel. The read function
340 should have the following format:
341 </para>
342
343 <funcsynopsis>
344 <funcprototype>
345 <funcdef>int <function>read_func</function></funcdef>
346 <paramdef>char* <parameter>buffer</parameter></paramdef>
347 <paramdef>char** <parameter>start</parameter></paramdef>
348 <paramdef>off_t <parameter>off</parameter></paramdef>
349 <paramdef>int <parameter>count</parameter></paramdef>
350 <paramdef>int* <parameter>peof</parameter></paramdef>
351 <paramdef>void* <parameter>data</parameter></paramdef>
352 </funcprototype>
353 </funcsynopsis>
354
355 <para>
356 The read function should write its information into the
357 <parameter>buffer</parameter>, which will be exactly
358 <literal>PAGE_SIZE</literal> bytes long.
359 </para>
360
361 <para>
362 The parameter
363 <parameter>peof</parameter> should be used to signal that the
364 end of the file has been reached by writing
365 <literal>1</literal> to the memory location
366 <parameter>peof</parameter> points to.
367 </para>
368
369 <para>
370 The <parameter>data</parameter>
371 parameter can be used to create a single call back function for
372 several files, see <xref linkend="usingdata"/>.
373 </para>
374
375 <para>
376 The rest of the parameters and the return value are described
377 by a comment in <filename>fs/proc/generic.c</filename> as follows:
378 </para>
379
380 <blockquote>
381 <para>
382 You have three ways to return data:
383 </para>
384 <orderedlist>
385 <listitem>
386 <para>
387 Leave <literal>*start = NULL</literal>. (This is the default.)
388 Put the data of the requested offset at that
389 offset within the buffer. Return the number (<literal>n</literal>)
390 of bytes there are from the beginning of the
391 buffer up to the last byte of data. If the
392 number of supplied bytes (<literal>= n - offset</literal>) is
393 greater than zero and you didn't signal eof
394 and the reader is prepared to take more data
395 you will be called again with the requested
396 offset advanced by the number of bytes
397 absorbed. This interface is useful for files
398 no larger than the buffer.
399 </para>
400 </listitem>
401 <listitem>
402 <para>
403 Set <literal>*start</literal> to an unsigned long value less than
404 the buffer address but greater than zero.
405 Put the data of the requested offset at the
406 beginning of the buffer. Return the number of
407 bytes of data placed there. If this number is
408 greater than zero and you didn't signal eof
409 and the reader is prepared to take more data
410 you will be called again with the requested
411 offset advanced by <literal>*start</literal>. This interface is
412 useful when you have a large file consisting
413 of a series of blocks which you want to count
414 and return as wholes.
415 (Hack by Paul.Russell@rustcorp.com.au)
416 </para>
417 </listitem>
418 <listitem>
419 <para>
420 Set <literal>*start</literal> to an address within the buffer.
421 Put the data of the requested offset at <literal>*start</literal>.
422 Return the number of bytes of data placed there.
423 If this number is greater than zero and you
424 didn't signal eof and the reader is prepared to
425 take more data you will be called again with the
426 requested offset advanced by the number of bytes
427 absorbed.
428 </para>
429 </listitem>
430 </orderedlist>
431 </blockquote>
432
433 <para>
434 <xref linkend="example"/> shows how to use a read call back
435 function.
436 </para>
437 </sect1>
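
A callback following the first convention above (leave *start NULL and signal end of file through peof) can be as small as this sketch (illustrative, not part of the guide's example code):

static int my_read_proc(char *buffer, char **start, off_t off,
			int count, int *peof, void *data)
{
	int len;

	/* content fits well within the single PAGE_SIZE buffer */
	len = sprintf(buffer, "hello from the kernel\n");
	*peof = 1;
	return len;
}
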
438
439
440
441
442 <sect1 id="Writing_data">
443 <title>Writing data</title>
444
445 <para>
446 The write call back function allows a userland process to write
447 data to the kernel, so it has some kind of control over the
448 kernel. The write function should have the following format:
449 </para>
450
451 <funcsynopsis>
452 <funcprototype>
453 <funcdef>int <function>write_func</function></funcdef>
454 <paramdef>struct file* <parameter>file</parameter></paramdef>
455 <paramdef>const char* <parameter>buffer</parameter></paramdef>
456 <paramdef>unsigned long <parameter>count</parameter></paramdef>
457 <paramdef>void* <parameter>data</parameter></paramdef>
458 </funcprototype>
459 </funcsynopsis>
460
461 <para>
462 The write function should read <parameter>count</parameter>
463 bytes at maximum from the <parameter>buffer</parameter>. Note
464 that the <parameter>buffer</parameter> doesn't live in the
465 kernel's memory space, so it should first be copied to kernel
466 space with <function>copy_from_user</function>. The
467 <parameter>file</parameter> parameter is usually
468 ignored. <xref linkend="usingdata"/> shows how to use the
469 <parameter>data</parameter> parameter.
470 </para>
471
472 <para>
473 Again, <xref linkend="example"/> shows how to use this call back
474 function.
475 </para>
476 </sect1>
477
478
479
480
481 <sect1 id="usingdata">
482 <title>A single call back for many files</title>
483
484 <para>
485 When a large number of almost identical files is used, it's
486 quite inconvenient to use a separate call back function for
487 each file. A better approach is to have a single call back
488 function that distinguishes between the files by using the
489 <structfield>data</structfield> field in <structname>struct
490 proc_dir_entry</structname>. First of all, the
491 <structfield>data</structfield> field has to be initialised:
492 </para>
493
494 <programlisting>
495struct proc_dir_entry* entry;
496struct my_file_data *file_data;
497
498file_data = kmalloc(sizeof(struct my_file_data), GFP_KERNEL);
499entry->data = file_data;
500 </programlisting>
501
502 <para>
503 The <structfield>data</structfield> field is a <type>void
504 *</type>, so it can be initialised with anything.
505 </para>
506
507 <para>
508 Now that the <structfield>data</structfield> field is set, the
509 <function>read_proc</function> and
510 <function>write_proc</function> can use it to distinguish
511 between files because they get it passed into their
512 <parameter>data</parameter> parameter:
513 </para>
514
515 <programlisting>
516int foo_read_func(char *page, char **start, off_t off,
517 int count, int *eof, void *data)
518{
519 int len;
520
521 if(data == file_data) {
522 /* special case for this file */
523 } else {
524 /* normal processing */
525 }
526
527 return len;
528}
529 </programlisting>
530
531 <para>
532 Be sure to free the <structfield>data</structfield> data field
533 when removing the procfs entry.
534 </para>
535 </sect1>
536 </chapter>
537
538
539
540
541 <chapter id="tips">
542 <title>Tips and tricks</title>
543
544
545
546
547 <sect1 id="convenience">
548 <title>Convenience functions</title>
549
550 <funcsynopsis>
551 <funcprototype>
552 <funcdef>struct proc_dir_entry* <function>create_proc_read_entry</function></funcdef>
553 <paramdef>const char* <parameter>name</parameter></paramdef>
554 <paramdef>mode_t <parameter>mode</parameter></paramdef>
555 <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
556 <paramdef>read_proc_t* <parameter>read_proc</parameter></paramdef>
557 <paramdef>void* <parameter>data</parameter></paramdef>
558 </funcprototype>
559 </funcsynopsis>
560
561 <para>
562 This function creates a regular file in exactly the same way
563 as <function>create_proc_entry</function> from <xref
564 linkend="regularfile"/> does, but also allows to set the read
565 function <parameter>read_proc</parameter> in one call. This
566 function can set the <parameter>data</parameter> as well, like
567 explained in <xref linkend="usingdata"/>.
568 </para>
569 </sect1>
570
571
572
573 <sect1 id="Modules">
574 <title>Modules</title>
575
576 <para>
577 If procfs is being used from within a module, be sure to set
578 the <structfield>owner</structfield> field in the
579 <structname>struct proc_dir_entry</structname> to
580 <constant>THIS_MODULE</constant>.
581 </para>
582
583 <programlisting>
584struct proc_dir_entry* entry;
585
586entry->owner = THIS_MODULE;
587 </programlisting>
588 </sect1>
589
590
591
592
593 <sect1 id="Mode_and_ownership">
594 <title>Mode and ownership</title>
595
596 <para>
597 Sometimes it is useful to change the mode and/or ownership of
598 a procfs entry. Here is an example that shows how to achieve
599 that:
600 </para>
601
602 <programlisting>
603struct proc_dir_entry* entry;
604
605entry->mode = S_IWUSR |S_IRUSR | S_IRGRP | S_IROTH;
606entry->uid = 0;
607entry->gid = 100;
608 </programlisting>
609
610 </sect1>
611 </chapter>
612
613
614
615
616 <chapter id="example">
617 <title>Example</title>
618
619 <!-- be careful with the example code: it shouldn't be wider than
620 approx. 60 columns, or otherwise it won't fit properly on a page
621 -->
622
623&procfsexample;
624
625 </chapter>
626</book>
diff --git a/Documentation/DocBook/procfs_example.c b/Documentation/DocBook/procfs_example.c
deleted file mode 100644
index a5b11793b1e0..000000000000
--- a/Documentation/DocBook/procfs_example.c
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * procfs_example.c: an example proc interface
3 *
4 * Copyright (C) 2001, Erik Mouw (mouw@nl.linux.org)
5 *
6 * This file accompanies the procfs-guide in the Linux kernel
7 * source. Its main use is to demonstrate the concepts and
8 * functions described in the guide.
9 *
10 * This software has been developed while working on the LART
11 * computing board (http://www.lartmaker.nl), which was sponsored
 12 * by the Delft University of Technology projects Mobile Multi-media
13 * Communications and Ubiquitous Communications.
14 *
15 * This program is free software; you can redistribute
16 * it and/or modify it under the terms of the GNU General
17 * Public License as published by the Free Software
18 * Foundation; either version 2 of the License, or (at your
19 * option) any later version.
20 *
21 * This program is distributed in the hope that it will be
22 * useful, but WITHOUT ANY WARRANTY; without even the implied
23 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
24 * PURPOSE. See the GNU General Public License for more
25 * details.
26 *
27 * You should have received a copy of the GNU General Public
28 * License along with this program; if not, write to the
29 * Free Software Foundation, Inc., 59 Temple Place,
30 * Suite 330, Boston, MA 02111-1307 USA
31 *
32 */
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/init.h>
37#include <linux/proc_fs.h>
38#include <linux/jiffies.h>
39#include <asm/uaccess.h>
40
41
42#define MODULE_VERS "1.0"
43#define MODULE_NAME "procfs_example"
44
45#define FOOBAR_LEN 8
46
47struct fb_data_t {
48 char name[FOOBAR_LEN + 1];
49 char value[FOOBAR_LEN + 1];
50};
51
52
53static struct proc_dir_entry *example_dir, *foo_file,
54 *bar_file, *jiffies_file, *symlink;
55
56
57struct fb_data_t foo_data, bar_data;
58
59
60static int proc_read_jiffies(char *page, char **start,
61 off_t off, int count,
62 int *eof, void *data)
63{
64 int len;
65
66 len = sprintf(page, "jiffies = %ld\n",
67 jiffies);
68
69 return len;
70}
71
72
73static int proc_read_foobar(char *page, char **start,
74 off_t off, int count,
75 int *eof, void *data)
76{
77 int len;
78 struct fb_data_t *fb_data = (struct fb_data_t *)data;
79
80 /* DON'T DO THAT - buffer overruns are bad */
81 len = sprintf(page, "%s = '%s'\n",
82 fb_data->name, fb_data->value);
83
84 return len;
85}
86
87
88static int proc_write_foobar(struct file *file,
89 const char *buffer,
90 unsigned long count,
91 void *data)
92{
93 int len;
94 struct fb_data_t *fb_data = (struct fb_data_t *)data;
95
96 if(count > FOOBAR_LEN)
97 len = FOOBAR_LEN;
98 else
99 len = count;
100
101 if(copy_from_user(fb_data->value, buffer, len))
102 return -EFAULT;
103
104 fb_data->value[len] = '\0';
105
106 return len;
107}
108
109
110static int __init init_procfs_example(void)
111{
112 int rv = 0;
113
114 /* create directory */
115 example_dir = proc_mkdir(MODULE_NAME, NULL);
116 if(example_dir == NULL) {
117 rv = -ENOMEM;
118 goto out;
119 }
120 /* create jiffies using convenience function */
121 jiffies_file = create_proc_read_entry("jiffies",
122 0444, example_dir,
123 proc_read_jiffies,
124 NULL);
125 if(jiffies_file == NULL) {
126 rv = -ENOMEM;
127 goto no_jiffies;
128 }
129
130 /* create foo and bar files using same callback
131 * functions
132 */
133 foo_file = create_proc_entry("foo", 0644, example_dir);
134 if(foo_file == NULL) {
135 rv = -ENOMEM;
136 goto no_foo;
137 }
138
139 strcpy(foo_data.name, "foo");
140 strcpy(foo_data.value, "foo");
141 foo_file->data = &foo_data;
142 foo_file->read_proc = proc_read_foobar;
143 foo_file->write_proc = proc_write_foobar;
144
145 bar_file = create_proc_entry("bar", 0644, example_dir);
146 if(bar_file == NULL) {
147 rv = -ENOMEM;
148 goto no_bar;
149 }
150
151 strcpy(bar_data.name, "bar");
152 strcpy(bar_data.value, "bar");
153 bar_file->data = &bar_data;
154 bar_file->read_proc = proc_read_foobar;
155 bar_file->write_proc = proc_write_foobar;
156
157 /* create symlink */
158 symlink = proc_symlink("jiffies_too", example_dir,
159 "jiffies");
160 if(symlink == NULL) {
161 rv = -ENOMEM;
162 goto no_symlink;
163 }
164
165 /* everything OK */
166 printk(KERN_INFO "%s %s initialised\n",
167 MODULE_NAME, MODULE_VERS);
168 return 0;
169
170no_symlink:
171 remove_proc_entry("bar", example_dir);
172no_bar:
173 remove_proc_entry("foo", example_dir);
174no_foo:
175 remove_proc_entry("jiffies", example_dir);
176no_jiffies:
177 remove_proc_entry(MODULE_NAME, NULL);
178out:
179 return rv;
180}
181
182
183static void __exit cleanup_procfs_example(void)
184{
185 remove_proc_entry("jiffies_too", example_dir);
186 remove_proc_entry("bar", example_dir);
187 remove_proc_entry("foo", example_dir);
188 remove_proc_entry("jiffies", example_dir);
189 remove_proc_entry(MODULE_NAME, NULL);
190
191 printk(KERN_INFO "%s %s removed\n",
192 MODULE_NAME, MODULE_VERS);
193}
194
195
196module_init(init_procfs_example);
197module_exit(cleanup_procfs_example);
198
199MODULE_AUTHOR("Erik Mouw");
200MODULE_DESCRIPTION("procfs examples");
201MODULE_LICENSE("GPL");
diff --git a/Documentation/DocBook/v4l/common.xml b/Documentation/DocBook/v4l/common.xml
index b1a81d246d58..c65f0ac9b6ee 100644
--- a/Documentation/DocBook/v4l/common.xml
+++ b/Documentation/DocBook/v4l/common.xml
@@ -716,6 +716,41 @@ if (-1 == ioctl (fd, &VIDIOC-S-STD;, &amp;std_id)) {
716} 716}
717 </programlisting> 717 </programlisting>
718 </example> 718 </example>
719 <section id="dv-timings">
720 <title>Digital Video (DV) Timings</title>
721 <para>
722 The video standards discussed so far has been dealing with Analog TV and the
723corresponding video timings. Today there are many more different hardware interfaces
724such as High Definition TV interfaces (HDMI), VGA, DVI connectors etc., that carry
725video signals and there is a need to extend the API to select the video timings
726for these interfaces. Since it is not possible to extend the &v4l2-std-id; due to
727the limited bits available, a new set of IOCTLs is added to set/get video timings at
728the input and output: </para><itemizedlist>
729 <listitem>
730 <para>DV Presets: Digital Video (DV) presets. These are IDs representing a
731video timing at the input/output. Presets are pre-defined timings implemented
732by the hardware according to video standards. A __u32 data type is used to represent
733a preset unlike the bit mask that is used in &v4l2-std-id; allowing future extensions
734to support as many different presets as needed.</para>
735 </listitem>
736 <listitem>
737 <para>Custom DV Timings: This will allow applications to define more detailed
738custom video timings for the interface. This includes parameters such as width, height,
739polarities, frontporch, backporch etc.
740 </para>
741 </listitem>
742 </itemizedlist>
743 <para>To enumerate and query the attributes of DV presets supported by a device,
744applications use the &VIDIOC-ENUM-DV-PRESETS; ioctl. To get the current DV preset,
745applications use the &VIDIOC-G-DV-PRESET; ioctl and to set a preset they use the
746&VIDIOC-S-DV-PRESET; ioctl.</para>
747 <para>To set custom DV timings for the device, applications use the
748&VIDIOC-S-DV-TIMINGS; ioctl and to get current custom DV timings they use the
749&VIDIOC-G-DV-TIMINGS; ioctl.</para>
750 <para>Applications can make use of the <xref linkend="input-capabilities" /> and
751<xref linkend="output-capabilities"/> flags to decide what ioctls are available to set the
752video timings for the device.</para>
753 </section>
719 </section> 754 </section>
720 755
721 &sub-controls; 756 &sub-controls;
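
The preset ioctls documented above follow the usual V4L2 enumerate/get/set pattern. A user-space sketch that enumerates the presets of the current input and switches to 720p60 (the device path and the choice of preset are illustrative; error handling is trimmed; the structures and ioctl names are the ones added by this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_enum_preset enum_preset;
	struct v4l2_dv_preset preset;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	/* enumerate every preset the hardware implements */
	memset(&enum_preset, 0, sizeof(enum_preset));
	for (enum_preset.index = 0;
	     ioctl(fd, VIDIOC_ENUM_DV_PRESETS, &enum_preset) == 0;
	     enum_preset.index++)
		printf("preset %u: %s (%ux%u)\n", enum_preset.preset,
		       enum_preset.name, enum_preset.width,
		       enum_preset.height);

	/* select one of the pre-defined timings */
	memset(&preset, 0, sizeof(preset));
	preset.preset = V4L2_DV_720P60;
	if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
		perror("VIDIOC_S_DV_PRESET");

	close(fd);
	return 0;
}
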
diff --git a/Documentation/DocBook/v4l/compat.xml b/Documentation/DocBook/v4l/compat.xml
index 4d1902a54d61..b9dbdf9e6d29 100644
--- a/Documentation/DocBook/v4l/compat.xml
+++ b/Documentation/DocBook/v4l/compat.xml
@@ -2291,8 +2291,8 @@ was renamed to <structname id="v4l2-chip-ident-old">v4l2_chip_ident_old</structn
2291 <listitem> 2291 <listitem>
2292 <para>New control <constant>V4L2_CID_COLORFX</constant> was added.</para> 2292 <para>New control <constant>V4L2_CID_COLORFX</constant> was added.</para>
2293 </listitem> 2293 </listitem>
2294 </orderedlist> 2294 </orderedlist>
2295 </section> 2295 </section>
2296 <section> 2296 <section>
2297 <title>V4L2 in Linux 2.6.32</title> 2297 <title>V4L2 in Linux 2.6.32</title>
2298 <orderedlist> 2298 <orderedlist>
@@ -2322,8 +2322,16 @@ more information.</para>
2322 <listitem> 2322 <listitem>
2323 <para>Added Remote Controller chapter, describing the default Remote Controller mapping for media devices.</para> 2323 <para>Added Remote Controller chapter, describing the default Remote Controller mapping for media devices.</para>
2324 </listitem> 2324 </listitem>
2325 </orderedlist> 2325 </orderedlist>
2326 </section> 2326 </section>
2327 <section>
2328 <title>V4L2 in Linux 2.6.33</title>
2329 <orderedlist>
2330 <listitem>
2331 <para>Added support for Digital Video timings in order to support HDTV receivers and transmitters.</para>
2332 </listitem>
2333 </orderedlist>
2334 </section>
2327 </section> 2335 </section>
2328 2336
2329 <section id="other"> 2337 <section id="other">
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 937b4157a5d0..060105af49e5 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -74,6 +74,17 @@ Remote Controller chapter.</contrib>
74 </address> 74 </address>
75 </affiliation> 75 </affiliation>
76 </author> 76 </author>
77
78 <author>
79 <firstname>Muralidharan</firstname>
80 <surname>Karicheri</surname>
81 <contrib>Documented the Digital Video timings API.</contrib>
82 <affiliation>
83 <address>
84 <email>m-karicheri2@ti.com</email>
85 </address>
86 </affiliation>
87 </author>
77 </authorgroup> 88 </authorgroup>
78 89
79 <copyright> 90 <copyright>
@@ -89,7 +100,7 @@ Remote Controller chapter.</contrib>
89 <year>2008</year> 100 <year>2008</year>
90 <year>2009</year> 101 <year>2009</year>
91 <holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin 102 <holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
92Rubli, Andy Walls, Mauro Carvalho Chehab</holder> 103Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
93 </copyright> 104 </copyright>
94 <legalnotice> 105 <legalnotice>
95 <para>Except when explicitly stated as GPL, programming examples within 106 <para>Except when explicitly stated as GPL, programming examples within
@@ -103,6 +114,13 @@ structs, ioctls) must be noted in more detail in the history chapter
103applications. --> 114applications. -->
104 115
105 <revision> 116 <revision>
117 <revnumber>2.6.33</revnumber>
118 <date>2009-12-03</date>
119 <authorinitials>mk</authorinitials>
120 <revremark>Added documentation for the Digital Video timings API.</revremark>
121 </revision>
122
123 <revision>
106 <revnumber>2.6.32</revnumber> 124 <revnumber>2.6.32</revnumber>
107 <date>2009-08-31</date> 125 <date>2009-08-31</date>
108 <authorinitials>mcc</authorinitials> 126 <authorinitials>mcc</authorinitials>
@@ -355,7 +373,7 @@ and discussions on the V4L mailing list.</revremark>
355</partinfo> 373</partinfo>
356 374
357<title>Video for Linux Two API Specification</title> 375<title>Video for Linux Two API Specification</title>
358 <subtitle>Revision 2.6.32</subtitle> 376 <subtitle>Revision 2.6.33</subtitle>
359 377
360 <chapter id="common"> 378 <chapter id="common">
361 &sub-common; 379 &sub-common;
@@ -411,6 +429,7 @@ and discussions on the V4L mailing list.</revremark>
411 &sub-encoder-cmd; 429 &sub-encoder-cmd;
412 &sub-enumaudio; 430 &sub-enumaudio;
413 &sub-enumaudioout; 431 &sub-enumaudioout;
432 &sub-enum-dv-presets;
414 &sub-enum-fmt; 433 &sub-enum-fmt;
415 &sub-enum-framesizes; 434 &sub-enum-framesizes;
416 &sub-enum-frameintervals; 435 &sub-enum-frameintervals;
@@ -421,6 +440,8 @@ and discussions on the V4L mailing list.</revremark>
421 &sub-g-audioout; 440 &sub-g-audioout;
422 &sub-g-crop; 441 &sub-g-crop;
423 &sub-g-ctrl; 442 &sub-g-ctrl;
443 &sub-g-dv-preset;
444 &sub-g-dv-timings;
424 &sub-g-enc-index; 445 &sub-g-enc-index;
425 &sub-g-ext-ctrls; 446 &sub-g-ext-ctrls;
426 &sub-g-fbuf; 447 &sub-g-fbuf;
@@ -441,6 +462,7 @@ and discussions on the V4L mailing list.</revremark>
441 &sub-querybuf; 462 &sub-querybuf;
442 &sub-querycap; 463 &sub-querycap;
443 &sub-queryctrl; 464 &sub-queryctrl;
465 &sub-query-dv-preset;
444 &sub-querystd; 466 &sub-querystd;
445 &sub-reqbufs; 467 &sub-reqbufs;
446 &sub-s-hw-freq-seek; 468 &sub-s-hw-freq-seek;
diff --git a/Documentation/DocBook/v4l/videodev2.h.xml b/Documentation/DocBook/v4l/videodev2.h.xml
index 3e282ed9f593..068325940658 100644
--- a/Documentation/DocBook/v4l/videodev2.h.xml
+++ b/Documentation/DocBook/v4l/videodev2.h.xml
@@ -734,6 +734,99 @@ struct <link linkend="v4l2-standard">v4l2_standard</link> {
734}; 734};
735 735
736/* 736/*
737 * V I D E O T I M I N G S D V P R E S E T
738 */
739struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link> {
740 __u32 preset;
741 __u32 reserved[4];
742};
743
744/*
745 * D V P R E S E T S E N U M E R A T I O N
746 */
747struct <link linkend="v4l2-dv-enum-preset">v4l2_dv_enum_preset</link> {
748 __u32 index;
749 __u32 preset;
750 __u8 name[32]; /* Name of the preset timing */
751 __u32 width;
752 __u32 height;
753 __u32 reserved[4];
754};
755
756/*
757 * D V P R E S E T V A L U E S
758 */
759#define V4L2_DV_INVALID 0
760#define V4L2_DV_480P59_94 1 /* BT.1362 */
761#define V4L2_DV_576P50 2 /* BT.1362 */
762#define V4L2_DV_720P24 3 /* SMPTE 296M */
763#define V4L2_DV_720P25 4 /* SMPTE 296M */
764#define V4L2_DV_720P30 5 /* SMPTE 296M */
765#define V4L2_DV_720P50 6 /* SMPTE 296M */
766#define V4L2_DV_720P59_94 7 /* SMPTE 274M */
767#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
768#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
769#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
770#define V4L2_DV_1080I25 11 /* BT.1120 */
771#define V4L2_DV_1080I50 12 /* SMPTE 296M */
772#define V4L2_DV_1080I60 13 /* SMPTE 296M */
773#define V4L2_DV_1080P24 14 /* SMPTE 296M */
774#define V4L2_DV_1080P25 15 /* SMPTE 296M */
775#define V4L2_DV_1080P30 16 /* SMPTE 296M */
776#define V4L2_DV_1080P50 17 /* BT.1120 */
777#define V4L2_DV_1080P60 18 /* BT.1120 */
778
779/*
780 * D V B T T I M I N G S
781 */
782
783/* BT.656/BT.1120 timing data */
784struct <link linkend="v4l2-bt-timings">v4l2_bt_timings</link> {
785 __u32 width; /* width in pixels */
786 __u32 height; /* height in lines */
787 __u32 interlaced; /* Interlaced or progressive */
788 __u32 polarities; /* Positive or negative polarity */
789 __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz-&gt;74250000 */
790 __u32 hfrontporch; /* Horizontal front porch in pixels */
791 __u32 hsync; /* Horizontal Sync length in pixels */
792 __u32 hbackporch; /* Horizontal back porch in pixels */
793 __u32 vfrontporch; /* Vertical front porch in pixels */
794 __u32 vsync; /* Vertical Sync length in lines */
795 __u32 vbackporch; /* Vertical back porch in lines */
796 __u32 il_vfrontporch; /* Vertical front porch for bottom field of
797 * interlaced field formats
798 */
799 __u32 il_vsync; /* Vertical sync length for bottom field of
800 * interlaced field formats
801 */
802 __u32 il_vbackporch; /* Vertical back porch for bottom field of
803 * interlaced field formats
804 */
805 __u32 reserved[16];
806} __attribute__ ((packed));
807
808/* Interlaced or progressive format */
809#define V4L2_DV_PROGRESSIVE 0
810#define V4L2_DV_INTERLACED 1
811
812/* Polarities. If bit is not set, it is assumed to be negative polarity */
813#define V4L2_DV_VSYNC_POS_POL 0x00000001
814#define V4L2_DV_HSYNC_POS_POL 0x00000002
815
816
817/* DV timings */
818struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link> {
819 __u32 type;
820 union {
821 struct <link linkend="v4l2-bt-timings">v4l2_bt_timings</link> bt;
822 __u32 reserved[32];
823 };
824} __attribute__ ((packed));
825
826/* Values for the type field */
827#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
828
829/*
737 * V I D E O I N P U T S 830 * V I D E O I N P U T S
738 */ 831 */
739struct <link linkend="v4l2-input">v4l2_input</link> { 832struct <link linkend="v4l2-input">v4l2_input</link> {
@@ -744,7 +837,8 @@ struct <link linkend="v4l2-input">v4l2_input</link> {
744 __u32 tuner; /* Associated tuner */ 837 __u32 tuner; /* Associated tuner */
745 v4l2_std_id std; 838 v4l2_std_id std;
746 __u32 status; 839 __u32 status;
747 __u32 reserved[4]; 840 __u32 capabilities;
841 __u32 reserved[3];
748}; 842};
749 843
750/* Values for the 'type' field */ 844/* Values for the 'type' field */
@@ -775,6 +869,11 @@ struct <link linkend="v4l2-input">v4l2_input</link> {
775#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ 869#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
776#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ 870#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
777 871
872/* capabilities flags */
873#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
874#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
875#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
876
778/* 877/*
779 * V I D E O O U T P U T S 878 * V I D E O O U T P U T S
780 */ 879 */
@@ -785,13 +884,19 @@ struct <link linkend="v4l2-output">v4l2_output</link> {
785 __u32 audioset; /* Associated audios (bitfield) */ 884 __u32 audioset; /* Associated audios (bitfield) */
786 __u32 modulator; /* Associated modulator */ 885 __u32 modulator; /* Associated modulator */
787 v4l2_std_id std; 886 v4l2_std_id std;
788 __u32 reserved[4]; 887 __u32 capabilities;
888 __u32 reserved[3];
789}; 889};
790/* Values for the 'type' field */ 890/* Values for the 'type' field */
791#define V4L2_OUTPUT_TYPE_MODULATOR 1 891#define V4L2_OUTPUT_TYPE_MODULATOR 1
792#define V4L2_OUTPUT_TYPE_ANALOG 2 892#define V4L2_OUTPUT_TYPE_ANALOG 2
793#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 893#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
794 894
895/* capabilities flags */
896#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
897#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
898#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
899
795/* 900/*
796 * C O N T R O L S 901 * C O N T R O L S
797 */ 902 */
@@ -1626,6 +1731,13 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
1626#endif 1731#endif
1627 1732
1628#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct <link linkend="v4l2-hw-freq-seek">v4l2_hw_freq_seek</link>) 1733#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct <link linkend="v4l2-hw-freq-seek">v4l2_hw_freq_seek</link>)
1734#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct <link linkend="v4l2-dv-enum-preset">v4l2_dv_enum_preset</link>)
1735#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
1736#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
1737#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
1738#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
1739#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
1740
1629/* Reminder: when adding new ioctls please add support for them to 1741/* Reminder: when adding new ioctls please add support for them to
1630 drivers/media/video/v4l2-compat-ioctl32.c as well! */ 1742 drivers/media/video/v4l2-compat-ioctl32.c as well! */
1631 1743
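The capabilities word added to struct v4l2_input and struct v4l2_output lets an
application discover whether the DV preset, custom timing and TV standard ioctls
apply to the selected input or output. A minimal user-space sketch of such a check
follows; the device node /dev/video0 is only an assumption for illustration, and
these flags are present only on kernels and drivers that implement this interface.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_input in;
        int fd, index;

        fd = open("/dev/video0", O_RDWR);       /* assumed device node */
        if (fd < 0 || ioctl(fd, VIDIOC_G_INPUT, &index) < 0)
                return 1;

        memset(&in, 0, sizeof(in));
        in.index = index;
        if (ioctl(fd, VIDIOC_ENUMINPUT, &in) == 0) {
                if (in.capabilities & V4L2_IN_CAP_PRESETS)
                        printf("input %u accepts VIDIOC_S_DV_PRESET\n", in.index);
                if (in.capabilities & V4L2_IN_CAP_CUSTOM_TIMINGS)
                        printf("input %u accepts VIDIOC_S_DV_TIMINGS\n", in.index);
                if (in.capabilities & V4L2_IN_CAP_STD)
                        printf("input %u accepts VIDIOC_S_STD\n", in.index);
        }
        close(fd);
        return 0;
}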
diff --git a/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml b/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml
new file mode 100644
index 000000000000..1d31427edd1b
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml
@@ -0,0 +1,238 @@
1<refentry id="vidioc-enum-dv-presets">
2 <refmeta>
3 <refentrytitle>ioctl VIDIOC_ENUM_DV_PRESETS</refentrytitle>
4 &manvol;
5 </refmeta>
6
7 <refnamediv>
8 <refname>VIDIOC_ENUM_DV_PRESETS</refname>
9 <refpurpose>Enumerate supported Digital Video presets</refpurpose>
10 </refnamediv>
11
12 <refsynopsisdiv>
13 <funcsynopsis>
14 <funcprototype>
15 <funcdef>int <function>ioctl</function></funcdef>
16 <paramdef>int <parameter>fd</parameter></paramdef>
17 <paramdef>int <parameter>request</parameter></paramdef>
18 <paramdef>struct v4l2_dv_enum_preset *<parameter>argp</parameter></paramdef>
19 </funcprototype>
20 </funcsynopsis>
21 </refsynopsisdiv>
22
23 <refsect1>
24 <title>Arguments</title>
25
26 <variablelist>
27 <varlistentry>
28 <term><parameter>fd</parameter></term>
29 <listitem>
30 <para>&fd;</para>
31 </listitem>
32 </varlistentry>
33 <varlistentry>
34 <term><parameter>request</parameter></term>
35 <listitem>
36 <para>VIDIOC_ENUM_DV_PRESETS</para>
37 </listitem>
38 </varlistentry>
39 <varlistentry>
40 <term><parameter>argp</parameter></term>
41 <listitem>
42 <para></para>
43 </listitem>
44 </varlistentry>
45 </variablelist>
46 </refsect1>
47
48 <refsect1>
49 <title>Description</title>
50
51 <para>To query the attributes of a DV preset, applications initialize the
52<structfield>index</structfield> field and zero the reserved array of &v4l2-dv-enum-preset;
53and call the <constant>VIDIOC_ENUM_DV_PRESETS</constant> ioctl with a pointer to this
54structure. Drivers fill the rest of the structure or return an
55&EINVAL; when the index is out of bounds. To enumerate all DV Presets supported,
56applications shall begin at index zero, incrementing by one until the
57driver returns <errorcode>EINVAL</errorcode>. Drivers may enumerate a
58different set of DV presets after switching the video input or
59output.</para>
60
61 <table pgwide="1" frame="none" id="v4l2-dv-enum-preset">
62 <title>struct <structname>v4l2_dv_enum_preset</structname></title>
63 <tgroup cols="3">
64 &cs-str;
65 <tbody valign="top">
66 <row>
67 <entry>__u32</entry>
68 <entry><structfield>index</structfield></entry>
69 <entry>Number of the DV preset, set by the
70application.</entry>
71 </row>
72 <row>
73 <entry>__u32</entry>
74 <entry><structfield>preset</structfield></entry>
75 <entry>This field identifies one of the DV preset values listed in <xref linkend="v4l2-dv-presets-vals"/>.</entry>
76 </row>
77 <row>
78 <entry>__u8</entry>
79 <entry><structfield>name</structfield>[32]</entry>
80 <entry>Name of the preset, a NUL-terminated ASCII string, for example: "720P-60", "1080I-60". This information is
81intended for the user.</entry>
82 </row>
83 <row>
84 <entry>__u32</entry>
85 <entry><structfield>width</structfield></entry>
86 <entry>Width of the active video in pixels for the DV preset.</entry>
87 </row>
88 <row>
89 <entry>__u32</entry>
90 <entry><structfield>height</structfield></entry>
91 <entry>Height of the active video in lines for the DV preset.</entry>
92 </row>
93 <row>
94 <entry>__u32</entry>
95 <entry><structfield>reserved</structfield>[4]</entry>
96 <entry>Reserved for future extensions. Drivers must set the array to zero.</entry>
97 </row>
98 </tbody>
99 </tgroup>
100 </table>
101
102 <table pgwide="1" frame="none" id="v4l2-dv-presets-vals">
103 <title>DV Presets</title>
104 <tgroup cols="3">
105 &cs-str;
106 <tbody valign="top">
107 <row>
108 <entry>Preset</entry>
109 <entry>Preset value</entry>
110 <entry>Description</entry>
111 </row>
112 <row>
113 <entry></entry>
114 <entry></entry>
115 <entry></entry>
116 </row>
117 <row>
118 <entry>V4L2_DV_INVALID</entry>
119 <entry>0</entry>
120 <entry>Invalid preset value.</entry>
121 </row>
122 <row>
123 <entry>V4L2_DV_480P59_94</entry>
124 <entry>1</entry>
125 <entry>720x480 progressive video at 59.94 fps as per BT.1362.</entry>
126 </row>
127 <row>
128 <entry>V4L2_DV_576P50</entry>
129 <entry>2</entry>
130 <entry>720x576 progressive video at 50 fps as per BT.1362.</entry>
131 </row>
132 <row>
133 <entry>V4L2_DV_720P24</entry>
134 <entry>3</entry>
135 <entry>1280x720 progressive video at 24 fps as per SMPTE 296M.</entry>
136 </row>
137 <row>
138 <entry>V4L2_DV_720P25</entry>
139 <entry>4</entry>
140 <entry>1280x720 progressive video at 25 fps as per SMPTE 296M.</entry>
141 </row>
142 <row>
143 <entry>V4L2_DV_720P30</entry>
144 <entry>5</entry>
145 <entry>1280x720 progressive video at 30 fps as per SMPTE 296M.</entry>
146 </row>
147 <row>
148 <entry>V4L2_DV_720P50</entry>
149 <entry>6</entry>
150 <entry>1280x720 progressive video at 50 fps as per SMPTE 296M.</entry>
151 </row>
152 <row>
153 <entry>V4L2_DV_720P59_94</entry>
154 <entry>7</entry>
155 <entry>1280x720 progressive video at 59.94 fps as per SMPTE 274M.</entry>
156 </row>
157 <row>
158 <entry>V4L2_DV_720P60</entry>
159 <entry>8</entry>
160 <entry>1280x720 progressive video at 60 fps as per SMPTE 274M/296M.</entry>
161 </row>
162 <row>
163 <entry>V4L2_DV_1080I29_97</entry>
164 <entry>9</entry>
165 <entry>1920x1080 interlaced video at 29.97 fps as per BT.1120/SMPTE 274M.</entry>
166 </row>
167 <row>
168 <entry>V4L2_DV_1080I30</entry>
169 <entry>10</entry>
170 <entry>1920x1080 interlaced video at 30 fps as per BT.1120/SMPTE 274M.</entry>
171 </row>
172 <row>
173 <entry>V4L2_DV_1080I25</entry>
174 <entry>11</entry>
175 <entry>1920x1080 interlaced video at 25 fps as per BT.1120.</entry>
176 </row>
177 <row>
178 <entry>V4L2_DV_1080I50</entry>
179 <entry>12</entry>
180 <entry>1920x1080 interlaced video at 50 fps as per SMPTE 296M.</entry>
181 </row>
182 <row>
183 <entry>V4L2_DV_1080I60</entry>
184 <entry>13</entry>
185 <entry>1920x1080 interlaced video at 60 fps as per SMPTE 296M.</entry>
186 </row>
187 <row>
188 <entry>V4L2_DV_1080P24</entry>
189 <entry>14</entry>
190 <entry>1920x1080 progressive video at 24 fps as per SMPTE 296M.</entry>
191 </row>
192 <row>
193 <entry>V4L2_DV_1080P25</entry>
194 <entry>15</entry>
195 <entry>1920x1080 progressive video at 25 fps as per SMPTE 296M.</entry>
196 </row>
197 <row>
198 <entry>V4L2_DV_1080P30</entry>
199 <entry>16</entry>
200 <entry>1920x1080 progressive video at 30 fps as per SMPTE 296M.</entry>
201 </row>
202 <row>
203 <entry>V4L2_DV_1080P50</entry>
204 <entry>17</entry>
205 <entry>1920x1080 progressive video at 50 fps as per BT.1120.</entry>
206 </row>
207 <row>
208 <entry>V4L2_DV_1080P60</entry>
209 <entry>18</entry>
210 <entry>1920x1080 progressive video at 60 fps as per BT.1120.</entry>
211 </row>
212 </tbody>
213 </tgroup>
214 </table>
215 </refsect1>
216
217 <refsect1>
218 &return-value;
219
220 <variablelist>
221 <varlistentry>
222 <term><errorcode>EINVAL</errorcode></term>
223 <listitem>
224 <para>The &v4l2-dv-enum-preset; <structfield>index</structfield>
225is out of bounds.</para>
226 </listitem>
227 </varlistentry>
228 </variablelist>
229 </refsect1>
230</refentry>
231
232<!--
233Local Variables:
234mode: sgml
235sgml-parent-document: "v4l2.sgml"
236indent-tabs-mode: nil
237End:
238-->
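As a rough illustration of the enumeration rule described above (start at index
zero and stop when the driver returns EINVAL), a user-space sketch might look
like the following; /dev/video0 is an assumed device node and error handling is
kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_dv_enum_preset p;
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */

        if (fd < 0)
                return 1;

        memset(&p, 0, sizeof(p));
        /* drivers return EINVAL once the index runs past the last preset */
        for (p.index = 0; ioctl(fd, VIDIOC_ENUM_DV_PRESETS, &p) == 0; p.index++)
                printf("preset %u: %s, %ux%u\n",
                       p.preset, (const char *)p.name, p.width, p.height);

        close(fd);
        return 0;
}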
diff --git a/Documentation/DocBook/v4l/vidioc-enuminput.xml b/Documentation/DocBook/v4l/vidioc-enuminput.xml
index 414856b82473..71b868e2fb8f 100644
--- a/Documentation/DocBook/v4l/vidioc-enuminput.xml
+++ b/Documentation/DocBook/v4l/vidioc-enuminput.xml
@@ -124,7 +124,13 @@ current input.</entry>
124 </row> 124 </row>
125 <row> 125 <row>
126 <entry>__u32</entry> 126 <entry>__u32</entry>
127 <entry><structfield>reserved</structfield>[4]</entry> 127 <entry><structfield>capabilities</structfield></entry>
128 <entry>This field provides capabilities for the
129input. See <xref linkend="input-capabilities" /> for flags.</entry>
130 </row>
131 <row>
132 <entry>__u32</entry>
133 <entry><structfield>reserved</structfield>[3]</entry>
128 <entry>Reserved for future extensions. Drivers must set 134 <entry>Reserved for future extensions. Drivers must set
129the array to zero.</entry> 135the array to zero.</entry>
130 </row> 136 </row>
@@ -261,6 +267,34 @@ flag is set Macrovision has been detected.</entry>
261 </tbody> 267 </tbody>
262 </tgroup> 268 </tgroup>
263 </table> 269 </table>
270
271 <!-- Capability flags based on video timings RFC by Muralidharan
272Karicheri, titled RFC (v1.2): V4L - Support for video timings at the
273input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
274 -->
275 <table frame="none" pgwide="1" id="input-capabilities">
276 <title>Input capabilities</title>
277 <tgroup cols="3">
278 &cs-def;
279 <tbody valign="top">
280 <row>
281 <entry><constant>V4L2_IN_CAP_PRESETS</constant></entry>
282 <entry>0x00000001</entry>
283 <entry>This input supports setting DV presets by using VIDIOC_S_DV_PRESET.</entry>
284 </row>
285 <row>
286 <entry><constant>V4L2_IN_CAP_CUSTOM_TIMINGS</constant></entry>
287 <entry>0x00000002</entry>
288 <entry>This input supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.</entry>
289 </row>
290 <row>
291 <entry><constant>V4L2_IN_CAP_STD</constant></entry>
292 <entry>0x00000004</entry>
293 <entry>This input supports setting the TV standard by using VIDIOC_S_STD.</entry>
294 </row>
295 </tbody>
296 </tgroup>
297 </table>
264 </refsect1> 298 </refsect1>
265 299
266 <refsect1> 300 <refsect1>
diff --git a/Documentation/DocBook/v4l/vidioc-enumoutput.xml b/Documentation/DocBook/v4l/vidioc-enumoutput.xml
index e8d16dcd50cf..a281d26a195f 100644
--- a/Documentation/DocBook/v4l/vidioc-enumoutput.xml
+++ b/Documentation/DocBook/v4l/vidioc-enumoutput.xml
@@ -114,7 +114,13 @@ details on video standards and how to switch see <xref
114 </row> 114 </row>
115 <row> 115 <row>
116 <entry>__u32</entry> 116 <entry>__u32</entry>
117 <entry><structfield>reserved</structfield>[4]</entry> 117 <entry><structfield>capabilities</structfield></entry>
118 <entry>This field provides capabilities for the
119output. See <xref linkend="output-capabilities" /> for flags.</entry>
120 </row>
121 <row>
122 <entry>__u32</entry>
123 <entry><structfield>reserved</structfield>[3]</entry>
118 <entry>Reserved for future extensions. Drivers must set 124 <entry>Reserved for future extensions. Drivers must set
119the array to zero.</entry> 125the array to zero.</entry>
120 </row> 126 </row>
@@ -147,6 +153,34 @@ CVBS, S-Video, RGB.</entry>
147 </tgroup> 153 </tgroup>
148 </table> 154 </table>
149 155
156 <!-- Capabilities flags based on video timings RFC by Muralidharan
157Karicheri, titled RFC (v1.2): V4L - Support for video timings at the
158input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
159 -->
160 <table frame="none" pgwide="1" id="output-capabilities">
161 <title>Output capabilities</title>
162 <tgroup cols="3">
163 &cs-def;
164 <tbody valign="top">
165 <row>
166 <entry><constant>V4L2_OUT_CAP_PRESETS</constant></entry>
167 <entry>0x00000001</entry>
168 <entry>This output supports setting DV presets by using VIDIOC_S_DV_PRESET.</entry>
169 </row>
170 <row>
171 <entry><constant>V4L2_OUT_CAP_CUSTOM_TIMINGS</constant></entry>
172 <entry>0x00000002</entry>
173 <entry>This output supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.</entry>
174 </row>
175 <row>
176 <entry><constant>V4L2_OUT_CAP_STD</constant></entry>
177 <entry>0x00000004</entry>
178 <entry>This output supports setting the TV standard by using VIDIOC_S_STD.</entry>
179 </row>
180 </tbody>
181 </tgroup>
182 </table>
183
150 </refsect1> 184 </refsect1>
151 <refsect1> 185 <refsect1>
152 &return-value; 186 &return-value;
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
new file mode 100644
index 000000000000..3c6784e132f3
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
@@ -0,0 +1,111 @@
1<refentry id="vidioc-g-dv-preset">
2 <refmeta>
3 <refentrytitle>ioctl VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET</refentrytitle>
4 &manvol;
5 </refmeta>
6
7 <refnamediv>
8 <refname>VIDIOC_G_DV_PRESET</refname>
9 <refname>VIDIOC_S_DV_PRESET</refname>
10 <refpurpose>Query or select the DV preset of the current input or output</refpurpose>
11 </refnamediv>
12
13 <refsynopsisdiv>
14 <funcsynopsis>
15 <funcprototype>
16 <funcdef>int <function>ioctl</function></funcdef>
17 <paramdef>int <parameter>fd</parameter></paramdef>
18 <paramdef>int <parameter>request</parameter></paramdef>
19 <paramdef>&v4l2-dv-preset;
20*<parameter>argp</parameter></paramdef>
21 </funcprototype>
22 </funcsynopsis>
23 </refsynopsisdiv>
24
25 <refsect1>
26 <title>Arguments</title>
27
28 <variablelist>
29 <varlistentry>
30 <term><parameter>fd</parameter></term>
31 <listitem>
32 <para>&fd;</para>
33 </listitem>
34 </varlistentry>
35 <varlistentry>
36 <term><parameter>request</parameter></term>
37 <listitem>
38 <para>VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET</para>
39 </listitem>
40 </varlistentry>
41 <varlistentry>
42 <term><parameter>argp</parameter></term>
43 <listitem>
44 <para></para>
45 </listitem>
46 </varlistentry>
47 </variablelist>
48 </refsect1>
49
50 <refsect1>
51 <title>Description</title>
52 <para>To query and select the current DV preset, applications
53use the <constant>VIDIOC_G_DV_PRESET</constant> and <constant>VIDIOC_S_DV_PRESET</constant>
54ioctls which take a pointer to a &v4l2-dv-preset; type as argument.
55Applications must zero the reserved array in &v4l2-dv-preset;.
56<constant>VIDIOC_G_DV_PRESET</constant> returns a DV preset in the field
57<structfield>preset</structfield> of &v4l2-dv-preset;.</para>
58
59 <para><constant>VIDIOC_S_DV_PRESET</constant> accepts a pointer to a &v4l2-dv-preset;
60that has the preset value to be set. Applications must zero the reserved array in &v4l2-dv-preset;.
61If the preset is not supported, it returns an &EINVAL;.</para>
62 </refsect1>
63
64 <refsect1>
65 &return-value;
66
67 <variablelist>
68 <varlistentry>
69 <term><errorcode>EINVAL</errorcode></term>
70 <listitem>
71 <para>This ioctl is not supported, or the
72<constant>VIDIOC_S_DV_PRESET</constant> parameter was unsuitable.</para>
73 </listitem>
74 </varlistentry>
75 <varlistentry>
76 <term><errorcode>EBUSY</errorcode></term>
77 <listitem>
78 <para>The device is busy and therefore cannot change the preset.</para>
79 </listitem>
80 </varlistentry>
81 </variablelist>
82
83 <table pgwide="1" frame="none" id="v4l2-dv-preset">
84 <title>struct <structname>v4l2_dv_preset</structname></title>
85 <tgroup cols="3">
86 &cs-str;
87 <tbody valign="top">
88 <row>
89 <entry>__u32</entry>
90 <entry><structfield>preset</structfield></entry>
91 <entry>Preset value to represent the digital video timings</entry>
92 </row>
93 <row>
94 <entry>__u32</entry>
95 <entry><structfield>reserved[4]</structfield></entry>
96 <entry>Reserved fields for future use</entry>
97 </row>
98 </tbody>
99 </tgroup>
100 </table>
101
102 </refsect1>
103</refentry>
104
105<!--
106Local Variables:
107mode: sgml
108sgml-parent-document: "v4l2.sgml"
109indent-tabs-mode: nil
110End:
111-->
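A sketch of the get/set sequence described above; the chosen preset value
(V4L2_DV_720P60) and the already opened file descriptor are assumptions made
purely for illustration.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* returns 0 on success, -1 if the driver rejects the preset */
static int select_720p60(int fd)
{
        struct v4l2_dv_preset preset;

        memset(&preset, 0, sizeof(preset));     /* also clears the reserved array */
        preset.preset = V4L2_DV_720P60;
        if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0) {
                perror("VIDIOC_S_DV_PRESET");
                return -1;
        }

        memset(&preset, 0, sizeof(preset));
        if (ioctl(fd, VIDIOC_G_DV_PRESET, &preset) == 0)
                printf("current preset: %u\n", preset.preset);
        return 0;
}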
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
new file mode 100644
index 000000000000..ecc19576bb8f
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
@@ -0,0 +1,224 @@
1<refentry id="vidioc-g-dv-timings">
2 <refmeta>
3 <refentrytitle>ioctl VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS</refentrytitle>
4 &manvol;
5 </refmeta>
6
7 <refnamediv>
8 <refname>VIDIOC_G_DV_TIMINGS</refname>
9 <refname>VIDIOC_S_DV_TIMINGS</refname>
10 <refpurpose>Get or set custom DV timings for input or output</refpurpose>
11 </refnamediv>
12
13 <refsynopsisdiv>
14 <funcsynopsis>
15 <funcprototype>
16 <funcdef>int <function>ioctl</function></funcdef>
17 <paramdef>int <parameter>fd</parameter></paramdef>
18 <paramdef>int <parameter>request</parameter></paramdef>
19 <paramdef>&v4l2-dv-timings;
20*<parameter>argp</parameter></paramdef>
21 </funcprototype>
22 </funcsynopsis>
23 </refsynopsisdiv>
24
25 <refsect1>
26 <title>Arguments</title>
27
28 <variablelist>
29 <varlistentry>
30 <term><parameter>fd</parameter></term>
31 <listitem>
32 <para>&fd;</para>
33 </listitem>
34 </varlistentry>
35 <varlistentry>
36 <term><parameter>request</parameter></term>
37 <listitem>
38 <para>VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS</para>
39 </listitem>
40 </varlistentry>
41 <varlistentry>
42 <term><parameter>argp</parameter></term>
43 <listitem>
44 <para></para>
45 </listitem>
46 </varlistentry>
47 </variablelist>
48 </refsect1>
49
50 <refsect1>
51 <title>Description</title>
52 <para>To set custom DV timings for the input or output, applications use the
53<constant>VIDIOC_S_DV_TIMINGS</constant> ioctl and to get the current custom timings,
54applications use the <constant>VIDIOC_G_DV_TIMINGS</constant> ioctl. The detailed timing
55information is filled in using the structure &v4l2-dv-timings;. These ioctls take
56a pointer to the &v4l2-dv-timings; structure as argument. If the ioctl is not supported
57or the timing values are not correct, the driver returns &EINVAL;.</para>
58 </refsect1>
59
60 <refsect1>
61 &return-value;
62
63 <variablelist>
64 <varlistentry>
65 <term><errorcode>EINVAL</errorcode></term>
66 <listitem>
67 <para>This ioctl is not supported, or the
68<constant>VIDIOC_S_DV_TIMINGS</constant> parameter was unsuitable.</para>
69 </listitem>
70 </varlistentry>
71 <varlistentry>
72 <term><errorcode>EBUSY</errorcode></term>
73 <listitem>
74 <para>The device is busy and therefore cannot change the timings.</para>
75 </listitem>
76 </varlistentry>
77 </variablelist>
78
79 <table pgwide="1" frame="none" id="v4l2-bt-timings">
80 <title>struct <structname>v4l2_bt_timings</structname></title>
81 <tgroup cols="3">
82 &cs-str;
83 <tbody valign="top">
84 <row>
85 <entry>__u32</entry>
86 <entry><structfield>width</structfield></entry>
87 <entry>Width of the active video in pixels</entry>
88 </row>
89 <row>
90 <entry>__u32</entry>
91 <entry><structfield>height</structfield></entry>
92 <entry>Height of the active video in lines</entry>
93 </row>
94 <row>
95 <entry>__u32</entry>
96 <entry><structfield>interlaced</structfield></entry>
97 <entry>Progressive (0) or interlaced (1)</entry>
98 </row>
99 <row>
100 <entry>__u32</entry>
101 <entry><structfield>polarities</structfield></entry>
102 <entry>This is a bit mask that defines polarities of sync signals.
103bit 0 (V4L2_DV_VSYNC_POS_POL) is for vertical sync polarity and bit 1 (V4L2_DV_HSYNC_POS_POL) is for horizontal sync polarity. If the bit is set
104(1) it is positive polarity and if it is cleared (0), it is negative polarity.</entry>
105 </row>
106 <row>
107 <entry>__u64</entry>
108 <entry><structfield>pixelclock</structfield></entry>
109 <entry>Pixel clock in Hz. For example, 74.25 MHz corresponds to 74250000.</entry>
110 </row>
111 <row>
112 <entry>__u32</entry>
113 <entry><structfield>hfrontporch</structfield></entry>
114 <entry>Horizontal front porch in pixels</entry>
115 </row>
116 <row>
117 <entry>__u32</entry>
118 <entry><structfield>hsync</structfield></entry>
119 <entry>Horizontal sync length in pixels</entry>
120 </row>
121 <row>
122 <entry>__u32</entry>
123 <entry><structfield>hbackporch</structfield></entry>
124 <entry>Horizontal back porch in pixels</entry>
125 </row>
126 <row>
127 <entry>__u32</entry>
128 <entry><structfield>vfrontporch</structfield></entry>
129 <entry>Vertical front porch in lines</entry>
130 </row>
131 <row>
132 <entry>__u32</entry>
133 <entry><structfield>vsync</structfield></entry>
134 <entry>Vertical sync length in lines</entry>
135 </row>
136 <row>
137 <entry>__u32</entry>
138 <entry><structfield>vbackporch</structfield></entry>
139 <entry>Vertical back porch in lines</entry>
140 </row>
141 <row>
142 <entry>__u32</entry>
143 <entry><structfield>il_vfrontporch</structfield></entry>
144 <entry>Vertical front porch in lines for bottom field of interlaced field formats</entry>
145 </row>
146 <row>
147 <entry>__u32</entry>
148 <entry><structfield>il_vsync</structfield></entry>
149 <entry>Vertical sync length in lines for bottom field of interlaced field formats</entry>
150 </row>
151 <row>
152 <entry>__u32</entry>
153 <entry><structfield>il_vbackporch</structfield></entry>
154 <entry>Vertical back porch in lines for bottom field of interlaced field formats</entry>
155 </row>
156 </tbody>
157 </tgroup>
158 </table>
159
160 <table pgwide="1" frame="none" id="v4l2-dv-timings">
161 <title>struct <structname>v4l2_dv_timings</structname></title>
162 <tgroup cols="4">
163 &cs-str;
164 <tbody valign="top">
165 <row>
166 <entry>__u32</entry>
167 <entry><structfield>type</structfield></entry>
168 <entry></entry>
169 <entry>Type of DV timings as listed in <xref linkend="dv-timing-types"/>.</entry>
170 </row>
171 <row>
172 <entry>union</entry>
173 <entry><structfield></structfield></entry>
174 <entry></entry><entry></entry>
175 </row>
176 <row>
177 <entry></entry>
178 <entry>&v4l2-bt-timings;</entry>
179 <entry><structfield>bt</structfield></entry>
180 <entry>Timings defined by BT.656/1120 specifications</entry>
181 </row>
182 <row>
183 <entry></entry>
184 <entry>__u32</entry>
185 <entry><structfield>reserved</structfield>[32]</entry>
186 <entry></entry>
187 </row>
188 </tbody>
189 </tgroup>
190 </table>
191
192 <table pgwide="1" frame="none" id="dv-timing-types">
193 <title>DV Timing types</title>
194 <tgroup cols="3">
195 &cs-str;
196 <tbody valign="top">
197 <row>
198 <entry>Timing type</entry>
199 <entry>value</entry>
200 <entry>Description</entry>
201 </row>
202 <row>
203 <entry></entry>
204 <entry></entry>
205 <entry></entry>
206 </row>
207 <row>
208 <entry>V4L2_DV_BT_656_1120</entry>
209 <entry>0</entry>
210 <entry>BT.656/1120 timings</entry>
211 </row>
212 </tbody>
213 </tgroup>
214 </table>
215 </refsect1>
216</refentry>
217
218<!--
219Local Variables:
220mode: sgml
221sgml-parent-document: "v4l2.sgml"
222indent-tabs-mode: nil
223End:
224-->
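A sketch of filling in BT.656/1120 timings by hand, as described above; the
numbers below describe a common 1280x720 at 60 fps timing and are illustrative
only, not values mandated by this ioctl, and the open descriptor is assumed.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* program hand-built 720p60-style timings on an already opened node */
static int set_720p_timings(int fd)
{
        struct v4l2_dv_timings t;

        memset(&t, 0, sizeof(t));
        t.type = V4L2_DV_BT_656_1120;
        t.bt.width = 1280;
        t.bt.height = 720;
        t.bt.interlaced = V4L2_DV_PROGRESSIVE;
        t.bt.polarities = V4L2_DV_VSYNC_POS_POL | V4L2_DV_HSYNC_POS_POL;
        t.bt.pixelclock = 74250000;     /* 74.25 MHz */
        t.bt.hfrontporch = 110;
        t.bt.hsync = 40;
        t.bt.hbackporch = 220;
        t.bt.vfrontporch = 5;
        t.bt.vsync = 5;
        t.bt.vbackporch = 20;

        return ioctl(fd, VIDIOC_S_DV_TIMINGS, &t);
}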
diff --git a/Documentation/DocBook/v4l/vidioc-g-std.xml b/Documentation/DocBook/v4l/vidioc-g-std.xml
index b6f5d267e856..912f8513e5da 100644
--- a/Documentation/DocBook/v4l/vidioc-g-std.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-std.xml
@@ -86,6 +86,12 @@ standards.</para>
86<constant>VIDIOC_S_STD</constant> parameter was unsuitable.</para> 86<constant>VIDIOC_S_STD</constant> parameter was unsuitable.</para>
87 </listitem> 87 </listitem>
88 </varlistentry> 88 </varlistentry>
89 <varlistentry>
90 <term><errorcode>EBUSY</errorcode></term>
91 <listitem>
92 <para>The device is busy and therefore cannot change the standard.</para>
93 </listitem>
94 </varlistentry>
89 </variablelist> 95 </variablelist>
90 </refsect1> 96 </refsect1>
91</refentry> 97</refentry>
diff --git a/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
new file mode 100644
index 000000000000..87e4f0f6151c
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
@@ -0,0 +1,85 @@
1<refentry id="vidioc-query-dv-preset">
2 <refmeta>
3 <refentrytitle>ioctl VIDIOC_QUERY_DV_PRESET</refentrytitle>
4 &manvol;
5 </refmeta>
6
7 <refnamediv>
8 <refname>VIDIOC_QUERY_DV_PRESET</refname>
9 <refpurpose>Sense the DV preset received by the current
10input</refpurpose>
11 </refnamediv>
12
13 <refsynopsisdiv>
14 <funcsynopsis>
15 <funcprototype>
16 <funcdef>int <function>ioctl</function></funcdef>
17 <paramdef>int <parameter>fd</parameter></paramdef>
18 <paramdef>int <parameter>request</parameter></paramdef>
19 <paramdef>&v4l2-dv-preset; *<parameter>argp</parameter></paramdef>
20 </funcprototype>
21 </funcsynopsis>
22 </refsynopsisdiv>
23
24 <refsect1>
25 <title>Arguments</title>
26
27 <variablelist>
28 <varlistentry>
29 <term><parameter>fd</parameter></term>
30 <listitem>
31 <para>&fd;</para>
32 </listitem>
33 </varlistentry>
34 <varlistentry>
35 <term><parameter>request</parameter></term>
36 <listitem>
37 <para>VIDIOC_QUERY_DV_PRESET</para>
38 </listitem>
39 </varlistentry>
40 <varlistentry>
41 <term><parameter>argp</parameter></term>
42 <listitem>
43 <para></para>
44 </listitem>
45 </varlistentry>
46 </variablelist>
47 </refsect1>
48
49 <refsect1>
50 <title>Description</title>
51
52 <para>The hardware may be able to detect the current DV preset
53automatically, similar to sensing the video standard. To do so, applications
54call <constant>VIDIOC_QUERY_DV_PRESET</constant> with a pointer to a
55&v4l2-dv-preset; type. Once the hardware detects a preset, that preset is
56returned in the preset field of &v4l2-dv-preset;. When detection is not
57possible or fails, the value V4L2_DV_INVALID is returned.</para>
58 </refsect1>
59
60 <refsect1>
61 &return-value;
62 <variablelist>
63 <varlistentry>
64 <term><errorcode>EINVAL</errorcode></term>
65 <listitem>
66 <para>This ioctl is not supported.</para>
67 </listitem>
68 </varlistentry>
69 <varlistentry>
70 <term><errorcode>EBUSY</errorcode></term>
71 <listitem>
72 <para>The device is busy and therefore cannot sense the preset.</para>
73 </listitem>
74 </varlistentry>
75 </variablelist>
76 </refsect1>
77</refentry>
78
79<!--
80Local Variables:
81mode: sgml
82sgml-parent-document: "v4l2.sgml"
83indent-tabs-mode: nil
84End:
85-->
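A sketch of sensing the incoming preset as described above; the open descriptor
is assumed and only the V4L2_DV_INVALID case is handled specially.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void report_detected_preset(int fd)
{
        struct v4l2_dv_preset preset;

        memset(&preset, 0, sizeof(preset));
        if (ioctl(fd, VIDIOC_QUERY_DV_PRESET, &preset) < 0) {
                perror("VIDIOC_QUERY_DV_PRESET");
                return;
        }
        if (preset.preset == V4L2_DV_INVALID)
                printf("no preset detected on the current input\n");
        else
                printf("detected preset %u\n", preset.preset);
}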
diff --git a/Documentation/DocBook/v4l/vidioc-querystd.xml b/Documentation/DocBook/v4l/vidioc-querystd.xml
index b5a7ff934486..1a9e60393091 100644
--- a/Documentation/DocBook/v4l/vidioc-querystd.xml
+++ b/Documentation/DocBook/v4l/vidioc-querystd.xml
@@ -70,6 +70,12 @@ current video input or output.</para>
70 <para>This ioctl is not supported.</para> 70 <para>This ioctl is not supported.</para>
71 </listitem> 71 </listitem>
72 </varlistentry> 72 </varlistentry>
73 <varlistentry>
74 <term><errorcode>EBUSY</errorcode></term>
75 <listitem>
76 <para>The device is busy and therefore can not detect the standard</para>
77 </listitem>
78 </varlistentry>
73 </variablelist> 79 </variablelist>
74 </refsect1> 80 </refsect1>
75</refentry> 81</refentry>
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 78a9168ff377..1053a56be3b1 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -15,7 +15,7 @@ kernel patches.
152: Passes allnoconfig, allmodconfig 152: Passes allnoconfig, allmodconfig
16 16
173: Builds on multiple CPU architectures by using local cross-compile tools 173: Builds on multiple CPU architectures by using local cross-compile tools
18 or something like PLM at OSDL. 18 or some other build farm.
19 19
204: ppc64 is a good architecture for cross-compilation checking because it 204: ppc64 is a good architecture for cross-compilation checking because it
21 tends to use `unsigned long' for 64-bit quantities. 21 tends to use `unsigned long' for 64-bit quantities.
@@ -88,3 +88,6 @@ kernel patches.
88 88
8924: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the 8924: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
90 source code that explains the logic of what they are doing and why. 90 source code that explains the logic of what they are doing and why.
91
9225: If any ioctl's are added by the patch, then also update
93 Documentation/ioctl/ioctl-number.txt.
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index a5009c8300f3..e3a77b215135 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -8,13 +8,19 @@ the block device which are also writable without interfering with the
8original content; 8original content;
9*) To create device "forks", i.e. multiple different versions of the 9*) To create device "forks", i.e. multiple different versions of the
10same data stream. 10same data stream.
11*) To merge a snapshot of a block device back into the snapshot's origin
12device.
11 13
14In the first two cases, dm copies only the chunks of data that get
15changed and uses a separate copy-on-write (COW) block device for
16storage.
12 17
13In both cases, dm copies only the chunks of data that get changed and 18For snapshot merge the contents of the COW storage are merged back into
14uses a separate copy-on-write (COW) block device for storage. 19the origin device.
15 20
16 21
17There are two dm targets available: snapshot and snapshot-origin. 22There are three dm targets available:
23snapshot, snapshot-origin, and snapshot-merge.
18 24
19*) snapshot-origin <origin> 25*) snapshot-origin <origin>
20 26
@@ -40,8 +46,25 @@ The difference is that for transient snapshots less metadata must be
40saved on disk - they can be kept in memory by the kernel. 46saved on disk - they can be kept in memory by the kernel.
41 47
42 48
43How this is used by LVM2 49* snapshot-merge <origin> <COW device> <persistent> <chunksize>
44======================== 50
51takes the same table arguments as the snapshot target except it only
52works with persistent snapshots. This target assumes the role of the
53"snapshot-origin" target and must not be loaded if the "snapshot-origin"
54is still present for <origin>.
55
56Creates a merging snapshot that takes control of the changed chunks
57stored in the <COW device> of an existing snapshot, through a handover
58procedure, and merges these chunks back into the <origin>. Once merging
59has started (in the background) the <origin> may be opened and the merge
60will continue while I/O is flowing to it. Changes to the <origin> are
61deferred until the merging snapshot's corresponding chunk(s) have been
62merged. Once merging has started, the snapshot device associated with
63the "snapshot" target will return -EIO when accessed.
64
65
66How snapshot is used by LVM2
67============================
45When you create the first LVM2 snapshot of a volume, four dm devices are used: 68When you create the first LVM2 snapshot of a volume, four dm devices are used:
46 69
471) a device containing the original mapping table of the source volume; 701) a device containing the original mapping table of the source volume;
@@ -72,3 +95,30 @@ brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow
72brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap 95brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap
73brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base 96brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base
74 97
98
99How snapshot-merge is used by LVM2
100==================================
101A merging snapshot assumes the role of the "snapshot-origin" while
102merging. As such the "snapshot-origin" is replaced with
103"snapshot-merge". The "-real" device is not changed and the "-cow"
104device is renamed to <origin name>-cow to aid LVM2's cleanup of the
105merging snapshot after it completes. The "snapshot" that hands over its
106COW device to the "snapshot-merge" is deactivated (unless using lvchange
107--refresh); but if it is left active it will simply return I/O errors.
108
109A snapshot will merge into its origin with the following command:
110
111lvconvert --merge volumeGroup/snap
112
113We'll now have this situation:
114
115# dmsetup table|grep volumeGroup
116
117volumeGroup-base-real: 0 2097152 linear 8:19 384
118volumeGroup-base-cow: 0 204800 linear 8:19 2097536
119volumeGroup-base: 0 2097152 snapshot-merge 254:11 254:12 P 16
120
121# ls -lL /dev/mapper/volumeGroup-*
122brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real
123brw------- 1 root root 254, 12 29 ago 18:16 /dev/mapper/volumeGroup-base-cow
124brw------- 1 root root 254, 10 29 ago 18:16 /dev/mapper/volumeGroup-base
diff --git a/Documentation/fb/viafb.txt b/Documentation/fb/viafb.txt
index 67dbf442b0b6..f3e046a6a987 100644
--- a/Documentation/fb/viafb.txt
+++ b/Documentation/fb/viafb.txt
@@ -7,7 +7,7 @@
7 VIA UniChrome Family(CLE266, PM800 / CN400 / CN300, 7 VIA UniChrome Family(CLE266, PM800 / CN400 / CN300,
8 P4M800CE / P4M800Pro / CN700 / VN800, 8 P4M800CE / P4M800Pro / CN700 / VN800,
9 CX700 / VX700, K8M890, P4M890, 9 CX700 / VX700, K8M890, P4M890,
10 CN896 / P4M900, VX800) 10 CN896 / P4M900, VX800, VX855)
11 11
12[Driver features] 12[Driver features]
13------------------------ 13------------------------
@@ -154,13 +154,6 @@
154 0 : No Dual Edge Panel (default) 154 0 : No Dual Edge Panel (default)
155 1 : Dual Edge Panel 155 1 : Dual Edge Panel
156 156
157 viafb_video_dev:
158 This option is used to specify video output devices(CRT, DVI, LCD) for
159 duoview case.
160 For example:
161 To output video on DVI, we should use:
162 modprobe viafb viafb_video_dev=DVI...
163
164 viafb_lcd_port: 157 viafb_lcd_port:
165 This option is used to specify LCD output port, 158 This option is used to specify LCD output port,
166 available values are "DVP0" "DVP1" "DFP_HIGHLOW" "DFP_HIGH" "DFP_LOW". 159 available values are "DVP0" "DVP1" "DFP_HIGHLOW" "DFP_HIGH" "DFP_LOW".
@@ -181,9 +174,6 @@ Notes:
181 and bpp, need to call VIAFB specified ioctl interface VIAFB_SET_DEVICE 174 and bpp, need to call VIAFB specified ioctl interface VIAFB_SET_DEVICE
182 instead of calling common ioctl function FBIOPUT_VSCREENINFO since 175 instead of calling common ioctl function FBIOPUT_VSCREENINFO since
183 viafb doesn't support multi-head well, or it will cause screen crush. 176 viafb doesn't support multi-head well, or it will cause screen crush.
184 4. VX800 2D accelerator hasn't been supported in this driver yet. When
185 using driver on VX800, the driver will disable the acceleration
186 function as default.
187 177
188 178
189[Configure viafb with "fbset" tool] 179[Configure viafb with "fbset" tool]
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index eb2c138c277c..21ab9357326d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -291,15 +291,6 @@ Who: Michael Buesch <mb@bu3sch.de>
291 291
292--------------------------- 292---------------------------
293 293
294What: print_fn_descriptor_symbol()
295When: October 2009
296Why: The %pF vsprintf format provides the same functionality in a
297 simpler way. print_fn_descriptor_symbol() is deprecated but
298 still present to give out-of-tree modules time to change.
299Who: Bjorn Helgaas <bjorn.helgaas@hp.com>
300
301---------------------------
302
303What: /sys/o2cb symlink 294What: /sys/o2cb symlink
304When: January 2010 295When: January 2010
305Why: /sys/fs/o2cb is the proper location for this information - /sys/o2cb 296Why: /sys/fs/o2cb is the proper location for this information - /sys/o2cb
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 7001782ab932..875d49696b6e 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -1,7 +1,5 @@
100-INDEX 100-INDEX
2 - this file (info on some of the filesystems supported by linux). 2 - this file (info on some of the filesystems supported by linux).
3Exporting
4 - explanation of how to make filesystems exportable.
5Locking 3Locking
6 - info on locking rules as they pertain to Linux VFS. 4 - info on locking rules as they pertain to Linux VFS.
79p.txt 59p.txt
@@ -68,12 +66,8 @@ mandatory-locking.txt
68 - info on the Linux implementation of Sys V mandatory file locking. 66 - info on the Linux implementation of Sys V mandatory file locking.
69ncpfs.txt 67ncpfs.txt
70 - info on Novell Netware(tm) filesystem using NCP protocol. 68 - info on Novell Netware(tm) filesystem using NCP protocol.
71nfs41-server.txt 69nfs/
72 - info on the Linux server implementation of NFSv4 minor version 1. 70 - nfs-related documentation.
73nfs-rdma.txt
74 - how to install and setup the Linux NFS/RDMA client and server software.
75nfsroot.txt
76 - short guide on setting up a diskless box with NFS root filesystem.
77nilfs2.txt 71nilfs2.txt
78 - info and mount options for the NILFS2 filesystem. 72 - info and mount options for the NILFS2 filesystem.
79ntfs.txt 73ntfs.txt
@@ -92,8 +86,6 @@ relay.txt
92 - info on relay, for efficient streaming from kernel to user space. 86 - info on relay, for efficient streaming from kernel to user space.
93romfs.txt 87romfs.txt
94 - description of the ROMFS filesystem. 88 - description of the ROMFS filesystem.
95rpc-cache.txt
96 - introduction to the caching mechanisms in the sunrpc layer.
97seq_file.txt 89seq_file.txt
98 - how to use the seq_file API 90 - how to use the seq_file API
99sharedsubtree.txt 91sharedsubtree.txt
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
new file mode 100644
index 000000000000..2f68cd688769
--- /dev/null
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -0,0 +1,16 @@
100-INDEX
2 - this file (nfs-related documentation).
3Exporting
4 - explanation of how to make filesystems exportable.
5knfsd-stats.txt
6 - statistics which the NFS server makes available to user space.
7nfs.txt
8 - nfs client, and DNS resolution for fs_locations.
9nfs41-server.txt
10 - info on the Linux server implementation of NFSv4 minor version 1.
11nfs-rdma.txt
12 - how to install and setup the Linux NFS/RDMA client and server software
13nfsroot.txt
14 - short guide on setting up a diskless box with NFS root filesystem.
15rpc-cache.txt
16 - introduction to the caching mechanisms in the sunrpc layer.
diff --git a/Documentation/filesystems/Exporting b/Documentation/filesystems/nfs/Exporting
index 87019d2b5981..87019d2b5981 100644
--- a/Documentation/filesystems/Exporting
+++ b/Documentation/filesystems/nfs/Exporting
diff --git a/Documentation/filesystems/knfsd-stats.txt b/Documentation/filesystems/nfs/knfsd-stats.txt
index 64ced5149d37..64ced5149d37 100644
--- a/Documentation/filesystems/knfsd-stats.txt
+++ b/Documentation/filesystems/nfs/knfsd-stats.txt
diff --git a/Documentation/filesystems/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt
index e386f7e4bcee..e386f7e4bcee 100644
--- a/Documentation/filesystems/nfs-rdma.txt
+++ b/Documentation/filesystems/nfs/nfs-rdma.txt
diff --git a/Documentation/filesystems/nfs.txt b/Documentation/filesystems/nfs/nfs.txt
index f50f26ce6cd0..f50f26ce6cd0 100644
--- a/Documentation/filesystems/nfs.txt
+++ b/Documentation/filesystems/nfs/nfs.txt
diff --git a/Documentation/filesystems/nfs41-server.txt b/Documentation/filesystems/nfs/nfs41-server.txt
index 5920fe26e6ff..1bd0d0c05171 100644
--- a/Documentation/filesystems/nfs41-server.txt
+++ b/Documentation/filesystems/nfs/nfs41-server.txt
@@ -41,7 +41,7 @@ interoperability problems with future clients. Known issues:
41 conformant with the spec (for example, we don't use kerberos 41 conformant with the spec (for example, we don't use kerberos
42 on the backchannel correctly). 42 on the backchannel correctly).
43 - no trunking support: no clients currently take advantage of 43 - no trunking support: no clients currently take advantage of
44 trunking, but this is a mandatory failure, and its use is 44 trunking, but this is a mandatory feature, and its use is
45 recommended to clients in a number of places. (E.g. to ensure 45 recommended to clients in a number of places. (E.g. to ensure
46 timely renewal in case an existing connection's retry timeouts 46 timely renewal in case an existing connection's retry timeouts
47 have gotten too long; see section 8.3 of the draft.) 47 have gotten too long; see section 8.3 of the draft.)
@@ -213,3 +213,10 @@ The following cases aren't supported yet:
213 DESTROY_CLIENTID, DESTROY_SESSION, EXCHANGE_ID. 213 DESTROY_CLIENTID, DESTROY_SESSION, EXCHANGE_ID.
214* DESTROY_SESSION MUST be the final operation in the COMPOUND request. 214* DESTROY_SESSION MUST be the final operation in the COMPOUND request.
215 215
216Nonstandard compound limitations:
217* No support for a sessions fore channel RPC compound that requires both a
218 ca_maxrequestsize request and a ca_maxresponsesize reply, so we may
219 fail to live up to the promise we made in CREATE_SESSION fore channel
220 negotiation.
221* No more than one IO operation (read, write, readdir) allowed per
222 compound.
diff --git a/Documentation/filesystems/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index 3ba0b945aaf8..3ba0b945aaf8 100644
--- a/Documentation/filesystems/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
diff --git a/Documentation/filesystems/rpc-cache.txt b/Documentation/filesystems/nfs/rpc-cache.txt
index 8a382bea6808..8a382bea6808 100644
--- a/Documentation/filesystems/rpc-cache.txt
+++ b/Documentation/filesystems/nfs/rpc-cache.txt
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 92b888d540a6..a7e9746ee7ea 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -140,7 +140,7 @@ Callers of notify_change() need ->i_mutex now.
140New super_block field "struct export_operations *s_export_op" for 140New super_block field "struct export_operations *s_export_op" for
141explicit support for exporting, e.g. via NFS. The structure is fully 141explicit support for exporting, e.g. via NFS. The structure is fully
142documented at its declaration in include/linux/fs.h, and in 142documented at its declaration in include/linux/fs.h, and in
143Documentation/filesystems/Exporting. 143Documentation/filesystems/nfs/Exporting.
144 144
145Briefly it allows for the definition of decode_fh and encode_fh operations 145Briefly it allows for the definition of decode_fh and encode_fh operations
146to encode and decode filehandles, and allows the filesystem to use 146to encode and decode filehandles, and allows the filesystem to use
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 94b9f2056f4c..220cc6376ef8 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -38,6 +38,7 @@ Table of Contents
38 3.3 /proc/<pid>/io - Display the IO accounting fields 38 3.3 /proc/<pid>/io - Display the IO accounting fields
39 3.4 /proc/<pid>/coredump_filter - Core dump filtering settings 39 3.4 /proc/<pid>/coredump_filter - Core dump filtering settings
40 3.5 /proc/<pid>/mountinfo - Information about mounts 40 3.5 /proc/<pid>/mountinfo - Information about mounts
41 3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
41 42
42 43
43------------------------------------------------------------------------------ 44------------------------------------------------------------------------------
@@ -1409,3 +1410,11 @@ For more information on mount propagation see:
1409 1410
1410 Documentation/filesystems/sharedsubtree.txt 1411 Documentation/filesystems/sharedsubtree.txt
1411 1412
1413
14143.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
1415--------------------------------------------------------
1416These files provide a method to access a task's comm value. They also allow a
1417task to set its own comm value or that of one of its thread siblings. The comm
1418value is limited in size compared to the cmdline value, so writing anything
1419longer than the kernel's TASK_COMM_LEN (currently 16 chars) will result in a
1420truncated comm value.
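To make the behaviour concrete, a small sketch that renames the calling task
through /proc/self/comm and reads the value back; the name used is arbitrary and
anything longer than TASK_COMM_LEN - 1 characters would come back truncated.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[32];
        ssize_t n;
        int fd = open("/proc/self/comm", O_RDWR);

        if (fd < 0)
                return 1;

        write(fd, "demo-comm", 9);              /* arbitrary new comm value */
        lseek(fd, 0, SEEK_SET);
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("comm is now: %s", buf); /* kernel appends a newline */
        }
        close(fd);
        return 0;
}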
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index 0d15ebccf5b0..a1e2e0dda907 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -248,9 +248,7 @@ code, that is done in the initialization code in the usual way:
248 { 248 {
249 struct proc_dir_entry *entry; 249 struct proc_dir_entry *entry;
250 250
251 entry = create_proc_entry("sequence", 0, NULL); 251 proc_create("sequence", 0, NULL, &ct_file_ops);
252 if (entry)
253 entry->proc_fops = &ct_file_ops;
254 return 0; 252 return 0;
255 } 253 }
256 254
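For context, a minimal module built around the new-style call might look like
this sketch; it substitutes single_open() for the full iterator described
elsewhere in seq_file.txt, so the names and the trivial show routine are
illustrative only.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int ct_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from /proc/sequence\n");
        return 0;
}

static int ct_open(struct inode *inode, struct file *file)
{
        return single_open(file, ct_show, NULL);
}

static const struct file_operations ct_file_ops = {
        .owner   = THIS_MODULE,
        .open    = ct_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init ct_init(void)
{
        proc_create("sequence", 0, NULL, &ct_file_ops);
        return 0;
}

static void __exit ct_exit(void)
{
        remove_proc_entry("sequence", NULL);
}

module_init(ct_init);
module_exit(ct_exit);
MODULE_LICENSE("GPL");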
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index e4e7daed2ba8..1866c27eec69 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -531,6 +531,13 @@ and have the following read/write attributes:
531 This file exists only if the pin can be configured as an 531 This file exists only if the pin can be configured as an
532 interrupt generating input pin. 532 interrupt generating input pin.
533 533
534 "active_low" ... reads as either 0 (false) or 1 (true). Write
535 any nonzero value to invert the value attribute both
536 for reading and writing. Existing and subsequent
537 poll(2) support configured via the edge attribute
538 for "rising" and "falling" edges will follow this
539 setting.
540
534GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the 541GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
535controller implementing GPIOs starting at #42) and have the following 542controller implementing GPIOs starting at #42) and have the following
536read-only attributes: 543read-only attributes:
@@ -566,6 +573,8 @@ requested using gpio_request():
566 int gpio_export_link(struct device *dev, const char *name, 573 int gpio_export_link(struct device *dev, const char *name,
567 unsigned gpio) 574 unsigned gpio)
568 575
576 /* change the polarity of a GPIO node in sysfs */
577 int gpio_sysfs_set_active_low(unsigned gpio, int value);
569 578
570After a kernel driver requests a GPIO, it may only be made available in 579After a kernel driver requests a GPIO, it may only be made available in
571the sysfs interface by gpio_export(). The driver can control whether the 580the sysfs interface by gpio_export(). The driver can control whether the
@@ -580,3 +589,9 @@ After the GPIO has been exported, gpio_export_link() allows creating
580symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can 589symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
581use this to provide the interface under their own device in sysfs with 590use this to provide the interface under their own device in sysfs with
582a descriptive name. 591a descriptive name.
592
593Drivers can use gpio_sysfs_set_active_low() to hide GPIO line polarity
594differences between boards from user space. This only affects the
595sysfs interface. Polarity change can be done both before and after
596gpio_export(), and previously enabled poll(2) support for either
597rising or falling edge will be reconfigured to follow this setting.
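A sketch of how board-level code might combine gpio_export() with the new
gpio_sysfs_set_active_low() call; the GPIO number, its label and the assumption
that the line is wired active-low are invented for illustration.

#include <linux/gpio.h>
#include <linux/init.h>

#define DEMO_LED_GPIO   42      /* hypothetical GPIO, wired active-low on this board */

static int __init demo_led_init(void)
{
        int err;

        err = gpio_request(DEMO_LED_GPIO, "demo-led");
        if (err)
                return err;

        gpio_direction_output(DEMO_LED_GPIO, 0);

        /* make the line visible as /sys/class/gpio/gpio42/ ... */
        err = gpio_export(DEMO_LED_GPIO, false);
        if (err) {
                gpio_free(DEMO_LED_GPIO);
                return err;
        }

        /* ... and hide the active-low wiring from user space */
        return gpio_sysfs_set_active_low(DEMO_LED_GPIO, 1);
}
device_initcall(demo_led_init);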
diff --git a/Documentation/hwmon/lis3lv02d b/Documentation/hwmon/lis3lv02d
index effe949a7282..06534f25e643 100644
--- a/Documentation/hwmon/lis3lv02d
+++ b/Documentation/hwmon/lis3lv02d
@@ -3,7 +3,8 @@ Kernel driver lis3lv02d
3 3
4Supported chips: 4Supported chips:
5 5
6 * STMicroelectronics LIS3LV02DL and LIS3LV02DQ 6 * STMicroelectronics LIS3LV02DL, LIS3LV02DQ (12 bits precision)
7 * STMicroelectronics LIS302DL, LIS3L02DQ, LIS331DL (8 bits)
7 8
8Authors: 9Authors:
9 Yan Burman <burman.yan@gmail.com> 10 Yan Burman <burman.yan@gmail.com>
@@ -13,32 +14,52 @@ Authors:
13Description 14Description
14----------- 15-----------
15 16
16This driver provides support for the accelerometer found in various HP 17This driver provides support for the accelerometer found in various HP laptops
17laptops sporting the feature officially called "HP Mobile Data 18sporting the feature officially called "HP Mobile Data Protection System 3D" or
18Protection System 3D" or "HP 3D DriveGuard". It detects automatically 19"HP 3D DriveGuard". It detects automatically laptops with this sensor. Known
19laptops with this sensor. Known models (for now the HP 2133, nc6420, 20models (full list can be found in drivers/hwmon/hp_accel.c) will have their
20nc2510, nc8510, nc84x0, nw9440 and nx9420) will have their axis 21axis automatically oriented on standard way (eg: you can directly play
21automatically oriented on standard way (eg: you can directly play 22neverball). The accelerometer data is readable via
22neverball). The accelerometer data is readable via 23/sys/devices/platform/lis3lv02d. Reported values are scaled
23/sys/devices/platform/lis3lv02d. 24to mg values (1/1000th of earth gravity).
24 25
25Sysfs attributes under /sys/devices/platform/lis3lv02d/: 26Sysfs attributes under /sys/devices/platform/lis3lv02d/:
26position - 3D position that the accelerometer reports. Format: "(x,y,z)" 27position - 3D position that the accelerometer reports. Format: "(x,y,z)"
27calibrate - read: values (x, y, z) that are used as the base for input 28rate - read reports the sampling rate of the accelerometer device in HZ.
28 class device operation. 29 write changes sampling rate of the accelerometer device.
29 write: forces the base to be recalibrated with the current 30 Only values which are supported by HW are accepted.
30 position. 31selftest - performs selftest for the chip as specified by chip manufacturer.
31rate - reports the sampling rate of the accelerometer device in HZ
32 32
33This driver also provides an absolute input class device, allowing 33This driver also provides an absolute input class device, allowing
34the laptop to act as a pinball machine-esque joystick. 34the laptop to act as a pinball machine-esque joystick. Joystick device can be
35calibrated. The joystick device can operate in two different modes.
36By default output values are scaled between -32768 .. 32767. In joystick raw
37mode, the joystick and the sysfs position entry have the same scale. There can
38be a small difference due to the input system's fuzz feature.
39Events are also available via an input event device.
40
41Selftest is meant only for hardware diagnostic purposes. It is not meant to be
42used during normal operations. Position data is not corrupted during selftest
43but interrupt behaviour is not guaranteed to work reliably. In test mode, the
44sensing element is internally moved a little bit. Selftest measures the
45difference between normal mode and test mode. Chip specifications give the
46acceptance limits for each chip type. Limits are provided via platform data
47to allow adjustment of the limits without a change to the actual driver.
48Selftest returns either "OK x y z" or "FAIL x y z" where x, y and z are the
49measured differences between the modes. Axes are not remapped in selftest mode.
50Measurement values are provided to help HW diagnostic applications make the
51final decision.
52
53On HP laptops, if the led infrastructure is activated, support for a led
54indicating disk protection will be provided as /sys/class/leds/hp::hddprotect.
35 55
36Another feature of the driver is misc device called "freefall" that 56Another feature of the driver is misc device called "freefall" that
37acts similar to /dev/rtc and reacts on free-fall interrupts received 57acts similar to /dev/rtc and reacts on free-fall interrupts received
38from the device. It supports blocking operations, poll/select and 58from the device. It supports blocking operations, poll/select and
39fasync operation modes. You must read 1 bytes from the device. The 59fasync operation modes. You must read 1 bytes from the device. The
40result is number of free-fall interrupts since the last successful 60result is number of free-fall interrupts since the last successful
41read (or 255 if number of interrupts would not fit). 61read (or 255 if number of interrupts would not fit). See the hpfall.c
62file for an example on using the device.
42 63
43 64
44Axes orientation 65Axes orientation
@@ -55,7 +76,7 @@ the accelerometer are converted into a "standard" organisation of the axes
55 * If the laptop is put upside-down, Z becomes negative 76 * If the laptop is put upside-down, Z becomes negative
56 77
57If your laptop model is not recognized (cf "dmesg"), you can send an 78If your laptop model is not recognized (cf "dmesg"), you can send an
58email to the authors to add it to the database. When reporting a new 79email to the maintainer to add it to the database. When reporting a new
59laptop, please include the output of "dmidecode" plus the value of 80laptop, please include the output of "dmidecode" plus the value of
60/sys/devices/platform/lis3lv02d/position in these four cases. 81/sys/devices/platform/lis3lv02d/position in these four cases.
61 82
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index 02b74899edaf..b7e42ec4b26b 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -81,8 +81,14 @@ pwm[1-4] - this file stores PWM duty cycle or DC value (fan speed) in range:
81 0 (stop) to 255 (full) 81 0 (stop) to 255 (full)
82 82
83pwm[1-4]_enable - this file controls mode of fan/temperature control: 83pwm[1-4]_enable - this file controls mode of fan/temperature control:
84 * 1 Manual Mode, write to pwm file any value 0-255 (full speed) 84 * 1 Manual mode, write to pwm file any value 0-255 (full speed)
85 * 2 Thermal Cruise 85 * 2 "Thermal Cruise" mode
86 * 3 "Fan Speed Cruise" mode
87 * 4 "Smart Fan III" mode
88
89pwm[1-4]_mode - controls whether the output is PWM or a DC level
90 * 0 DC output (0 - 12v)
91 * 1 PWM output
86 92
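As a rough illustration of the interface above, a small sketch that switches
fan 1 into manual mode and sets a mid-range duty cycle (the hwmon path is an
example; locate the correct instance under /sys/class/hwmon):

/* Sketch: manual fan control through the sysfs files described above. */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Example path; find the right instance under /sys/class/hwmon. */
	const char *dir = "/sys/class/hwmon/hwmon0/device";
	char path[128];

	snprintf(path, sizeof(path), "%s/pwm1_enable", dir);
	if (write_attr(path, "1"))		/* 1 = manual mode */
		return 1;
	snprintf(path, sizeof(path), "%s/pwm1", dir);
	return write_attr(path, "128") ? 1 : 0;	/* duty cycle 0-255 */
}
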
87Thermal Cruise mode 93Thermal Cruise mode
88------------------- 94-------------------
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 7860aafb483d..0a74603eb671 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -44,7 +44,7 @@ static struct i2c_driver foo_driver = {
44 /* if device autodetection is needed: */ 44 /* if device autodetection is needed: */
45 .class = I2C_CLASS_SOMETHING, 45 .class = I2C_CLASS_SOMETHING,
46 .detect = foo_detect, 46 .detect = foo_detect,
47 .address_data = &addr_data, 47 .address_list = normal_i2c,
48 48
49 .shutdown = foo_shutdown, /* optional */ 49 .shutdown = foo_shutdown, /* optional */
50 .suspend = foo_suspend, /* optional */ 50 .suspend = foo_suspend, /* optional */
diff --git a/Documentation/infiniband/ipoib.txt b/Documentation/infiniband/ipoib.txt
index 6d40f00b358c..64eeb55d0c09 100644
--- a/Documentation/infiniband/ipoib.txt
+++ b/Documentation/infiniband/ipoib.txt
@@ -36,11 +36,11 @@ Datagram vs Connected modes
36 fabric with a 2K MTU, the IPoIB MTU will be 2048 - 4 = 2044 bytes. 36 fabric with a 2K MTU, the IPoIB MTU will be 2048 - 4 = 2044 bytes.
37 37
38 In connected mode, the IB RC (Reliable Connected) transport is used. 38 In connected mode, the IB RC (Reliable Connected) transport is used.
39 Connected mode is to takes advantage of the connected nature of the 39 Connected mode takes advantage of the connected nature of the IB
40 IB transport and allows an MTU up to the maximal IP packet size of 40 transport and allows an MTU up to the maximal IP packet size of 64K,
41 64K, which reduces the number of IP packets needed for handling 41 which reduces the number of IP packets needed for handling large UDP
42 large UDP datagrams, TCP segments, etc and increases the performance 42 datagrams, TCP segments, etc and increases the performance for large
43 for large messages. 43 messages.
44 44
45 In connected mode, the interface's UD QP is still used for multicast 45 In connected mode, the interface's UD QP is still used for multicast
46 and communication with peers that don't support connected mode. In 46 and communication with peers that don't support connected mode. In
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ab95d3ada5c7..5ba4d9dff113 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1032,7 +1032,7 @@ and is between 256 and 4096 characters. It is defined in the file
1032 No delay 1032 No delay
1033 1033
1034 ip= [IP_PNP] 1034 ip= [IP_PNP]
1035 See Documentation/filesystems/nfsroot.txt. 1035 See Documentation/filesystems/nfs/nfsroot.txt.
1036 1036
1037 ip2= [HW] Set IO/IRQ pairs for up to 4 IntelliPort boards 1037 ip2= [HW] Set IO/IRQ pairs for up to 4 IntelliPort boards
1038 See comment before ip2_setup() in 1038 See comment before ip2_setup() in
@@ -1553,10 +1553,10 @@ and is between 256 and 4096 characters. It is defined in the file
1553 going to be removed in 2.6.29. 1553 going to be removed in 2.6.29.
1554 1554
1555 nfsaddrs= [NFS] 1555 nfsaddrs= [NFS]
1556 See Documentation/filesystems/nfsroot.txt. 1556 See Documentation/filesystems/nfs/nfsroot.txt.
1557 1557
1558 nfsroot= [NFS] nfs root filesystem for disk-less boxes. 1558 nfsroot= [NFS] nfs root filesystem for disk-less boxes.
1559 See Documentation/filesystems/nfsroot.txt. 1559 See Documentation/filesystems/nfs/nfsroot.txt.
1560 1560
1561 nfs.callback_tcpport= 1561 nfs.callback_tcpport=
1562 [NFS] set the TCP port on which the NFSv4 callback 1562 [NFS] set the TCP port on which the NFSv4 callback
@@ -2729,6 +2729,11 @@ and is between 256 and 4096 characters. It is defined in the file
2729 vmpoff= [KNL,S390] Perform z/VM CP command after power off. 2729 vmpoff= [KNL,S390] Perform z/VM CP command after power off.
2730 Format: <command> 2730 Format: <command>
2731 2731
2732 vt.cur_default= [VT] Default cursor shape.
2733 Format: 0xCCBBAA, where AA, BB, and CC are the same as
2734 the parameters of the <Esc>[?A;B;Cc escape sequence;
2735 see VGA-softcursor.txt. Default: 2 = underline.
2736
2732 vt.default_blu= [VT] 2737 vt.default_blu= [VT]
2733 Format: <blue0>,<blue1>,<blue2>,...,<blue15> 2738 Format: <blue0>,<blue1>,<blue2>,...,<blue15>
2734 Change the default blue palette of the console. 2739 Change the default blue palette of the console.
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index bbc8a6a36921..57e7e9cc1870 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -160,12 +160,15 @@ Under each section, you can see 4 files.
160NOTE: 160NOTE:
161 These directories/files appear after physical memory hotplug phase. 161 These directories/files appear after physical memory hotplug phase.
162 162
163If CONFIG_NUMA is enabled the 163If CONFIG_NUMA is enabled the memoryXXX/ directories can also be accessed
164/sys/devices/system/memory/memoryXXX memory section 164via symbolic links located in the /sys/devices/system/node/node* directories.
165directories can also be accessed via symbolic links located in 165
166the /sys/devices/system/node/node* directories. For example: 166For example:
167/sys/devices/system/node/node0/memory9 -> ../../memory/memory9 167/sys/devices/system/node/node0/memory9 -> ../../memory/memory9
168 168
169A backlink will also be created:
170/sys/devices/system/memory/memory9/node0 -> ../../node/node0
171
169-------------------------------- 172--------------------------------
1704. Physical memory hot-add phase 1734. Physical memory hot-add phase
171-------------------------------- 174--------------------------------
diff --git a/Documentation/misc-devices/ad525x_dpot.txt b/Documentation/misc-devices/ad525x_dpot.txt
new file mode 100644
index 000000000000..0c9413b1cbf3
--- /dev/null
+++ b/Documentation/misc-devices/ad525x_dpot.txt
@@ -0,0 +1,57 @@
1---------------------------------
2 AD525x Digital Potentiometers
3---------------------------------
4
5The ad525x_dpot driver exports a simple sysfs interface. This allows you to
6work with the immediate resistance settings as well as update the saved startup
7settings. Access to the factory programmed tolerance is also provided, but
8interpretation of these settings is required by the end application according to
9the specific part in use.
10
11---------
12 Files
13---------
14
15Each dpot device will have a set of eeprom, rdac, and tolerance files. How
16many depends on the actual part you have, as does the range of allowed values.
17
18The eeprom files are used to program the startup value of the device.
19
20The rdac files are used to program the immediate value of the device.
21
22The tolerance files are the read-only factory programmed tolerance settings
23and may vary greatly on a part-by-part basis. For exact interpretation of
24this field, please consult the datasheet for your part. This is presented
25as a hex file for easier parsing.
26
27-----------
28 Example
29-----------
30
31Locate the device in your sysfs tree. This is probably easiest by going into
32the common i2c directory and locating the device by the i2c slave address.
33
34 # ls /sys/bus/i2c/devices/
35 0-0022 0-0027 0-002f
36
37So assuming the device in question is on the first i2c bus and has the slave
38address of 0x2f, we descend (unrelated sysfs entries have been trimmed).
39
40 # ls /sys/bus/i2c/devices/0-002f/
41 eeprom0 rdac0 tolerance0
42
43You can use simple reads/writes to access these files:
44
45 # cd /sys/bus/i2c/devices/0-002f/
46
47 # cat eeprom0
48 0
49 # echo 10 > eeprom0
50 # cat eeprom0
51 10
52
53 # cat rdac0
54 5
55 # echo 3 > rdac0
56 # cat rdac0
57 3
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index b565e8279d13..8e1ddec2c78a 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -119,6 +119,32 @@ FURTHER NOTES ON NO-MMU MMAP
119 granule but will only discard the excess if appropriately configured as 119 granule but will only discard the excess if appropriately configured as
120 this has an effect on fragmentation. 120 this has an effect on fragmentation.
121 121
122 (*) The memory allocated by a request for an anonymous mapping will normally
123 be cleared by the kernel before being returned in accordance with the
124 Linux man pages (ver 2.22 or later).
125
126 In the MMU case this can be achieved with reasonable performance as
127 regions are backed by virtual pages, with the contents only being mapped
128 to cleared physical pages when a write happens on that specific page
129 (prior to which, the pages are effectively mapped to the global zero page
130 from which reads can take place). This spreads out the time it takes to
131 initialize the contents of a page - depending on the write-usage of the
132 mapping.
133
134 In the no-MMU case, however, anonymous mappings are backed by physical
135 pages, and the entire map is cleared at allocation time. This can cause
136 significant delays during a userspace malloc() as the C library does an
137 anonymous mapping and the kernel then does a memset for the entire map.
138
139 However, for memory that isn't required to be precleared - such as that
140 returned by malloc() - mmap() can take a MAP_UNINITIALIZED flag to
141 indicate to the kernel that it shouldn't bother clearing the memory before
142 returning it. Note that CONFIG_MMAP_ALLOW_UNINITIALIZED must be enabled
143 to permit this, otherwise the flag will be ignored.
144
145 uClibc uses this to speed up malloc(), and the ELF-FDPIC binfmt uses this
146 to allocate the brk and stack region.
147
122 (*) A list of all the private copy and anonymous mappings on the system is 148 (*) A list of all the private copy and anonymous mappings on the system is
123 visible through /proc/maps in no-MMU mode. 149 visible through /proc/maps in no-MMU mode.
124 150
diff --git a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt b/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
new file mode 100644
index 000000000000..515ebcf1b97d
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
@@ -0,0 +1,93 @@
1PPC440SPe DMA/XOR (DMA Controller and XOR Accelerator)
2
3Device nodes needed for operation of the ppc440spe-adma driver
4are specified here. These are the I2O/DMA, DMA and XOR nodes
5for the DMA engines and the Memory Queue Module node. The latter is used
6by the ADMA driver for configuration of the RAID-6 H/W capabilities of
7the PPC440SPe. In addition to the nodes and properties described
8below, the ranges property of the PLB node must specify ranges for
9DMA devices.
10
11 i) The I2O node
12
13 Required properties:
14
15 - compatible : "ibm,i2o-440spe";
16 - reg : <registers mapping>
17 - dcr-reg : <DCR registers range>
18
19 Example:
20
21 I2O: i2o@400100000 {
22 compatible = "ibm,i2o-440spe";
23 reg = <0x00000004 0x00100000 0x100>;
24 dcr-reg = <0x060 0x020>;
25 };
26
27
28 ii) The DMA node
29
30 Required properties:
31
32 - compatible : "ibm,dma-440spe";
33 - cell-index : 1 cell, hardware index of the DMA engine
34 (typically 0x0 and 0x1 for DMA0 and DMA1)
35 - reg : <registers mapping>
36 - dcr-reg : <DCR registers range>
37 - interrupts : <interrupt mapping for DMA0/1 interrupts sources:
38 2 sources: DMAx CS FIFO Needs Service IRQ (on UIC0)
39 and DMA Error IRQ (on UIC1). The latter is common
40 for both DMA engines>.
41 - interrupt-parent : needed for interrupt mapping
42
43 Example:
44
45 DMA0: dma0@400100100 {
46 compatible = "ibm,dma-440spe";
47 cell-index = <0>;
48 reg = <0x00000004 0x00100100 0x100>;
49 dcr-reg = <0x060 0x020>;
50 interrupt-parent = <&DMA0>;
51 interrupts = <0 1>;
52 #interrupt-cells = <1>;
53 #address-cells = <0>;
54 #size-cells = <0>;
55 interrupt-map = <
56 0 &UIC0 0x14 4
57 1 &UIC1 0x16 4>;
58 };
59
60
61 iii) XOR Accelerator node
62
63 Required properties:
64
65 - compatible : "amcc,xor-accelerator";
66 - reg : <registers mapping>
67 - interrupts : <interrupt mapping for XOR interrupt source>
68 - interrupt-parent : for interrupt mapping
69
70 Example:
71
72 xor-accel@400200000 {
73 compatible = "amcc,xor-accelerator";
74 reg = <0x00000004 0x00200000 0x400>;
75 interrupt-parent = <&UIC1>;
76 interrupts = <0x1f 4>;
77 };
78
79
80 iv) Memory Queue Module node
81
82 Required properties:
83
84 - compatible : "ibm,mq-440spe";
85 - dcr-reg : <DCR registers range>
86
87 Example:
88
89 MQ0: mq {
90 compatible = "ibm,mq-440spe";
91 dcr-reg = <0x040 0x020>;
92 };
93
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 319d9838e87e..1800a62cf135 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -12,6 +12,7 @@ m5602 0402:5602 ALi Video Camera Controller
12spca501 040a:0002 Kodak DVC-325 12spca501 040a:0002 Kodak DVC-325
13spca500 040a:0300 Kodak EZ200 13spca500 040a:0300 Kodak EZ200
14zc3xx 041e:041e Creative WebCam Live! 14zc3xx 041e:041e Creative WebCam Live!
15ov519 041e:4003 Video Blaster WebCam Go Plus
15spca500 041e:400a Creative PC-CAM 300 16spca500 041e:400a Creative PC-CAM 300
16sunplus 041e:400b Creative PC-CAM 600 17sunplus 041e:400b Creative PC-CAM 600
17sunplus 041e:4012 PC-Cam350 18sunplus 041e:4012 PC-Cam350
@@ -168,10 +169,14 @@ sunplus 055f:c650 Mustek MDC5500Z
168zc3xx 055f:d003 Mustek WCam300A 169zc3xx 055f:d003 Mustek WCam300A
169zc3xx 055f:d004 Mustek WCam300 AN 170zc3xx 055f:d004 Mustek WCam300 AN
170conex 0572:0041 Creative Notebook cx11646 171conex 0572:0041 Creative Notebook cx11646
172ov519 05a9:0511 Video Blaster WebCam 3/WebCam Plus, D-Link USB Digital Video Camera
173ov519 05a9:0518 Creative WebCam
171ov519 05a9:0519 OV519 Microphone 174ov519 05a9:0519 OV519 Microphone
172ov519 05a9:0530 OmniVision 175ov519 05a9:0530 OmniVision
176ov519 05a9:2800 OmniVision SuperCAM
173ov519 05a9:4519 Webcam Classic 177ov519 05a9:4519 Webcam Classic
174ov519 05a9:8519 OmniVision 178ov519 05a9:8519 OmniVision
179ov519 05a9:a511 D-Link USB Digital Video Camera
175ov519 05a9:a518 D-Link DSB-C310 Webcam 180ov519 05a9:a518 D-Link DSB-C310 Webcam
176sunplus 05da:1018 Digital Dream Enigma 1.3 181sunplus 05da:1018 Digital Dream Enigma 1.3
177stk014 05e1:0893 Syntek DV4000 182stk014 05e1:0893 Syntek DV4000
@@ -187,7 +192,7 @@ ov534 06f8:3002 Hercules Blog Webcam
187ov534 06f8:3003 Hercules Dualpix HD Weblog 192ov534 06f8:3003 Hercules Dualpix HD Weblog
188sonixj 06f8:3004 Hercules Classic Silver 193sonixj 06f8:3004 Hercules Classic Silver
189sonixj 06f8:3008 Hercules Deluxe Optical Glass 194sonixj 06f8:3008 Hercules Deluxe Optical Glass
190pac7311 06f8:3009 Hercules Classic Link 195pac7302 06f8:3009 Hercules Classic Link
191spca508 0733:0110 ViewQuest VQ110 196spca508 0733:0110 ViewQuest VQ110
192spca501 0733:0401 Intel Create and Share 197spca501 0733:0401 Intel Create and Share
193spca501 0733:0402 ViewQuest M318B 198spca501 0733:0402 ViewQuest M318B
@@ -199,6 +204,7 @@ sunplus 0733:2221 Mercury Digital Pro 3.1p
199sunplus 0733:3261 Concord 3045 spca536a 204sunplus 0733:3261 Concord 3045 spca536a
200sunplus 0733:3281 Cyberpix S550V 205sunplus 0733:3281 Cyberpix S550V
201spca506 0734:043b 3DeMon USB Capture aka 206spca506 0734:043b 3DeMon USB Capture aka
207ov519 0813:0002 Dual Mode USB Camera Plus
202spca500 084d:0003 D-Link DSC-350 208spca500 084d:0003 D-Link DSC-350
203spca500 08ca:0103 Aiptek PocketDV 209spca500 08ca:0103 Aiptek PocketDV
204sunplus 08ca:0104 Aiptek PocketDVII 1.3 210sunplus 08ca:0104 Aiptek PocketDVII 1.3
@@ -236,15 +242,15 @@ pac7311 093a:2603 Philips SPC 500 NC
236pac7311 093a:2608 Trust WB-3300p 242pac7311 093a:2608 Trust WB-3300p
237pac7311 093a:260e Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350 243pac7311 093a:260e Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350
238pac7311 093a:260f SnakeCam 244pac7311 093a:260f SnakeCam
239pac7311 093a:2620 Apollo AC-905 245pac7302 093a:2620 Apollo AC-905
240pac7311 093a:2621 PAC731x 246pac7302 093a:2621 PAC731x
241pac7311 093a:2622 Genius Eye 312 247pac7302 093a:2622 Genius Eye 312
242pac7311 093a:2624 PAC7302 248pac7302 093a:2624 PAC7302
243pac7311 093a:2626 Labtec 2200 249pac7302 093a:2626 Labtec 2200
244pac7311 093a:2628 Genius iLook 300 250pac7302 093a:2628 Genius iLook 300
245pac7311 093a:2629 Genious iSlim 300 251pac7302 093a:2629 Genious iSlim 300
246pac7311 093a:262a Webcam 300k 252pac7302 093a:262a Webcam 300k
247pac7311 093a:262c Philips SPC 230 NC 253pac7302 093a:262c Philips SPC 230 NC
248jeilinj 0979:0280 Sakar 57379 254jeilinj 0979:0280 Sakar 57379
249zc3xx 0ac8:0302 Z-star Vimicro zc0302 255zc3xx 0ac8:0302 Z-star Vimicro zc0302
250vc032x 0ac8:0321 Vimicro generic vc0321 256vc032x 0ac8:0321 Vimicro generic vc0321
@@ -259,6 +265,7 @@ vc032x 0ac8:c002 Sony embedded vimicro
259vc032x 0ac8:c301 Samsung Q1 Ultra Premium 265vc032x 0ac8:c301 Samsung Q1 Ultra Premium
260spca508 0af9:0010 Hama USB Sightcam 100 266spca508 0af9:0010 Hama USB Sightcam 100
261spca508 0af9:0011 Hama USB Sightcam 100 267spca508 0af9:0011 Hama USB Sightcam 100
268ov519 0b62:0059 iBOT2 Webcam
262sonixb 0c45:6001 Genius VideoCAM NB 269sonixb 0c45:6001 Genius VideoCAM NB
263sonixb 0c45:6005 Microdia Sweex Mini Webcam 270sonixb 0c45:6005 Microdia Sweex Mini Webcam
264sonixb 0c45:6007 Sonix sn9c101 + Tas5110D 271sonixb 0c45:6007 Sonix sn9c101 + Tas5110D
@@ -318,8 +325,10 @@ sn9c20x 0c45:62b3 PC Camera (SN9C202 + OV9655)
318sn9c20x 0c45:62bb PC Camera (SN9C202 + OV7660) 325sn9c20x 0c45:62bb PC Camera (SN9C202 + OV7660)
319sn9c20x 0c45:62bc PC Camera (SN9C202 + HV7131R) 326sn9c20x 0c45:62bc PC Camera (SN9C202 + HV7131R)
320sunplus 0d64:0303 Sunplus FashionCam DXG 327sunplus 0d64:0303 Sunplus FashionCam DXG
328ov519 0e96:c001 TRUST 380 USB2 SPACEC@M
321etoms 102c:6151 Qcam Sangha CIF 329etoms 102c:6151 Qcam Sangha CIF
322etoms 102c:6251 Qcam xxxxxx VGA 330etoms 102c:6251 Qcam xxxxxx VGA
331ov519 1046:9967 W9967CF/W9968CF WebCam IC, Video Blaster WebCam Go
323zc3xx 10fd:0128 Typhoon Webshot II USB 300k 0x0128 332zc3xx 10fd:0128 Typhoon Webshot II USB 300k 0x0128
324spca561 10fd:7e50 FlyCam Usb 100 333spca561 10fd:7e50 FlyCam Usb 100
325zc3xx 10fd:8050 Typhoon Webshot II USB 300k 334zc3xx 10fd:8050 Typhoon Webshot II USB 300k
@@ -332,7 +341,12 @@ spca501 1776:501c Arowana 300K CMOS Camera
332t613 17a1:0128 TASCORP JPEG Webcam, NGS Cyclops 341t613 17a1:0128 TASCORP JPEG Webcam, NGS Cyclops
333vc032x 17ef:4802 Lenovo Vc0323+MI1310_SOC 342vc032x 17ef:4802 Lenovo Vc0323+MI1310_SOC
334pac207 2001:f115 D-Link DSB-C120 343pac207 2001:f115 D-Link DSB-C120
344sq905c 2770:9050 sq905c
345sq905c 2770:905c DualCamera
346sq905 2770:9120 Argus Digital Camera DC1512
347sq905c 2770:913d sq905c
335spca500 2899:012c Toptro Industrial 348spca500 2899:012c Toptro Industrial
349ov519 8020:ef04 ov519
336spca508 8086:0110 Intel Easy PC Camera 350spca508 8086:0110 Intel Easy PC Camera
337spca500 8086:0630 Intel Pocket PC Camera 351spca500 8086:0630 Intel Pocket PC Camera
338spca506 99fa:8988 Grandtec V.cap 352spca506 99fa:8988 Grandtec V.cap
diff --git a/Documentation/video4linux/sh_mobile_ceu_camera.txt b/Documentation/video4linux/sh_mobile_ceu_camera.txt
new file mode 100644
index 000000000000..2ae16349a78d
--- /dev/null
+++ b/Documentation/video4linux/sh_mobile_ceu_camera.txt
@@ -0,0 +1,157 @@
1 Cropping and Scaling algorithm, used in the sh_mobile_ceu_camera driver
2 =======================================================================
3
4Terminology
5-----------
6
7sensor scales: horizontal and vertical scales, configured by the sensor driver
8host scales: -"- host driver
9combined scales: sensor_scale * host_scale
10
11
12Generic scaling / cropping scheme
13---------------------------------
14
15-1--
16|
17-2-- -\
18| --\
19| --\
20+-5-- -\ -- -3--
21| ---\
22| --- -4-- -\
23| -\
24| - -6--
25|
26| - -6'-
27| -/
28| --- -4'- -/
29| ---/
30+-5'- -/
31| -- -3'-
32| --/
33| --/
34-2'- -/
35|
36|
37-1'-
38
39Produced by user requests:
40
41S_CROP(left / top = (5) - (1), width / height = (5') - (5))
42S_FMT(width / height = (6') - (6))
43
44Here:
45
46(1) to (1') - whole max width or height
47(1) to (2) - sensor cropped left or top
48(2) to (2') - sensor cropped width or height
49(3) to (3') - sensor scale
50(3) to (4) - CEU cropped left or top
51(4) to (4') - CEU cropped width or height
52(5) to (5') - reverse sensor scale applied to CEU cropped width or height
53(2) to (5) - reverse sensor scale applied to CEU cropped left or top
54(6) to (6') - CEU scale - user window
55
56
57S_FMT
58-----
59
60Do not touch input rectangle - it is already optimal.
61
621. Calculate current sensor scales:
63
64 scale_s = ((3') - (3)) / ((2') - (2))
65
662. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at
67current sensor scales onto input window - this is user S_CROP:
68
69 width_u = (5') - (5) = ((4') - (4)) * scale_s
70
713. Calculate new combined scales from "effective" input window to requested user
72window:
73
74 scale_comb = width_u / ((6') - (6))
75
764. Calculate sensor output window by applying combined scales to real input
77window:
78
79 width_s_out = ((2') - (2)) / scale_comb
80
815. Apply iterative sensor S_FMT for sensor output window.
82
83 subdev->video_ops->s_fmt(.width = width_s_out)
84
856. Retrieve sensor output window (g_fmt)
86
877. Calculate new sensor scales:
88
89 scale_s_new = ((3')_new - (3)_new) / ((2') - (2))
90
918. Calculate new CEU crop - apply sensor scales to previously calculated
92"effective" crop:
93
94 width_ceu = (4')_new - (4)_new = width_u / scale_s_new
95 left_ceu = (4)_new - (3)_new = ((5) - (2)) / scale_s_new
96
979. Use CEU cropping to crop to the new window:
98
99 ceu_crop(.width = width_ceu, .left = left_ceu)
100
10110. Use CEU scaling to scale to the requested user window:
102
103 scale_ceu = width_ceu / width
104
105
106S_CROP
107------
108
109If the old scale applied to the new crop is invalid, produce the nearest possible new scale.
110
1111. Calculate current combined scales.
112
113 scale_comb = (((4') - (4)) / ((6') - (6))) * (((2') - (2)) / ((3') - (3)))
114
1152. Apply iterative sensor S_CROP for new input window.
116
1173. If old combined scales applied to new crop produce an impossible user window,
118adjust scales to produce nearest possible window.
119
120 width_u_out = ((5') - (5)) / scale_comb
121
122 if (width_u_out > max)
123 scale_comb = ((5') - (5)) / max;
124 else if (width_u_out < min)
125 scale_comb = ((5') - (5)) / min;
126
1274. Issue G_CROP to retrieve actual input window.
128
1295. Using the actual input window and the calculated combined scales, calculate the
130sensor target output window.
131
132 width_s_out = ((3') - (3)) = ((2') - (2)) / scale_comb
133
1346. Apply iterative S_FMT for new sensor target output window.
135
1367. Issue G_FMT to retrieve the actual sensor output window.
137
1388. Calculate sensor scales.
139
140 scale_s = ((3') - (3)) / ((2') - (2))
141
1429. Calculate sensor output subwindow to be cropped on CEU by applying sensor
143scales to the requested window.
144
145 width_ceu = ((5') - (5)) / scale_s
146
14710. Use CEU cropping for above calculated window.
148
14911. Calculate the CEU scales from the sensor scales, the results of (10), and the user
150window from (3).
151
152 scale_ceu = calc_scale(((5') - (5)), &width_u_out)
153
15412. Apply CEU scales.
155
156--
157Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index b806edaf3e75..74d677c8b036 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -561,6 +561,8 @@ video_device helper functions
561 561
562There are a few useful helper functions: 562There are a few useful helper functions:
563 563
564- file/video_device private data
565
564You can set/get driver private data in the video_device struct using: 566You can set/get driver private data in the video_device struct using:
565 567
566void *video_get_drvdata(struct video_device *vdev); 568void *video_get_drvdata(struct video_device *vdev);
@@ -575,8 +577,7 @@ struct video_device *video_devdata(struct file *file);
575 577
576returns the video_device belonging to the file struct. 578returns the video_device belonging to the file struct.
577 579
578The final helper function combines video_get_drvdata with 580The video_drvdata function combines video_get_drvdata with video_devdata:
579video_devdata:
580 581
581void *video_drvdata(struct file *file); 582void *video_drvdata(struct file *file);
582 583
@@ -584,6 +585,17 @@ You can go from a video_device struct to the v4l2_device struct using:
584 585
585struct v4l2_device *v4l2_dev = vdev->v4l2_dev; 586struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
586 587
588- Device node name
589
590The video_device node kernel name can be retrieved using
591
592const char *video_device_node_name(struct video_device *vdev);
593
594The name is used as a hint by userspace tools such as udev. The function
595should be used where possible instead of accessing the video_device::num and
596video_device::minor fields.
597
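For instance, a driver's open() handler might combine these helpers as
follows (a sketch with hypothetical "xyz" names, not a real driver):

#include <media/v4l2-dev.h>

/* Sketch: "xyz" names are placeholders for a real driver's types. */
static int xyz_open(struct file *file)
{
	struct xyz_dev *priv = video_drvdata(file);	/* set via video_set_drvdata() */
	struct video_device *vdev = video_devdata(file);

	pr_info("xyz: %s opened, private data %p\n",
		video_device_node_name(vdev), priv);
	return 0;
}
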
598
587video buffer helper functions 599video buffer helper functions
588----------------------------- 600-----------------------------
589 601
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 82a7bd1800b2..bc31636973e3 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -11,23 +11,21 @@ This optimization is more critical now as bigger and bigger physical memories
11(several GBs) are more readily available. 11(several GBs) are more readily available.
12 12
13Users can use the huge page support in Linux kernel by either using the mmap 13Users can use the huge page support in Linux kernel by either using the mmap
14system call or standard SYSv shared memory system calls (shmget, shmat). 14system call or standard SYSV shared memory system calls (shmget, shmat).
15 15
16First the Linux kernel needs to be built with the CONFIG_HUGETLBFS 16First the Linux kernel needs to be built with the CONFIG_HUGETLBFS
17(present under "File systems") and CONFIG_HUGETLB_PAGE (selected 17(present under "File systems") and CONFIG_HUGETLB_PAGE (selected
18automatically when CONFIG_HUGETLBFS is selected) configuration 18automatically when CONFIG_HUGETLBFS is selected) configuration
19options. 19options.
20 20
21The kernel built with huge page support should show the number of configured 21The /proc/meminfo file provides information about the total number of
22huge pages in the system by running the "cat /proc/meminfo" command. 22persistent hugetlb pages in the kernel's huge page pool. It also displays
23information about the number of free, reserved and surplus huge pages and the
24default huge page size. The huge page size is needed for generating the
25proper alignment and size of the arguments to system calls that map huge page
26regions.
23 27
24/proc/meminfo also provides information about the total number of hugetlb 28The output of "cat /proc/meminfo" will include lines like:
25pages configured in the kernel. It also displays information about the
26number of free hugetlb pages at any time. It also displays information about
27the configured huge page size - this is needed for generating the proper
28alignment and size of the arguments to the above system calls.
29
30The output of "cat /proc/meminfo" will have lines like:
31 29
32..... 30.....
33HugePages_Total: vvv 31HugePages_Total: vvv
@@ -53,59 +51,63 @@ HugePages_Surp is short for "surplus," and is the number of huge pages in
53/proc/filesystems should also show a filesystem of type "hugetlbfs" configured 51/proc/filesystems should also show a filesystem of type "hugetlbfs" configured
54in the kernel. 52in the kernel.
55 53
56/proc/sys/vm/nr_hugepages indicates the current number of configured hugetlb 54/proc/sys/vm/nr_hugepages indicates the current number of "persistent" huge
57pages in the kernel. Super user can dynamically request more (or free some 55pages in the kernel's huge page pool. "Persistent" huge pages will be
58pre-configured) huge pages. 56returned to the huge page pool when freed by a task. A user with root
59The allocation (or deallocation) of hugetlb pages is possible only if there are 57privileges can dynamically allocate more or free some persistent huge pages
60enough physically contiguous free pages in system (freeing of huge pages is 58by increasing or decreasing the value of 'nr_hugepages'.
61possible only if there are enough hugetlb pages free that can be transferred
62back to regular memory pool).
63 59
64Pages that are used as hugetlb pages are reserved inside the kernel and cannot 60Pages that are used as huge pages are reserved inside the kernel and cannot
65be used for other purposes. 61be used for other purposes. Huge pages cannot be swapped out under
62memory pressure.
66 63
67Once the kernel with Hugetlb page support is built and running, a user can 64Once a number of huge pages have been pre-allocated to the kernel huge page
68use either the mmap system call or shared memory system calls to start using 65pool, a user with appropriate privilege can use either the mmap system call
69the huge pages. It is required that the system administrator preallocate 66or shared memory system calls to use the huge pages. See the discussion of
70enough memory for huge page purposes. 67Using Huge Pages, below.
71 68
72The administrator can preallocate huge pages on the kernel boot command line by 69The administrator can allocate persistent huge pages on the kernel boot
73specifying the "hugepages=N" parameter, where 'N' = the number of huge pages 70command line by specifying the "hugepages=N" parameter, where 'N' = the
74requested. This is the most reliable method for preallocating huge pages as 71number of huge pages requested. This is the most reliable method of
75memory has not yet become fragmented. 72allocating huge pages as memory has not yet become fragmented.
76 73
77Some platforms support multiple huge page sizes. To preallocate huge pages 74Some platforms support multiple huge page sizes. To allocate huge pages
78of a specific size, one must precede the huge pages boot command parameters 75of a specific size, one must precede the huge pages boot command parameters
79with a huge page size selection parameter "hugepagesz=<size>". <size> must 76with a huge page size selection parameter "hugepagesz=<size>". <size> must
80be specified in bytes with optional scale suffix [kKmMgG]. The default huge 77be specified in bytes with optional scale suffix [kKmMgG]. The default huge
81page size may be selected with the "default_hugepagesz=<size>" boot parameter. 78page size may be selected with the "default_hugepagesz=<size>" boot parameter.
82 79
83/proc/sys/vm/nr_hugepages indicates the current number of configured [default 80When multiple huge page sizes are supported, /proc/sys/vm/nr_hugepages
84size] hugetlb pages in the kernel. Super user can dynamically request more 81indicates the current number of pre-allocated huge pages of the default size.
85(or free some pre-configured) huge pages. 82Thus, one can use the following command to dynamically allocate/deallocate
86 83default sized persistent huge pages:
87Use the following command to dynamically allocate/deallocate default sized
88huge pages:
89 84
90 echo 20 > /proc/sys/vm/nr_hugepages 85 echo 20 > /proc/sys/vm/nr_hugepages
91 86
92This command will try to configure 20 default sized huge pages in the system. 87This command will try to adjust the number of default sized huge pages in the
88huge page pool to 20, allocating or freeing huge pages, as required.
89
93On a NUMA platform, the kernel will attempt to distribute the huge page pool 90On a NUMA platform, the kernel will attempt to distribute the huge page pool
94over the all on-line nodes. These huge pages, allocated when nr_hugepages 91over all the set of allowed nodes specified by the NUMA memory policy of the
95is increased, are called "persistent huge pages". 92task that modifies nr_hugepages. The default for the allowed nodes--when the
93task has default memory policy--is all on-line nodes with memory. Allowed
94nodes with insufficient available, contiguous memory for a huge page will be
95silently skipped when allocating persistent huge pages. See the discussion
96below of the interaction of task memory policy, cpusets and per node attributes
97with the allocation and freeing of persistent huge pages.
96 98
97The success or failure of huge page allocation depends on the amount of 99The success or failure of huge page allocation depends on the amount of
98physically contiguous memory that is preset in system at the time of the 100physically contiguous memory that is present in system at the time of the
99allocation attempt. If the kernel is unable to allocate huge pages from 101allocation attempt. If the kernel is unable to allocate huge pages from
100some nodes in a NUMA system, it will attempt to make up the difference by 102some nodes in a NUMA system, it will attempt to make up the difference by
101allocating extra pages on other nodes with sufficient available contiguous 103allocating extra pages on other nodes with sufficient available contiguous
102memory, if any. 104memory, if any.
103 105
104System administrators may want to put this command in one of the local rc init 106System administrators may want to put this command in one of the local rc
105files. This will enable the kernel to request huge pages early in the boot 107init files. This will enable the kernel to allocate huge pages early in
106process when the possibility of getting physical contiguous pages is still 108the boot process when the possibility of getting physical contiguous pages
107very high. Administrators can verify the number of huge pages actually 109is still very high. Administrators can verify the number of huge pages
108allocated by checking the sysctl or meminfo. To check the per node 110actually allocated by checking the sysctl or meminfo. To check the per node
109distribution of huge pages in a NUMA system, use: 111distribution of huge pages in a NUMA system, use:
110 112
111 cat /sys/devices/system/node/node*/meminfo | fgrep Huge 113 cat /sys/devices/system/node/node*/meminfo | fgrep Huge
@@ -113,45 +115,47 @@ distribution of huge pages in a NUMA system, use:
113/proc/sys/vm/nr_overcommit_hugepages specifies how large the pool of 115/proc/sys/vm/nr_overcommit_hugepages specifies how large the pool of
114huge pages can grow, if more huge pages than /proc/sys/vm/nr_hugepages are 116huge pages can grow, if more huge pages than /proc/sys/vm/nr_hugepages are
115requested by applications. Writing any non-zero value into this file 117requested by applications. Writing any non-zero value into this file
116indicates that the hugetlb subsystem is allowed to try to obtain "surplus" 118indicates that the hugetlb subsystem is allowed to try to obtain that
117huge pages from the buddy allocator, when the normal pool is exhausted. As 119number of "surplus" huge pages from the kernel's normal page pool, when the
118these surplus huge pages go out of use, they are freed back to the buddy 120persistent huge page pool is exhausted. As these surplus huge pages become
119allocator. 121unused, they are freed back to the kernel's normal page pool.
120 122
121When increasing the huge page pool size via nr_hugepages, any surplus 123When increasing the huge page pool size via nr_hugepages, any existing surplus
122pages will first be promoted to persistent huge pages. Then, additional 124pages will first be promoted to persistent huge pages. Then, additional
123huge pages will be allocated, if necessary and if possible, to fulfill 125huge pages will be allocated, if necessary and if possible, to fulfill
124the new huge page pool size. 126the new persistent huge page pool size.
125 127
126The administrator may shrink the pool of preallocated huge pages for 128The administrator may shrink the pool of persistent huge pages for
127the default huge page size by setting the nr_hugepages sysctl to a 129the default huge page size by setting the nr_hugepages sysctl to a
128smaller value. The kernel will attempt to balance the freeing of huge pages 130smaller value. The kernel will attempt to balance the freeing of huge pages
129across all on-line nodes. Any free huge pages on the selected nodes will 131across all nodes in the memory policy of the task modifying nr_hugepages.
130be freed back to the buddy allocator. 132Any free huge pages on the selected nodes will be freed back to the kernel's
131 133normal page pool.
132Caveat: Shrinking the pool via nr_hugepages such that it becomes less 134
133than the number of huge pages in use will convert the balance to surplus 135Caveat: Shrinking the persistent huge page pool via nr_hugepages such that
134huge pages even if it would exceed the overcommit value. As long as 136it becomes less than the number of huge pages in use will convert the balance
135this condition holds, however, no more surplus huge pages will be 137of the in-use huge pages to surplus huge pages. This will occur even if
136allowed on the system until one of the two sysctls are increased 138the number of surplus pages would exceed the overcommit value. As long as
137sufficiently, or the surplus huge pages go out of use and are freed. 139this condition holds--that is, until nr_hugepages+nr_overcommit_hugepages is
140increased sufficiently, or the surplus huge pages go out of use and are freed--
141no more surplus huge pages will be allowed to be allocated.
138 142
139With support for multiple huge page pools at run-time available, much of 143With support for multiple huge page pools at run-time available, much of
140the huge page userspace interface has been duplicated in sysfs. The above 144the huge page userspace interface in /proc/sys/vm has been duplicated in sysfs.
141information applies to the default huge page size which will be 145The /proc interfaces discussed above have been retained for backwards
142controlled by the /proc interfaces for backwards compatibility. The root 146compatibility. The root huge page control directory in sysfs is:
143huge page control directory in sysfs is:
144 147
145 /sys/kernel/mm/hugepages 148 /sys/kernel/mm/hugepages
146 149
147For each huge page size supported by the running kernel, a subdirectory 150For each huge page size supported by the running kernel, a subdirectory
148will exist, of the form 151will exist, of the form:
149 152
150 hugepages-${size}kB 153 hugepages-${size}kB
151 154
152Inside each of these directories, the same set of files will exist: 155Inside each of these directories, the same set of files will exist:
153 156
154 nr_hugepages 157 nr_hugepages
158 nr_hugepages_mempolicy
155 nr_overcommit_hugepages 159 nr_overcommit_hugepages
156 free_hugepages 160 free_hugepages
157 resv_hugepages 161 resv_hugepages
@@ -159,6 +163,102 @@ Inside each of these directories, the same set of files will exist:
159 163
160which function as described above for the default huge page-sized case. 164which function as described above for the default huge page-sized case.
161 165
166
167Interaction of Task Memory Policy with Huge Page Allocation/Freeing
168
169Whether huge pages are allocated and freed via the /proc interface or
170the /sysfs interface using the nr_hugepages_mempolicy attribute, the NUMA
171nodes from which huge pages are allocated or freed are controlled by the
172NUMA memory policy of the task that modifies the nr_hugepages_mempolicy
173sysctl or attribute. When the nr_hugepages attribute is used, mempolicy
174is ignored.
175
176The recommended method to allocate or free huge pages to/from the kernel
177huge page pool, using the nr_hugepages example above, is:
178
179 numactl --interleave <node-list> echo 20 \
180 >/proc/sys/vm/nr_hugepages_mempolicy
181
182or, more succinctly:
183
184 numactl -m <node-list> echo 20 >/proc/sys/vm/nr_hugepages_mempolicy
185
186This will allocate or free abs(20 - nr_hugepages) huge pages to or from the nodes
187specified in <node-list>, depending on whether the number of persistent huge pages
188is initially less than or greater than 20, respectively. No huge pages will be
189allocated nor freed on any node not included in the specified <node-list>.
190
191When adjusting the persistent hugepage count via nr_hugepages_mempolicy, any
192memory policy mode--bind, preferred, local or interleave--may be used. The
193resulting effect on persistent huge page allocation is as follows:
194
1951) Regardless of mempolicy mode [see Documentation/vm/numa_memory_policy.txt],
196 persistent huge pages will be distributed across the node or nodes
197 specified in the mempolicy as if "interleave" had been specified.
198 However, if a node in the policy does not contain sufficient contiguous
199 memory for a huge page, the allocation will not "fallback" to the nearest
200 neighbor node with sufficient contiguous memory. To do this would cause
201 undesirable imbalance in the distribution of the huge page pool, or
202 possibly, allocation of persistent huge pages on nodes not allowed by
203 the task's memory policy.
204
2052) One or more nodes may be specified with the bind or interleave policy.
206 If more than one node is specified with the preferred policy, only the
207 lowest numeric id will be used. Local policy will select the node where
208 the task is running at the time the nodes_allowed mask is constructed.
209 For local policy to be deterministic, the task must be bound to a cpu or
210 cpus in a single node. Otherwise, the task could be migrated to some
211 other node at any time after launch and the resulting node will be
212 indeterminate. Thus, local policy is not very useful for this purpose.
213 Any of the other mempolicy modes may be used to specify a single node.
214
2153) The nodes allowed mask will be derived from any non-default task mempolicy,
216 whether this policy was set explicitly by the task itself or one of its
217 ancestors, such as numactl. This means that if the task is invoked from a
218 shell with non-default policy, that policy will be used. One can specify a
219 node list of "all" with numactl --interleave or --membind [-m] to achieve
220 interleaving over all nodes in the system or cpuset.
221
2224) Any task mempolicy specified--e.g., using numactl--will be constrained by
223 the resource limits of any cpuset in which the task runs. Thus, there will
224 be no way for a task with non-default policy running in a cpuset with a
225 subset of the system nodes to allocate huge pages outside the cpuset
226 without first moving to a cpuset that contains all of the desired nodes.
227
2285) Boot-time huge page allocation attempts to distribute the requested number
229 of huge pages over all on-line nodes with memory.
230
231Per Node Hugepages Attributes
232
233A subset of the contents of the root huge page control directory in sysfs,
234described above, will be replicated under the system device of each
235NUMA node with memory in:
236
237 /sys/devices/system/node/node[0-9]*/hugepages/
238
239Under this directory, the subdirectory for each supported huge page size
240contains the following attribute files:
241
242 nr_hugepages
243 free_hugepages
244 surplus_hugepages
245
246The free_ and surplus_ attribute files are read-only. They return the number
247of free and surplus [overcommitted] huge pages, respectively, on the parent
248node.
249
250The nr_hugepages attribute returns the total number of huge pages on the
251specified node. When this attribute is written, the number of persistent huge
252pages on the parent node will be adjusted to the specified value, if sufficient
253resources exist, regardless of the task's mempolicy or cpuset constraints.
254
255Note that the number of overcommit and reserve pages remain global quantities,
256as we don't know until fault time, when the faulting task's mempolicy is
257applied, from which node the huge page allocation will be attempted.
258
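For completeness, a small sketch that adjusts the per-node pool through these
attributes (node 0 and the 2048kB size are examples; use the sizes present on
your system, and note that root privileges are required):

/* Sketch: request 20 persistent default-sized huge pages on node 0. */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/devices/system/node/node0/hugepages/"
			   "hugepages-2048kB/nr_hugepages";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	fprintf(f, "20\n");
	return fclose(f) ? 1 : 0;
}
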
259
260Using Huge Pages
261
162If the user applications are going to request huge pages using mmap system 262If the user applications are going to request huge pages using mmap system
163call, then it is required that system administrator mount a file system of 263call, then it is required that system administrator mount a file system of
164type hugetlbfs: 264type hugetlbfs:
@@ -206,9 +306,11 @@ map_hugetlb.c.
206 * requesting huge pages. 306 * requesting huge pages.
207 * 307 *
208 * For the ia64 architecture, the Linux kernel reserves Region number 4 for 308 * For the ia64 architecture, the Linux kernel reserves Region number 4 for
209 * huge pages. That means the addresses starting with 0x800000... will need 309 * huge pages. That means that if one requires a fixed address, a huge page
210 * to be specified. Specifying a fixed address is not required on ppc64, 310 * aligned address starting with 0x800000... will be required. If a fixed
211 * i386 or x86_64. 311 * address is not required, the kernel will select an address in the proper
312 * range.
313 * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
212 * 314 *
213 * Note: The default shared memory limit is quite low on many kernels, 315 * Note: The default shared memory limit is quite low on many kernels,
214 * you may need to increase it via: 316 * you may need to increase it via:
@@ -237,14 +339,8 @@ map_hugetlb.c.
237 339
238#define dprintf(x) printf(x) 340#define dprintf(x) printf(x)
239 341
240/* Only ia64 requires this */ 342#define ADDR (void *)(0x0UL) /* let kernel choose address */
241#ifdef __ia64__
242#define ADDR (void *)(0x8000000000000000UL)
243#define SHMAT_FLAGS (SHM_RND)
244#else
245#define ADDR (void *)(0x0UL)
246#define SHMAT_FLAGS (0) 343#define SHMAT_FLAGS (0)
247#endif
248 344
249int main(void) 345int main(void)
250{ 346{
@@ -302,10 +398,12 @@ int main(void)
302 * example, the app is requesting memory of size 256MB that is backed by 398 * example, the app is requesting memory of size 256MB that is backed by
303 * huge pages. 399 * huge pages.
304 * 400 *
305 * For ia64 architecture, Linux kernel reserves Region number 4 for huge pages. 401 * For the ia64 architecture, the Linux kernel reserves Region number 4 for
306 * That means the addresses starting with 0x800000... will need to be 402 * huge pages. That means that if one requires a fixed address, a huge page
307 * specified. Specifying a fixed address is not required on ppc64, i386 403 * aligned address starting with 0x800000... will be required. If a fixed
308 * or x86_64. 404 * address is not required, the kernel will select an address in the proper
405 * range.
406 * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
309 */ 407 */
310#include <stdlib.h> 408#include <stdlib.h>
311#include <stdio.h> 409#include <stdio.h>
@@ -317,14 +415,8 @@ int main(void)
317#define LENGTH (256UL*1024*1024) 415#define LENGTH (256UL*1024*1024)
318#define PROTECTION (PROT_READ | PROT_WRITE) 416#define PROTECTION (PROT_READ | PROT_WRITE)
319 417
320/* Only ia64 requires this */ 418#define ADDR (void *)(0x0UL) /* let kernel choose address */
321#ifdef __ia64__
322#define ADDR (void *)(0x8000000000000000UL)
323#define FLAGS (MAP_SHARED | MAP_FIXED)
324#else
325#define ADDR (void *)(0x0UL)
326#define FLAGS (MAP_SHARED) 419#define FLAGS (MAP_SHARED)
327#endif
328 420
329void check_bytes(char *addr) 421void check_bytes(char *addr)
330{ 422{
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index 262d8e6793a3..b392e496f816 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -16,9 +16,9 @@ by sharing the data common between them. But it can be useful to any
16application which generates many instances of the same data. 16application which generates many instances of the same data.
17 17
18KSM only merges anonymous (private) pages, never pagecache (file) pages. 18KSM only merges anonymous (private) pages, never pagecache (file) pages.
19KSM's merged pages are at present locked into kernel memory for as long 19KSM's merged pages were originally locked into kernel memory, but can now
20as they are shared: so cannot be swapped out like the user pages they 20be swapped out just like other user pages (but sharing is broken when they
21replace (but swapping KSM pages should follow soon in a later release). 21are swapped back in: ksmd must rediscover their identity and merge again).
22 22
23KSM only operates on those areas of address space which an application 23KSM only operates on those areas of address space which an application
24has advised to be likely candidates for merging, by using the madvise(2) 24has advised to be likely candidates for merging, by using the madvise(2)
@@ -44,20 +44,12 @@ includes unmapped gaps (though working on the intervening mapped areas),
44and might fail with EAGAIN if not enough memory for internal structures. 44and might fail with EAGAIN if not enough memory for internal structures.
45 45
46Applications should be considerate in their use of MADV_MERGEABLE, 46Applications should be considerate in their use of MADV_MERGEABLE,
47restricting its use to areas likely to benefit. KSM's scans may use 47restricting its use to areas likely to benefit. KSM's scans may use a lot
48a lot of processing power, and its kernel-resident pages are a limited 48of processing power: some installations will disable KSM for that reason.
49resource. Some installations will disable KSM for these reasons.
50 49
51The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/, 50The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
52readable by all but writable only by root: 51readable by all but writable only by root:
53 52
54max_kernel_pages - set to maximum number of kernel pages that KSM may use
55 e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages"
56 Value 0 imposes no limit on the kernel pages KSM may use;
57 but note that any process using MADV_MERGEABLE can cause
58 KSM to allocate these pages, unswappable until it exits.
59 Default: quarter of memory (chosen to not pin too much)
60
61pages_to_scan - how many present pages to scan before ksmd goes to sleep 53pages_to_scan - how many present pages to scan before ksmd goes to sleep
62 e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan" 54 e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan"
63 Default: 100 (chosen for demonstration purposes) 55 Default: 100 (chosen for demonstration purposes)
@@ -75,7 +67,7 @@ run - set 0 to stop ksmd from running but keep merged pages,
75 67
76The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/: 68The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
77 69
78pages_shared - how many shared unswappable kernel pages KSM is using 70pages_shared - how many shared pages are being used
79pages_sharing - how many more sites are sharing them i.e. how much saved 71pages_sharing - how many more sites are sharing them i.e. how much saved
80pages_unshared - how many pages unique but repeatedly checked for merging 72pages_unshared - how many pages unique but repeatedly checked for merging
81pages_volatile - how many pages changing too fast to be placed in a tree 73pages_volatile - how many pages changing too fast to be placed in a tree
@@ -87,4 +79,4 @@ pages_volatile embraces several different kinds of activity, but a high
87proportion there would also indicate poor use of madvise MADV_MERGEABLE. 79proportion there would also indicate poor use of madvise MADV_MERGEABLE.
88 80
89Izik Eidus, 81Izik Eidus,
90Hugh Dickins, 24 Sept 2009 82Hugh Dickins, 17 Nov 2009
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index ea44ea502da1..7a7d9bab32ef 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -100,7 +100,7 @@
100#define BIT(name) (1ULL << KPF_##name) 100#define BIT(name) (1ULL << KPF_##name)
101#define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL)) 101#define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL))
102 102
103static char *page_flag_names[] = { 103static const char *page_flag_names[] = {
104 [KPF_LOCKED] = "L:locked", 104 [KPF_LOCKED] = "L:locked",
105 [KPF_ERROR] = "E:error", 105 [KPF_ERROR] = "E:error",
106 [KPF_REFERENCED] = "R:referenced", 106 [KPF_REFERENCED] = "R:referenced",
@@ -173,7 +173,7 @@ static int kpageflags_fd;
173static int opt_hwpoison; 173static int opt_hwpoison;
174static int opt_unpoison; 174static int opt_unpoison;
175 175
176static char *hwpoison_debug_fs = "/debug/hwpoison"; 176static const char hwpoison_debug_fs[] = "/debug/hwpoison";
177static int hwpoison_inject_fd; 177static int hwpoison_inject_fd;
178static int hwpoison_forget_fd; 178static int hwpoison_forget_fd;
179 179
@@ -560,7 +560,7 @@ static void walk_pfn(unsigned long voffset,
560{ 560{
561 uint64_t buf[KPAGEFLAGS_BATCH]; 561 uint64_t buf[KPAGEFLAGS_BATCH];
562 unsigned long batch; 562 unsigned long batch;
563 unsigned long pages; 563 long pages;
564 unsigned long i; 564 unsigned long i;
565 565
566 while (count) { 566 while (count) {
@@ -673,30 +673,35 @@ static void usage(void)
673 673
674 printf( 674 printf(
675"page-types [options]\n" 675"page-types [options]\n"
676" -r|--raw Raw mode, for kernel developers\n" 676" -r|--raw Raw mode, for kernel developers\n"
677" -a|--addr addr-spec Walk a range of pages\n" 677" -d|--describe flags Describe flags\n"
678" -b|--bits bits-spec Walk pages with specified bits\n" 678" -a|--addr addr-spec Walk a range of pages\n"
679" -p|--pid pid Walk process address space\n" 679" -b|--bits bits-spec Walk pages with specified bits\n"
680" -p|--pid pid Walk process address space\n"
680#if 0 /* planned features */ 681#if 0 /* planned features */
681" -f|--file filename Walk file address space\n" 682" -f|--file filename Walk file address space\n"
682#endif 683#endif
683" -l|--list Show page details in ranges\n" 684" -l|--list Show page details in ranges\n"
684" -L|--list-each Show page details one by one\n" 685" -L|--list-each Show page details one by one\n"
685" -N|--no-summary Don't show summay info\n" 686" -N|--no-summary Don't show summay info\n"
686" -X|--hwpoison hwpoison pages\n" 687" -X|--hwpoison hwpoison pages\n"
687" -x|--unpoison unpoison pages\n" 688" -x|--unpoison unpoison pages\n"
688" -h|--help Show this usage message\n" 689" -h|--help Show this usage message\n"
690"flags:\n"
691" 0x10 bitfield format, e.g.\n"
692" anon bit-name, e.g.\n"
693" 0x10,anon comma-separated list, e.g.\n"
689"addr-spec:\n" 694"addr-spec:\n"
690" N one page at offset N (unit: pages)\n" 695" N one page at offset N (unit: pages)\n"
691" N+M pages range from N to N+M-1\n" 696" N+M pages range from N to N+M-1\n"
692" N,M pages range from N to M-1\n" 697" N,M pages range from N to M-1\n"
693" N, pages range from N to end\n" 698" N, pages range from N to end\n"
694" ,M pages range from 0 to M-1\n" 699" ,M pages range from 0 to M-1\n"
695"bits-spec:\n" 700"bits-spec:\n"
696" bit1,bit2 (flags & (bit1|bit2)) != 0\n" 701" bit1,bit2 (flags & (bit1|bit2)) != 0\n"
697" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n" 702" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n"
698" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n" 703" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n"
699" =bit1,bit2 flags == (bit1|bit2)\n" 704" =bit1,bit2 flags == (bit1|bit2)\n"
700"bit-names:\n" 705"bit-names:\n"
701 ); 706 );
702 707
@@ -884,13 +889,23 @@ static void parse_bits_mask(const char *optarg)
884 add_bits_filter(mask, bits); 889 add_bits_filter(mask, bits);
885} 890}
886 891
892static void describe_flags(const char *optarg)
893{
894 uint64_t flags = parse_flag_names(optarg, 0);
887 895
888static struct option opts[] = { 896 printf("0x%016llx\t%s\t%s\n",
897 (unsigned long long)flags,
898 page_flag_name(flags),
899 page_flag_longname(flags));
900}
901
902static const struct option opts[] = {
889 { "raw" , 0, NULL, 'r' }, 903 { "raw" , 0, NULL, 'r' },
890 { "pid" , 1, NULL, 'p' }, 904 { "pid" , 1, NULL, 'p' },
891 { "file" , 1, NULL, 'f' }, 905 { "file" , 1, NULL, 'f' },
892 { "addr" , 1, NULL, 'a' }, 906 { "addr" , 1, NULL, 'a' },
893 { "bits" , 1, NULL, 'b' }, 907 { "bits" , 1, NULL, 'b' },
908 { "describe" , 1, NULL, 'd' },
894 { "list" , 0, NULL, 'l' }, 909 { "list" , 0, NULL, 'l' },
895 { "list-each" , 0, NULL, 'L' }, 910 { "list-each" , 0, NULL, 'L' },
896 { "no-summary", 0, NULL, 'N' }, 911 { "no-summary", 0, NULL, 'N' },
@@ -907,7 +922,7 @@ int main(int argc, char *argv[])
907 page_size = getpagesize(); 922 page_size = getpagesize();
908 923
909 while ((c = getopt_long(argc, argv, 924 while ((c = getopt_long(argc, argv,
910 "rp:f:a:b:lLNXxh", opts, NULL)) != -1) { 925 "rp:f:a:b:d:lLNXxh", opts, NULL)) != -1) {
911 switch (c) { 926 switch (c) {
912 case 'r': 927 case 'r':
913 opt_raw = 1; 928 opt_raw = 1;
@@ -924,6 +939,9 @@ int main(int argc, char *argv[])
924 case 'b': 939 case 'b':
925 parse_bits_mask(optarg); 940 parse_bits_mask(optarg);
926 break; 941 break;
942 case 'd':
943 describe_flags(optarg);
944 exit(0);
927 case 'l': 945 case 'l':
928 opt_list = 1; 946 opt_list = 1;
929 break; 947 break;
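Note: the new -d/--describe mode accepts the three forms documented in the "flags:" help text above (a hex mask, a single bit name, or a comma-separated mix). A hedged userspace sketch of that parsing idea follows; it is not the tool's own code, and the bit-name table is a deliberately tiny stand-in for the full kpageflags list.

/* sketch: combine "0x10", "anon" or "0x10,anon" into one mask, roughly
 * what "page-types -d <flags>" does before printing the flag names */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static const struct { const char *name; int bit; } bit_names[] = {
	{ "locked", 0 }, { "referenced", 2 }, { "dirty", 4 }, { "anon", 12 },
};

static uint64_t parse_one(const char *s)
{
	unsigned int i;

	if (!strncmp(s, "0x", 2))
		return strtoull(s, NULL, 16);			/* bitfield form */
	for (i = 0; i < sizeof(bit_names) / sizeof(bit_names[0]); i++)
		if (!strcmp(s, bit_names[i].name))
			return 1ULL << bit_names[i].bit;	/* bit-name form */
	return 0;
}

int main(int argc, char *argv[])
{
	uint64_t flags = 0;
	char *tok;

	if (argc < 2)
		return 1;
	for (tok = strtok(argv[1], ","); tok; tok = strtok(NULL, ","))
		flags |= parse_one(tok);			/* comma-separated form */
	printf("0x%016llx\n", (unsigned long long)flags);
	return 0;
}

Run as e.g. "./describe 0x10,anon" to see the combined mask; the real tool additionally resolves it back to short and long flag names via page_flag_name()/page_flag_longname().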
diff --git a/MAINTAINERS b/MAINTAINERS
index 1f21c34124db..d6a27110a747 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -835,13 +835,13 @@ F: arch/arm/mach-pxa/palmte2.c
835F: arch/arm/mach-pxa/include/mach/palmtc.h 835F: arch/arm/mach-pxa/include/mach/palmtc.h
836F: arch/arm/mach-pxa/palmtc.c 836F: arch/arm/mach-pxa/palmtc.c
837 837
838ARM/PALM TREO 680 SUPPORT 838ARM/PALM TREO SUPPORT
839M: Tomas Cech <sleep_walker@suse.cz> 839M: Tomas Cech <sleep_walker@suse.cz>
840L: linux-arm-kernel@lists.infradead.org 840L: linux-arm-kernel@lists.infradead.org
841W: http://hackndev.com 841W: http://hackndev.com
842S: Maintained 842S: Maintained
843F: arch/arm/mach-pxa/include/mach/treo680.h 843F: arch/arm/mach-pxa/include/mach/palmtreo.h
844F: arch/arm/mach-pxa/treo680.c 844F: arch/arm/mach-pxa/palmtreo.c
845 845
846ARM/PALMZ72 SUPPORT 846ARM/PALMZ72 SUPPORT
847M: Sergey Lapin <slapin@ossfans.org> 847M: Sergey Lapin <slapin@ossfans.org>
@@ -1482,8 +1482,8 @@ F: include/linux/coda*.h
1482 1482
1483COMMON INTERNET FILE SYSTEM (CIFS) 1483COMMON INTERNET FILE SYSTEM (CIFS)
1484M: Steve French <sfrench@samba.org> 1484M: Steve French <sfrench@samba.org>
1485L: linux-cifs-client@lists.samba.org 1485L: linux-cifs-client@lists.samba.org (moderated for non-subscribers)
1486L: samba-technical@lists.samba.org 1486L: samba-technical@lists.samba.org (moderated for non-subscribers)
1487W: http://linux-cifs.samba.org/ 1487W: http://linux-cifs.samba.org/
1488T: git git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git 1488T: git git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git
1489S: Supported 1489S: Supported
@@ -3081,8 +3081,11 @@ S: Maintained
3081F: fs/autofs4/ 3081F: fs/autofs4/
3082 3082
3083KERNEL BUILD 3083KERNEL BUILD
3084M: Michal Marek <mmarek@suse.cz>
3085T: git git://repo.or.cz/linux-kbuild.git for-next
3086T: git git://repo.or.cz/linux-kbuild.git for-linus
3084L: linux-kbuild@vger.kernel.org 3087L: linux-kbuild@vger.kernel.org
3085S: Orphan 3088S: Maintained
3086F: Documentation/kbuild/ 3089F: Documentation/kbuild/
3087F: Makefile 3090F: Makefile
3088F: scripts/Makefile.* 3091F: scripts/Makefile.*
@@ -3124,7 +3127,6 @@ L: kvm@vger.kernel.org
3124W: http://kvm.qumranet.com 3127W: http://kvm.qumranet.com
3125S: Supported 3128S: Supported
3126F: arch/x86/include/asm/svm.h 3129F: arch/x86/include/asm/svm.h
3127F: arch/x86/kvm/kvm_svm.h
3128F: arch/x86/kvm/svm.c 3130F: arch/x86/kvm/svm.c
3129 3131
3130KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC 3132KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
@@ -5974,6 +5976,7 @@ M: Mark Brown <broonie@opensource.wolfsonmicro.com>
5974T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus 5976T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
5975W: http://opensource.wolfsonmicro.com/node/8 5977W: http://opensource.wolfsonmicro.com/node/8
5976S: Supported 5978S: Supported
5979F: Documentation/hwmon/wm83??
5977F: drivers/leds/leds-wm83*.c 5980F: drivers/leds/leds-wm83*.c
5978F: drivers/mfd/wm8*.c 5981F: drivers/mfd/wm8*.c
5979F: drivers/power/wm83*.c 5982F: drivers/power/wm83*.c
@@ -5983,14 +5986,14 @@ F: drivers/video/backlight/wm83*_bl.c
5983F: drivers/watchdog/wm83*_wdt.c 5986F: drivers/watchdog/wm83*_wdt.c
5984F: include/linux/mfd/wm831x/ 5987F: include/linux/mfd/wm831x/
5985F: include/linux/mfd/wm8350/ 5988F: include/linux/mfd/wm8350/
5986F: include/linux/mfd/wm8400/ 5989F: include/linux/mfd/wm8400*
5987F: sound/soc/codecs/wm8350.c 5990F: sound/soc/codecs/wm8350.*
5988F: sound/soc/codecs/wm8400.c 5991F: sound/soc/codecs/wm8400.*
5989 5992
5990X.25 NETWORK LAYER 5993X.25 NETWORK LAYER
5991M: Henner Eisen <eis@baty.hanse.de> 5994M: Andrew Hendry <andrew.hendry@gmail.com>
5992L: linux-x25@vger.kernel.org 5995L: linux-x25@vger.kernel.org
5993S: Maintained 5996S: Odd Fixes
5994F: Documentation/networking/x25* 5997F: Documentation/networking/x25*
5995F: include/net/x25* 5998F: include/net/x25*
5996F: net/x25/ 5999F: net/x25/
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 46bfff58f670..471c07292e0b 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr)
435 set_hae(msb); \ 435 set_hae(msb); \
436} 436}
437 437
438extern spinlock_t t2_hae_lock; 438extern raw_spinlock_t t2_hae_lock;
439 439
440/* 440/*
441 * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since 441 * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
448 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 448 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
449 unsigned long result, msb; 449 unsigned long result, msb;
450 unsigned long flags; 450 unsigned long flags;
451 spin_lock_irqsave(&t2_hae_lock, flags); 451 raw_spin_lock_irqsave(&t2_hae_lock, flags);
452 452
453 t2_set_hae; 453 t2_set_hae;
454 454
455 result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); 455 result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
456 spin_unlock_irqrestore(&t2_hae_lock, flags); 456 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
457 return __kernel_extbl(result, addr & 3); 457 return __kernel_extbl(result, addr & 3);
458} 458}
459 459
@@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
462 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 462 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
463 unsigned long result, msb; 463 unsigned long result, msb;
464 unsigned long flags; 464 unsigned long flags;
465 spin_lock_irqsave(&t2_hae_lock, flags); 465 raw_spin_lock_irqsave(&t2_hae_lock, flags);
466 466
467 t2_set_hae; 467 t2_set_hae;
468 468
469 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); 469 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
470 spin_unlock_irqrestore(&t2_hae_lock, flags); 470 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
471 return __kernel_extwl(result, addr & 3); 471 return __kernel_extwl(result, addr & 3);
472} 472}
473 473
@@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
480 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 480 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
481 unsigned long result, msb; 481 unsigned long result, msb;
482 unsigned long flags; 482 unsigned long flags;
483 spin_lock_irqsave(&t2_hae_lock, flags); 483 raw_spin_lock_irqsave(&t2_hae_lock, flags);
484 484
485 t2_set_hae; 485 t2_set_hae;
486 486
487 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); 487 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
488 spin_unlock_irqrestore(&t2_hae_lock, flags); 488 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
489 return result & 0xffffffffUL; 489 return result & 0xffffffffUL;
490} 490}
491 491
@@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
494 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 494 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
495 unsigned long r0, r1, work, msb; 495 unsigned long r0, r1, work, msb;
496 unsigned long flags; 496 unsigned long flags;
497 spin_lock_irqsave(&t2_hae_lock, flags); 497 raw_spin_lock_irqsave(&t2_hae_lock, flags);
498 498
499 t2_set_hae; 499 t2_set_hae;
500 500
501 work = (addr << 5) + T2_SPARSE_MEM + 0x18; 501 work = (addr << 5) + T2_SPARSE_MEM + 0x18;
502 r0 = *(vuip)(work); 502 r0 = *(vuip)(work);
503 r1 = *(vuip)(work + (4 << 5)); 503 r1 = *(vuip)(work + (4 << 5));
504 spin_unlock_irqrestore(&t2_hae_lock, flags); 504 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
505 return r1 << 32 | r0; 505 return r1 << 32 | r0;
506} 506}
507 507
@@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
510 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 510 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
511 unsigned long msb, w; 511 unsigned long msb, w;
512 unsigned long flags; 512 unsigned long flags;
513 spin_lock_irqsave(&t2_hae_lock, flags); 513 raw_spin_lock_irqsave(&t2_hae_lock, flags);
514 514
515 t2_set_hae; 515 t2_set_hae;
516 516
517 w = __kernel_insbl(b, addr & 3); 517 w = __kernel_insbl(b, addr & 3);
518 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; 518 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
519 spin_unlock_irqrestore(&t2_hae_lock, flags); 519 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
520} 520}
521 521
522__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) 522__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
@@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
524 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 524 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
525 unsigned long msb, w; 525 unsigned long msb, w;
526 unsigned long flags; 526 unsigned long flags;
527 spin_lock_irqsave(&t2_hae_lock, flags); 527 raw_spin_lock_irqsave(&t2_hae_lock, flags);
528 528
529 t2_set_hae; 529 t2_set_hae;
530 530
531 w = __kernel_inswl(b, addr & 3); 531 w = __kernel_inswl(b, addr & 3);
532 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; 532 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
533 spin_unlock_irqrestore(&t2_hae_lock, flags); 533 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
534} 534}
535 535
536/* 536/*
@@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
542 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 542 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
543 unsigned long msb; 543 unsigned long msb;
544 unsigned long flags; 544 unsigned long flags;
545 spin_lock_irqsave(&t2_hae_lock, flags); 545 raw_spin_lock_irqsave(&t2_hae_lock, flags);
546 546
547 t2_set_hae; 547 t2_set_hae;
548 548
549 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; 549 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
550 spin_unlock_irqrestore(&t2_hae_lock, flags); 550 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
551} 551}
552 552
553__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) 553__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
@@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
555 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 555 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
556 unsigned long msb, work; 556 unsigned long msb, work;
557 unsigned long flags; 557 unsigned long flags;
558 spin_lock_irqsave(&t2_hae_lock, flags); 558 raw_spin_lock_irqsave(&t2_hae_lock, flags);
559 559
560 t2_set_hae; 560 t2_set_hae;
561 561
562 work = (addr << 5) + T2_SPARSE_MEM + 0x18; 562 work = (addr << 5) + T2_SPARSE_MEM + 0x18;
563 *(vuip)work = b; 563 *(vuip)work = b;
564 *(vuip)(work + (4 << 5)) = b >> 32; 564 *(vuip)(work + (4 << 5)) = b >> 32;
565 spin_unlock_irqrestore(&t2_hae_lock, flags); 565 raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
566} 566}
567 567
568__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) 568__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
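The t2_hae_lock conversion above follows the new split between arch_/raw_ locks: a raw_spinlock_t always behaves as a spinning lock, which is what low-level HAE window manipulation needs. A minimal sketch of the pattern in hypothetical driver code (not taken from the patch):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_hw_lock);

static void example_hw_window_access(void)
{
	unsigned long flags;

	/* raw_ lock: never converted to a sleeping lock by the core code */
	raw_spin_lock_irqsave(&example_hw_lock, flags);
	/* ... program the address window, perform the sparse-space access ... */
	raw_spin_unlock_irqrestore(&example_hw_lock, flags);
}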
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 5c75c1b2352a..9baae8afe8a3 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -81,7 +81,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
81#define ELF_DATA ELFDATA2LSB 81#define ELF_DATA ELFDATA2LSB
82#define ELF_ARCH EM_ALPHA 82#define ELF_ARCH EM_ALPHA
83 83
84#define USE_ELF_CORE_DUMP
85#define ELF_EXEC_PAGESIZE 8192 84#define ELF_EXEC_PAGESIZE 8192
86 85
87/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 86/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e38fb95cb335..d0faca1e992d 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
12 * We make no fairness assumptions. They have a cost. 12 * We make no fairness assumptions. They have a cost.
13 */ 13 */
14 14
15#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 15#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
16#define __raw_spin_is_locked(x) ((x)->lock != 0) 16#define arch_spin_is_locked(x) ((x)->lock != 0)
17#define __raw_spin_unlock_wait(x) \ 17#define arch_spin_unlock_wait(x) \
18 do { cpu_relax(); } while ((x)->lock) 18 do { cpu_relax(); } while ((x)->lock)
19 19
20static inline void __raw_spin_unlock(raw_spinlock_t * lock) 20static inline void arch_spin_unlock(arch_spinlock_t * lock)
21{ 21{
22 mb(); 22 mb();
23 lock->lock = 0; 23 lock->lock = 0;
24} 24}
25 25
26static inline void __raw_spin_lock(raw_spinlock_t * lock) 26static inline void arch_spin_lock(arch_spinlock_t * lock)
27{ 27{
28 long tmp; 28 long tmp;
29 29
@@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
43 : "m"(lock->lock) : "memory"); 43 : "m"(lock->lock) : "memory");
44} 44}
45 45
46static inline int __raw_spin_trylock(raw_spinlock_t *lock) 46static inline int arch_spin_trylock(arch_spinlock_t *lock)
47{ 47{
48 return !test_and_set_bit(0, &lock->lock); 48 return !test_and_set_bit(0, &lock->lock);
49} 49}
50 50
51/***********************************************************/ 51/***********************************************************/
52 52
53static inline int __raw_read_can_lock(raw_rwlock_t *lock) 53static inline int arch_read_can_lock(arch_rwlock_t *lock)
54{ 54{
55 return (lock->lock & 1) == 0; 55 return (lock->lock & 1) == 0;
56} 56}
57 57
58static inline int __raw_write_can_lock(raw_rwlock_t *lock) 58static inline int arch_write_can_lock(arch_rwlock_t *lock)
59{ 59{
60 return lock->lock == 0; 60 return lock->lock == 0;
61} 61}
62 62
63static inline void __raw_read_lock(raw_rwlock_t *lock) 63static inline void arch_read_lock(arch_rwlock_t *lock)
64{ 64{
65 long regx; 65 long regx;
66 66
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
80 : "m" (*lock) : "memory"); 80 : "m" (*lock) : "memory");
81} 81}
82 82
83static inline void __raw_write_lock(raw_rwlock_t *lock) 83static inline void arch_write_lock(arch_rwlock_t *lock)
84{ 84{
85 long regx; 85 long regx;
86 86
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
100 : "m" (*lock) : "memory"); 100 : "m" (*lock) : "memory");
101} 101}
102 102
103static inline int __raw_read_trylock(raw_rwlock_t * lock) 103static inline int arch_read_trylock(arch_rwlock_t * lock)
104{ 104{
105 long regx; 105 long regx;
106 int success; 106 int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
122 return success; 122 return success;
123} 123}
124 124
125static inline int __raw_write_trylock(raw_rwlock_t * lock) 125static inline int arch_write_trylock(arch_rwlock_t * lock)
126{ 126{
127 long regx; 127 long regx;
128 int success; 128 int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
144 return success; 144 return success;
145} 145}
146 146
147static inline void __raw_read_unlock(raw_rwlock_t * lock) 147static inline void arch_read_unlock(arch_rwlock_t * lock)
148{ 148{
149 long regx; 149 long regx;
150 __asm__ __volatile__( 150 __asm__ __volatile__(
@@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
160 : "m" (*lock) : "memory"); 160 : "m" (*lock) : "memory");
161} 161}
162 162
163static inline void __raw_write_unlock(raw_rwlock_t * lock) 163static inline void arch_write_unlock(arch_rwlock_t * lock)
164{ 164{
165 mb(); 165 mb();
166 lock->lock = 0; 166 lock->lock = 0;
167} 167}
168 168
169#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 169#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
170#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 170#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
171 171
172#define _raw_spin_relax(lock) cpu_relax() 172#define arch_spin_relax(lock) cpu_relax()
173#define _raw_read_relax(lock) cpu_relax() 173#define arch_read_relax(lock) cpu_relax()
174#define _raw_write_relax(lock) cpu_relax() 174#define arch_write_relax(lock) cpu_relax()
175 175
176#endif /* _ALPHA_SPINLOCK_H */ 176#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0d..54c2afce0a1d 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int lock; 15 volatile unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define __RAW_RW_LOCK_UNLOCKED { 0 } 18#define __ARCH_RW_LOCK_UNLOCKED { 0 }
19 19
20#endif 20#endif
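The alpha spinlock changes (and the matching arm ones below) are a namespace move: the architecture now provides arch_spinlock_t/arch_rwlock_t plus arch_*() primitives, freeing the raw_* names for the core locking layer. A rough sketch of the resulting layering; the wrapper type and function here are illustrative, not the real core-kernel definitions:

#include <linux/spinlock.h>

/* illustration: generic code wraps the arch-provided lock and primitive */
typedef struct {
	arch_spinlock_t raw_lock;		/* was raw_spinlock_t before the rename */
} example_raw_spinlock_t;

static inline void example_raw_spin_lock(example_raw_spinlock_t *lock)
{
	arch_spin_lock(&lock->raw_lock);	/* was __raw_spin_lock() */
}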
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index d9980d47ab81..e6d90568b65d 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,7 +74,7 @@
74# define DBG(args) 74# define DBG(args)
75#endif 75#endif
76 76
77DEFINE_SPINLOCK(t2_hae_lock); 77DEFINE_RAW_SPINLOCK(t2_hae_lock);
78 78
79static volatile unsigned int t2_mcheck_any_expected; 79static volatile unsigned int t2_mcheck_any_expected;
80static volatile unsigned int t2_mcheck_last_taken; 80static volatile unsigned int t2_mcheck_last_taken;
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c0de072b8305..5f2cf23c4648 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v)
81#endif 81#endif
82 82
83 if (irq < ACTUAL_NR_IRQS) { 83 if (irq < ACTUAL_NR_IRQS) {
84 spin_lock_irqsave(&irq_desc[irq].lock, flags); 84 raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
85 action = irq_desc[irq].action; 85 action = irq_desc[irq].action;
86 if (!action) 86 if (!action)
87 goto unlock; 87 goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v)
105 105
106 seq_putc(p, '\n'); 106 seq_putc(p, '\n');
107unlock: 107unlock:
108 spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 108 raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
109 } else if (irq == ACTUAL_NR_IRQS) { 109 } else if (irq == ACTUAL_NR_IRQS) {
110#ifdef CONFIG_SMP 110#ifdef CONFIG_SMP
111 seq_puts(p, "IPI: "); 111 seq_puts(p, "IPI: ");
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index d12af472e1c0..dbbf04f9230e 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
36#include <asm/console.h> 37#include <asm/console.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38#include <asm/machvec.h> 39#include <asm/machvec.h>
@@ -79,42 +80,41 @@ static srm_env_t srm_named_entries[] = {
79static srm_env_t srm_numbered_entries[256]; 80static srm_env_t srm_numbered_entries[256];
80 81
81 82
82static int 83static int srm_env_proc_show(struct seq_file *m, void *v)
83srm_env_read(char *page, char **start, off_t off, int count, int *eof,
84 void *data)
85{ 84{
86 int nbytes;
87 unsigned long ret; 85 unsigned long ret;
88 srm_env_t *entry; 86 srm_env_t *entry;
87 char *page;
89 88
90 if (off != 0) { 89 entry = (srm_env_t *)m->private;
91 *eof = 1; 90 page = (char *)__get_free_page(GFP_USER);
92 return 0; 91 if (!page)
93 } 92 return -ENOMEM;
94 93
95 entry = (srm_env_t *) data; 94 ret = callback_getenv(entry->id, page, PAGE_SIZE);
96 ret = callback_getenv(entry->id, page, count);
97 95
98 if ((ret >> 61) == 0) { 96 if ((ret >> 61) == 0) {
99 nbytes = (int) ret; 97 seq_write(m, page, ret);
100 *eof = 1; 98 ret = 0;
101 } else 99 } else
102 nbytes = -EFAULT; 100 ret = -EFAULT;
101 free_page((unsigned long)page);
102 return ret;
103}
103 104
104 return nbytes; 105static int srm_env_proc_open(struct inode *inode, struct file *file)
106{
107 return single_open(file, srm_env_proc_show, PDE(inode)->data);
105} 108}
106 109
107static int 110static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
108srm_env_write(struct file *file, const char __user *buffer, unsigned long count, 111 size_t count, loff_t *pos)
109 void *data)
110{ 112{
111 int res; 113 int res;
112 srm_env_t *entry; 114 srm_env_t *entry = PDE(file->f_path.dentry->d_inode)->data;
113 char *buf = (char *) __get_free_page(GFP_USER); 115 char *buf = (char *) __get_free_page(GFP_USER);
114 unsigned long ret1, ret2; 116 unsigned long ret1, ret2;
115 117
116 entry = (srm_env_t *) data;
117
118 if (!buf) 118 if (!buf)
119 return -ENOMEM; 119 return -ENOMEM;
120 120
@@ -140,6 +140,15 @@ srm_env_write(struct file *file, const char __user *buffer, unsigned long count,
140 return res; 140 return res;
141} 141}
142 142
143static const struct file_operations srm_env_proc_fops = {
144 .owner = THIS_MODULE,
145 .open = srm_env_proc_open,
146 .read = seq_read,
147 .llseek = seq_lseek,
148 .release = single_release,
149 .write = srm_env_proc_write,
150};
151
143static void 152static void
144srm_env_cleanup(void) 153srm_env_cleanup(void)
145{ 154{
@@ -245,15 +254,10 @@ srm_env_init(void)
245 */ 254 */
246 entry = srm_named_entries; 255 entry = srm_named_entries;
247 while (entry->name && entry->id) { 256 while (entry->name && entry->id) {
248 entry->proc_entry = create_proc_entry(entry->name, 257 entry->proc_entry = proc_create_data(entry->name, 0644, named_dir,
249 0644, named_dir); 258 &srm_env_proc_fops, entry);
250 if (!entry->proc_entry) 259 if (!entry->proc_entry)
251 goto cleanup; 260 goto cleanup;
252
253 entry->proc_entry->data = (void *) entry;
254 entry->proc_entry->read_proc = srm_env_read;
255 entry->proc_entry->write_proc = srm_env_write;
256
257 entry++; 261 entry++;
258 } 262 }
259 263
@@ -264,15 +268,12 @@ srm_env_init(void)
264 entry = &srm_numbered_entries[var_num]; 268 entry = &srm_numbered_entries[var_num];
265 entry->name = number[var_num]; 269 entry->name = number[var_num];
266 270
267 entry->proc_entry = create_proc_entry(entry->name, 271 entry->proc_entry = proc_create_data(entry->name, 0644, numbered_dir,
268 0644, numbered_dir); 272 &srm_env_proc_fops, entry);
269 if (!entry->proc_entry) 273 if (!entry->proc_entry)
270 goto cleanup; 274 goto cleanup;
271 275
272 entry->id = var_num; 276 entry->id = var_num;
273 entry->proc_entry->data = (void *) entry;
274 entry->proc_entry->read_proc = srm_env_read;
275 entry->proc_entry->write_proc = srm_env_write;
276 } 277 }
277 278
278 printk(KERN_INFO "%s: version %s loaded successfully\n", NAME, 279 printk(KERN_INFO "%s: version %s loaded successfully\n", NAME,
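srm_env.c above moves from the legacy read_proc/write_proc hooks to proc_create_data() plus the single_open() seq_file helpers. The same pattern in its smallest form, as a hypothetical module (entry name and value are made up):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	/* m->private carries the per-entry pointer passed to single_open() */
	seq_printf(m, "value: %d\n", 42);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	proc_create_data("example", 0444, NULL, &example_proc_fops, NULL);
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");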
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 6aac3f5bb2f3..a399bb5730f1 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -101,7 +101,6 @@ extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
101int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); 101int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
102#define ELF_CORE_COPY_TASK_REGS dump_task_regs 102#define ELF_CORE_COPY_TASK_REGS dump_task_regs
103 103
104#define USE_ELF_CORE_DUMP
105#define ELF_EXEC_PAGESIZE 4096 104#define ELF_EXEC_PAGESIZE 4096
106 105
107/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 106/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index acac5302e4ea..8920b2d6e3b8 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
26 */ 26 */
27#define do_bad_IRQ(irq,desc) \ 27#define do_bad_IRQ(irq,desc) \
28do { \ 28do { \
29 spin_lock(&desc->lock); \ 29 raw_spin_lock(&desc->lock); \
30 handle_bad_irq(irq, desc); \ 30 handle_bad_irq(irq, desc); \
31 spin_unlock(&desc->lock); \ 31 raw_spin_unlock(&desc->lock); \
32} while(0) 32} while(0)
33 33
34#endif 34#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ede..c91c64cab922 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
17 * Locked value: 1 17 * Locked value: 1
18 */ 18 */
19 19
20#define __raw_spin_is_locked(x) ((x)->lock != 0) 20#define arch_spin_is_locked(x) ((x)->lock != 0)
21#define __raw_spin_unlock_wait(lock) \ 21#define arch_spin_unlock_wait(lock) \
22 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 22 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
23 23
24#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 24#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
25 25
26static inline void __raw_spin_lock(raw_spinlock_t *lock) 26static inline void arch_spin_lock(arch_spinlock_t *lock)
27{ 27{
28 unsigned long tmp; 28 unsigned long tmp;
29 29
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
43 smp_mb(); 43 smp_mb();
44} 44}
45 45
46static inline int __raw_spin_trylock(raw_spinlock_t *lock) 46static inline int arch_spin_trylock(arch_spinlock_t *lock)
47{ 47{
48 unsigned long tmp; 48 unsigned long tmp;
49 49
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
63 } 63 }
64} 64}
65 65
66static inline void __raw_spin_unlock(raw_spinlock_t *lock) 66static inline void arch_spin_unlock(arch_spinlock_t *lock)
67{ 67{
68 smp_mb(); 68 smp_mb();
69 69
@@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
86 * just write zero since the lock is exclusively held. 86 * just write zero since the lock is exclusively held.
87 */ 87 */
88 88
89static inline void __raw_write_lock(raw_rwlock_t *rw) 89static inline void arch_write_lock(arch_rwlock_t *rw)
90{ 90{
91 unsigned long tmp; 91 unsigned long tmp;
92 92
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
106 smp_mb(); 106 smp_mb();
107} 107}
108 108
109static inline int __raw_write_trylock(raw_rwlock_t *rw) 109static inline int arch_write_trylock(arch_rwlock_t *rw)
110{ 110{
111 unsigned long tmp; 111 unsigned long tmp;
112 112
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
126 } 126 }
127} 127}
128 128
129static inline void __raw_write_unlock(raw_rwlock_t *rw) 129static inline void arch_write_unlock(arch_rwlock_t *rw)
130{ 130{
131 smp_mb(); 131 smp_mb();
132 132
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
142} 142}
143 143
144/* write_can_lock - would write_trylock() succeed? */ 144/* write_can_lock - would write_trylock() succeed? */
145#define __raw_write_can_lock(x) ((x)->lock == 0) 145#define arch_write_can_lock(x) ((x)->lock == 0)
146 146
147/* 147/*
148 * Read locks are a bit more hairy: 148 * Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
156 * currently active. However, we know we won't have any write 156 * currently active. However, we know we won't have any write
157 * locks. 157 * locks.
158 */ 158 */
159static inline void __raw_read_lock(raw_rwlock_t *rw) 159static inline void arch_read_lock(arch_rwlock_t *rw)
160{ 160{
161 unsigned long tmp, tmp2; 161 unsigned long tmp, tmp2;
162 162
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
176 smp_mb(); 176 smp_mb();
177} 177}
178 178
179static inline void __raw_read_unlock(raw_rwlock_t *rw) 179static inline void arch_read_unlock(arch_rwlock_t *rw)
180{ 180{
181 unsigned long tmp, tmp2; 181 unsigned long tmp, tmp2;
182 182
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
198 : "cc"); 198 : "cc");
199} 199}
200 200
201static inline int __raw_read_trylock(raw_rwlock_t *rw) 201static inline int arch_read_trylock(arch_rwlock_t *rw)
202{ 202{
203 unsigned long tmp, tmp2 = 1; 203 unsigned long tmp, tmp2 = 1;
204 204
@@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
215} 215}
216 216
217/* read_can_lock - would read_trylock() succeed? */ 217/* read_can_lock - would read_trylock() succeed? */
218#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) 218#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
219 219
220#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 220#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
221#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 221#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
222 222
223#define _raw_spin_relax(lock) cpu_relax() 223#define arch_spin_relax(lock) cpu_relax()
224#define _raw_read_relax(lock) cpu_relax() 224#define arch_read_relax(lock) cpu_relax()
225#define _raw_write_relax(lock) cpu_relax() 225#define arch_write_relax(lock) cpu_relax()
226 226
227#endif /* __ASM_SPINLOCK_H */ 227#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee5..d14d197ae04a 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int lock; 15 volatile unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define __RAW_RW_LOCK_UNLOCKED { 0 } 18#define __ARCH_RW_LOCK_UNLOCKED { 0 }
19 19
20#endif 20#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c9a8619f3856..b7cb45bb91e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
69 } 69 }
70 70
71 if (i < NR_IRQS) { 71 if (i < NR_IRQS) {
72 spin_lock_irqsave(&irq_desc[i].lock, flags); 72 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
73 action = irq_desc[i].action; 73 action = irq_desc[i].action;
74 if (!action) 74 if (!action)
75 goto unlock; 75 goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v)
84 84
85 seq_putc(p, '\n'); 85 seq_putc(p, '\n');
86unlock: 86unlock:
87 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 87 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
88 } else if (i == NR_IRQS) { 88 } else if (i == NR_IRQS) {
89#ifdef CONFIG_FIQ 89#ifdef CONFIG_FIQ
90 show_fiq_list(p, v); 90 show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
139 } 139 }
140 140
141 desc = irq_desc + irq; 141 desc = irq_desc + irq;
142 spin_lock_irqsave(&desc->lock, flags); 142 raw_spin_lock_irqsave(&desc->lock, flags);
143 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 143 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
144 if (iflags & IRQF_VALID) 144 if (iflags & IRQF_VALID)
145 desc->status &= ~IRQ_NOREQUEST; 145 desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
147 desc->status &= ~IRQ_NOPROBE; 147 desc->status &= ~IRQ_NOPROBE;
148 if (!(iflags & IRQF_NOAUTOEN)) 148 if (!(iflags & IRQF_NOAUTOEN))
149 desc->status &= ~IRQ_NOAUTOEN; 149 desc->status &= ~IRQ_NOAUTOEN;
150 spin_unlock_irqrestore(&desc->lock, flags); 150 raw_spin_unlock_irqrestore(&desc->lock, flags);
151} 151}
152 152
153void __init init_IRQ(void) 153void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
166{ 166{
167 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu); 167 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
168 168
169 spin_lock_irq(&desc->lock); 169 raw_spin_lock_irq(&desc->lock);
170 desc->chip->set_affinity(irq, cpumask_of(cpu)); 170 desc->chip->set_affinity(irq, cpumask_of(cpu));
171 spin_unlock_irq(&desc->lock); 171 raw_spin_unlock_irq(&desc->lock);
172} 172}
173 173
174/* 174/*
diff --git a/arch/arm/mach-at91/include/mach/atmel-mci.h b/arch/arm/mach-at91/include/mach/atmel-mci.h
new file mode 100644
index 000000000000..998cb0c07135
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/atmel-mci.h
@@ -0,0 +1,24 @@
1#ifndef __MACH_ATMEL_MCI_H
2#define __MACH_ATMEL_MCI_H
3
4#include <mach/at_hdmac.h>
5
6/**
7 * struct mci_dma_data - DMA data for MCI interface
8 */
9struct mci_dma_data {
10 struct at_dma_slave sdata;
11};
12
13/* accessor macros */
14#define slave_data_ptr(s) (&(s)->sdata)
15#define find_slave_dev(s) ((s)->sdata.dma_dev)
16
17#define setup_dma_addr(s, t, r) do { \
18 if (s) { \
19 (s)->sdata.tx_reg = (t); \
20 (s)->sdata.rx_reg = (r); \
21 } \
22} while (0)
23
24#endif /* __MACH_ATMEL_MCI_H */
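A hedged sketch of how board code might fill in the new mci_dma_data; the DMA controller device and the transmit/receive register offsets are illustrative, and it assumes struct at_dma_slave exposes dma_dev/tx_reg/rx_reg as the accessors above imply:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/types.h>
#include <mach/atmel-mci.h>

static struct mci_dma_data example_mci_dma;	/* handed to the MCI driver via platform data */

static void __init example_mci_dma_setup(struct device *dmac_dev,
					 resource_size_t mci_base)
{
	/* which DMA controller services the MCI slave channel */
	find_slave_dev(&example_mci_dma) = dmac_dev;

	/* FIFO register addresses for TX and RX (offsets are examples only) */
	setup_dma_addr(&example_mci_dma, mci_base + 0x34, mci_base + 0x30);
}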
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index fbe6fa02c882..53dd2a9eecf9 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -70,9 +70,19 @@ static struct ctl_table bcmring_sysctl_reboot[] = {
70 {} 70 {}
71}; 71};
72 72
73static struct resource nand_resource[] = {
74 [0] = {
75 .start = MM_ADDR_IO_NAND,
76 .end = MM_ADDR_IO_NAND + 0x1000 - 1,
77 .flags = IORESOURCE_MEM,
78 },
79};
80
73static struct platform_device nand_device = { 81static struct platform_device nand_device = {
74 .name = "bcm-nand", 82 .name = "bcm-nand",
75 .id = -1, 83 .id = -1,
84 .resource = nand_resource,
85 .num_resources = ARRAY_SIZE(nand_resource),
76}; 86};
77 87
78static struct platform_device *devices[] __initdata = { 88static struct platform_device *devices[] __initdata = {
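With a MEM resource now attached to nand_device, the bcm-nand driver can find its register window through the normal platform API; a minimal probe sketch (hypothetical, not part of the patch):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static int __devinit example_nand_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* ... register the NAND controller using "base" ... */
	return 0;
}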
diff --git a/arch/arm/mach-bcmring/include/mach/reg_nand.h b/arch/arm/mach-bcmring/include/mach/reg_nand.h
new file mode 100644
index 000000000000..387376ffb56b
--- /dev/null
+++ b/arch/arm/mach-bcmring/include/mach/reg_nand.h
@@ -0,0 +1,66 @@
1/*****************************************************************************
2* Copyright 2001 - 2008 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/*
16*
17*****************************************************************************
18*
19* REG_NAND.h
20*
21* PURPOSE:
22*
23* This file contains definitions for the nand registers:
24*
25* NOTES:
26*
27*****************************************************************************/
28
29#if !defined(__ASM_ARCH_REG_NAND_H)
30#define __ASM_ARCH_REG_NAND_H
31
32/* ---- Include Files ---------------------------------------------------- */
33#include <csp/reg.h>
34#include <mach/reg_umi.h>
35
36/* ---- Constants and Types ---------------------------------------------- */
37
38#define HW_NAND_BASE MM_IO_BASE_NAND /* NAND Flash */
39
40/* DMA accesses by the bootstrap need hard nonvirtual addresses */
41#define REG_NAND_CMD __REG16(HW_NAND_BASE + 0)
42#define REG_NAND_ADDR __REG16(HW_NAND_BASE + 4)
43
44#define REG_NAND_PHYS_DATA16 (HW_NAND_BASE + 8)
45#define REG_NAND_PHYS_DATA8 (HW_NAND_BASE + 8)
46#define REG_NAND_DATA16 __REG16(REG_NAND_PHYS_DATA16)
47#define REG_NAND_DATA8 __REG8(REG_NAND_PHYS_DATA8)
48
49/* use appropriate offset to make sure it starts at the 1K boundary */
50#define REG_NAND_PHYS_DATA_DMA (HW_NAND_BASE + 0x400)
51#define REG_NAND_DATA_DMA __REG32(REG_NAND_PHYS_DATA_DMA)
52
53/* Linux DMA requires physical address of the data register */
54#define REG_NAND_DATA16_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA16)
55#define REG_NAND_DATA8_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA8)
56#define REG_NAND_DATA_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA_DMA)
57
58#define NAND_BUS_16BIT() (0)
59#define NAND_BUS_8BIT() (!NAND_BUS_16BIT())
60
61/* Register offsets */
62#define REG_NAND_CMD_OFFSET (0)
63#define REG_NAND_ADDR_OFFSET (4)
64#define REG_NAND_DATA8_OFFSET (8)
65
66#endif
diff --git a/arch/arm/mach-bcmring/include/mach/reg_umi.h b/arch/arm/mach-bcmring/include/mach/reg_umi.h
new file mode 100644
index 000000000000..06a355481ea6
--- /dev/null
+++ b/arch/arm/mach-bcmring/include/mach/reg_umi.h
@@ -0,0 +1,237 @@
1/*****************************************************************************
2* Copyright 2005 - 2008 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/*
16*
17*****************************************************************************
18*
19* REG_UMI.h
20*
21* PURPOSE:
22*
23* This file contains definitions for the nand registers:
24*
25* NOTES:
26*
27*****************************************************************************/
28
29#if !defined(__ASM_ARCH_REG_UMI_H)
30#define __ASM_ARCH_REG_UMI_H
31
32/* ---- Include Files ---------------------------------------------------- */
33#include <csp/reg.h>
34#include <mach/csp/mm_io.h>
35
36/* ---- Constants and Types ---------------------------------------------- */
37
38/* Unified Memory Interface Ctrl Register */
39#define HW_UMI_BASE MM_IO_BASE_UMI
40
41/* Flash bank 0 timing and control register */
42#define REG_UMI_FLASH0_TCR __REG32(HW_UMI_BASE + 0x00)
43/* Flash bank 1 timing and control register */
44#define REG_UMI_FLASH1_TCR __REG32(HW_UMI_BASE + 0x04)
45/* Flash bank 2 timing and control register */
46#define REG_UMI_FLASH2_TCR __REG32(HW_UMI_BASE + 0x08)
47/* MMD interface and control register */
48#define REG_UMI_MMD_ICR __REG32(HW_UMI_BASE + 0x0c)
49/* NAND timing and control register */
50#define REG_UMI_NAND_TCR __REG32(HW_UMI_BASE + 0x18)
51/* NAND ready/chip select register */
52#define REG_UMI_NAND_RCSR __REG32(HW_UMI_BASE + 0x1c)
53/* NAND ECC control & status register */
54#define REG_UMI_NAND_ECC_CSR __REG32(HW_UMI_BASE + 0x20)
55/* NAND ECC data register XXB2B1B0 */
56#define REG_UMI_NAND_ECC_DATA __REG32(HW_UMI_BASE + 0x24)
57/* BCH ECC Parameter N */
58#define REG_UMI_BCH_N __REG32(HW_UMI_BASE + 0x40)
59/* BCH ECC Parameter T */
60#define REG_UMI_BCH_K __REG32(HW_UMI_BASE + 0x44)
61/* BCH ECC Parameter K */
62#define REG_UMI_BCH_T __REG32(HW_UMI_BASE + 0x48)
63/* BCH ECC Control Status */
64#define REG_UMI_BCH_CTRL_STATUS __REG32(HW_UMI_BASE + 0x4C)
65/* BCH WR ECC 31:0 */
66#define REG_UMI_BCH_WR_ECC_0 __REG32(HW_UMI_BASE + 0x50)
67/* BCH WR ECC 63:32 */
68#define REG_UMI_BCH_WR_ECC_1 __REG32(HW_UMI_BASE + 0x54)
69/* BCH WR ECC 95:64 */
70#define REG_UMI_BCH_WR_ECC_2 __REG32(HW_UMI_BASE + 0x58)
71/* BCH WR ECC 127:96 */
72#define REG_UMI_BCH_WR_ECC_3 __REG32(HW_UMI_BASE + 0x5c)
73/* BCH WR ECC 155:128 */
74#define REG_UMI_BCH_WR_ECC_4 __REG32(HW_UMI_BASE + 0x60)
75/* BCH Read Error Location 1,0 */
76#define REG_UMI_BCH_RD_ERR_LOC_1_0 __REG32(HW_UMI_BASE + 0x64)
77/* BCH Read Error Location 3,2 */
78#define REG_UMI_BCH_RD_ERR_LOC_3_2 __REG32(HW_UMI_BASE + 0x68)
79/* BCH Read Error Location 5,4 */
80#define REG_UMI_BCH_RD_ERR_LOC_5_4 __REG32(HW_UMI_BASE + 0x6c)
81/* BCH Read Error Location 7,6 */
82#define REG_UMI_BCH_RD_ERR_LOC_7_6 __REG32(HW_UMI_BASE + 0x70)
83/* BCH Read Error Location 9,8 */
84#define REG_UMI_BCH_RD_ERR_LOC_9_8 __REG32(HW_UMI_BASE + 0x74)
85/* BCH Read Error Location 11,10 */
86#define REG_UMI_BCH_RD_ERR_LOC_B_A __REG32(HW_UMI_BASE + 0x78)
87
88/* REG_UMI_FLASH0/1/2_TCR, REG_UMI_SRAM0/1_TCR bits */
89/* Enable wait pin during burst write or read */
90#define REG_UMI_TCR_WAITEN 0x80000000
91/* Enable mem ctrlr to work with ext mem of lower freq than AHB clk */
92#define REG_UMI_TCR_LOWFREQ 0x40000000
93/* 1=synch write, 0=async write */
94#define REG_UMI_TCR_MEMTYPE_SYNCWRITE 0x20000000
95/* 1=synch read, 0=async read */
96#define REG_UMI_TCR_MEMTYPE_SYNCREAD 0x10000000
97/* 1=page mode read, 0=normal mode read */
98#define REG_UMI_TCR_MEMTYPE_PAGEREAD 0x08000000
99/* page size/burst size (wrap only) */
100#define REG_UMI_TCR_MEMTYPE_PGSZ_MASK 0x07000000
101/* 4 word */
102#define REG_UMI_TCR_MEMTYPE_PGSZ_4 0x00000000
103/* 8 word */
104#define REG_UMI_TCR_MEMTYPE_PGSZ_8 0x01000000
105/* 16 word */
106#define REG_UMI_TCR_MEMTYPE_PGSZ_16 0x02000000
107/* 32 word */
108#define REG_UMI_TCR_MEMTYPE_PGSZ_32 0x03000000
109/* 64 word */
110#define REG_UMI_TCR_MEMTYPE_PGSZ_64 0x04000000
111/* 128 word */
112#define REG_UMI_TCR_MEMTYPE_PGSZ_128 0x05000000
113/* 256 word */
114#define REG_UMI_TCR_MEMTYPE_PGSZ_256 0x06000000
115/* 512 word */
116#define REG_UMI_TCR_MEMTYPE_PGSZ_512 0x07000000
117/* Page read access cycle / Burst write latency (n+2 / n+1) */
118#define REG_UMI_TCR_TPRC_TWLC_MASK 0x00f80000
119/* Bus turnaround cycle (n) */
120#define REG_UMI_TCR_TBTA_MASK 0x00070000
121/* Write pulse width cycle (n+1) */
122#define REG_UMI_TCR_TWP_MASK 0x0000f800
123/* Write recovery cycle (n+1) */
124#define REG_UMI_TCR_TWR_MASK 0x00000600
125/* Write address setup cycle (n+1) */
126#define REG_UMI_TCR_TAS_MASK 0x00000180
127/* Output enable delay cycle (n) */
128#define REG_UMI_TCR_TOE_MASK 0x00000060
129/* Read access cycle / Burst read latency (n+2 / n+1) */
130#define REG_UMI_TCR_TRC_TLC_MASK 0x0000001f
131
132/* REG_UMI_MMD_ICR bits */
133/* Flash write protection pin control */
134#define REG_UMI_MMD_ICR_FLASH_WP 0x8000
135/* Extend hold time for sram0, sram1 csn (39 MHz operation) */
136#define REG_UMI_MMD_ICR_XHCS 0x4000
137/* Enable SDRAM 2 interface control */
138#define REG_UMI_MMD_ICR_SDRAM2EN 0x2000
139/* Enable merge of flash banks 0/1 to 512 MBit bank */
140#define REG_UMI_MMD_ICR_INST512 0x1000
141/* Enable merge of flash banks 1/2 to 512 MBit bank */
142#define REG_UMI_MMD_ICR_DATA512 0x0800
143/* Enable SDRAM interface control */
144#define REG_UMI_MMD_ICR_SDRAMEN 0x0400
145/* Polarity of busy state of Burst Wait Signal */
146#define REG_UMI_MMD_ICR_WAITPOL 0x0200
147/* Enable burst clock stopped when not accessing external burst flash/sram */
148#define REG_UMI_MMD_ICR_BCLKSTOP 0x0100
149/* Enable the peri1_csn to replace flash1_csn in 512 Mb flash mode */
150#define REG_UMI_MMD_ICR_PERI1EN 0x0080
151/* Enable the peri2_csn to replace sdram_csn */
152#define REG_UMI_MMD_ICR_PERI2EN 0x0040
153/* Enable the peri3_csn to replace sdram2_csn */
154#define REG_UMI_MMD_ICR_PERI3EN 0x0020
155/* Enable sram bank1 for H/W controlled MRS */
156#define REG_UMI_MMD_ICR_MRSB1 0x0010
157/* Enable sram bank0 for H/W controlled MRS */
158#define REG_UMI_MMD_ICR_MRSB0 0x0008
159/* Polarity for asserted state of H/W controlled MRS */
160#define REG_UMI_MMD_ICR_MRSPOL 0x0004
161/* 0: S/W controllable ZZ/MRS/CRE/P-Mode pin */
162/* 1: H/W controlled ZZ/MRS/CRE/P-Mode, same timing as CS */
163#define REG_UMI_MMD_ICR_MRSMODE 0x0002
164/* MRS state for S/W controlled mode */
165#define REG_UMI_MMD_ICR_MRSSTATE 0x0001
166
167/* REG_UMI_NAND_TCR bits */
168/* Enable software to control CS */
169#define REG_UMI_NAND_TCR_CS_SWCTRL 0x80000000
170/* 16-bit nand wordsize if set */
171#define REG_UMI_NAND_TCR_WORD16 0x40000000
172/* Bus turnaround cycle (n) */
173#define REG_UMI_NAND_TCR_TBTA_MASK 0x00070000
174/* Write pulse width cycle (n+1) */
175#define REG_UMI_NAND_TCR_TWP_MASK 0x0000f800
176/* Write recovery cycle (n+1) */
177#define REG_UMI_NAND_TCR_TWR_MASK 0x00000600
178/* Write address setup cycle (n+1) */
179#define REG_UMI_NAND_TCR_TAS_MASK 0x00000180
180/* Output enable delay cycle (n) */
181#define REG_UMI_NAND_TCR_TOE_MASK 0x00000060
182/* Read access cycle (n+2) */
183#define REG_UMI_NAND_TCR_TRC_TLC_MASK 0x0000001f
184
185/* REG_UMI_NAND_RCSR bits */
186/* Status: Ready=1, Busy=0 */
187#define REG_UMI_NAND_RCSR_RDY 0x02
188/* Keep CS asserted during operation */
189#define REG_UMI_NAND_RCSR_CS_ASSERTED 0x01
190
191/* REG_UMI_NAND_ECC_CSR bits */
192/* Interrupt status - read-only */
193#define REG_UMI_NAND_ECC_CSR_NANDINT 0x80000000
194/* Read: Status of ECC done, Write: clear ECC interrupt */
195#define REG_UMI_NAND_ECC_CSR_ECCINT_RAW 0x00800000
196/* Read: Status of R/B, Write: clear R/B interrupt */
197#define REG_UMI_NAND_ECC_CSR_RBINT_RAW 0x00400000
198/* 1 = Enable ECC Interrupt */
199#define REG_UMI_NAND_ECC_CSR_ECCINT_ENABLE 0x00008000
200/* 1 = Assert interrupt at rising edge of R/B_ */
201#define REG_UMI_NAND_ECC_CSR_RBINT_ENABLE 0x00004000
202/* Calculate ECC by 0=512 bytes, 1=256 bytes */
203#define REG_UMI_NAND_ECC_CSR_256BYTE 0x00000080
204/* Enable ECC in hardware */
205#define REG_UMI_NAND_ECC_CSR_ECC_ENABLE 0x00000001
206
207/* REG_UMI_BCH_CTRL_STATUS bits */
208/* Shift to Indicate Number of correctable errors detected */
209#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR_SHIFT 20
210/* Indicate Number of correctable errors detected */
211#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR 0x00F00000
212/* Indicate Errors detected during read but uncorrectable */
213#define REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR 0x00080000
214/* Indicate Errors detected during read and are correctable */
215#define REG_UMI_BCH_CTRL_STATUS_CORR_ERR 0x00040000
216/* Flag indicates BCH's ECC status of read process are valid */
217#define REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID 0x00020000
218/* Flag indicates BCH's ECC status of write process are valid */
219#define REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID 0x00010000
220/* Pause ECC calculation */
221#define REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC 0x00000010
222/* Enable Interrupt */
223#define REG_UMI_BCH_CTRL_STATUS_INT_EN 0x00000004
224/* Enable ECC during read */
225#define REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN 0x00000002
226/* Enable ECC during write */
227#define REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN 0x00000001
228/* Mask for location */
229#define REG_UMI_BCH_ERR_LOC_MASK 0x00001FFF
230/* location within a byte */
231#define REG_UMI_BCH_ERR_LOC_BYTE 0x00000007
232/* location within a word */
233#define REG_UMI_BCH_ERR_LOC_WORD 0x00000018
234/* location within a page (512 byte) */
235#define REG_UMI_BCH_ERR_LOC_PAGE 0x00001FE0
236#define REG_UMI_BCH_ERR_LOC_ADDR(index) (__REG32(HW_UMI_BASE + 0x64 + (index / 2)*4) >> ((index % 2) * 16))
237#endif
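Sketch (not from the patch) of decoding one corrected-error location with the masks above; the interpretation of the three fields follows the comments and is an assumption:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_report_bch_error(int index)
{
	u32 loc          = REG_UMI_BCH_ERR_LOC_ADDR(index) & REG_UMI_BCH_ERR_LOC_MASK;
	u32 bit_in_byte  = loc & REG_UMI_BCH_ERR_LOC_BYTE;
	u32 byte_in_word = (loc & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
	u32 word_in_page = (loc & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;

	pr_debug("BCH error %d: word %u, byte %u, bit %u within the 512-byte page\n",
		 index, word_in_page, byte_in_word, bit_in_byte);
}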
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 62b98bffc158..07de8db14581 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -339,6 +339,15 @@ static struct davinci_mmc_config da850_mmc_config = {
339 .version = MMC_CTLR_VERSION_2, 339 .version = MMC_CTLR_VERSION_2,
340}; 340};
341 341
342static void da850_panel_power_ctrl(int val)
343{
344 /* lcd backlight */
345 gpio_set_value(DA850_LCD_BL_PIN, val);
346
347 /* lcd power */
348 gpio_set_value(DA850_LCD_PWR_PIN, val);
349}
350
342static int da850_lcd_hw_init(void) 351static int da850_lcd_hw_init(void)
343{ 352{
344 int status; 353 int status;
@@ -356,17 +365,11 @@ static int da850_lcd_hw_init(void)
356 gpio_direction_output(DA850_LCD_BL_PIN, 0); 365 gpio_direction_output(DA850_LCD_BL_PIN, 0);
357 gpio_direction_output(DA850_LCD_PWR_PIN, 0); 366 gpio_direction_output(DA850_LCD_PWR_PIN, 0);
358 367
359 /* disable lcd backlight */ 368 /* Switch off panel power and backlight */
360 gpio_set_value(DA850_LCD_BL_PIN, 0); 369 da850_panel_power_ctrl(0);
361
362 /* disable lcd power */
363 gpio_set_value(DA850_LCD_PWR_PIN, 0);
364
365 /* enable lcd power */
366 gpio_set_value(DA850_LCD_PWR_PIN, 1);
367 370
368 /* enable lcd backlight */ 371 /* Switch on panel power and backlight */
369 gpio_set_value(DA850_LCD_BL_PIN, 1); 372 da850_panel_power_ctrl(1);
370 373
371 return 0; 374 return 0;
372} 375}
@@ -674,6 +677,7 @@ static __init void da850_evm_init(void)
674 pr_warning("da850_evm_init: lcd initialization failed: %d\n", 677 pr_warning("da850_evm_init: lcd initialization failed: %d\n",
675 ret); 678 ret);
676 679
680 sharp_lk043t1dg01_pdata.panel_power_ctrl = da850_panel_power_ctrl,
677 ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata); 681 ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
678 if (ret) 682 if (ret)
679 pr_warning("da850_evm_init: lcdc registration failed: %d\n", 683 pr_warning("da850_evm_init: lcdc registration failed: %d\n",
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index b520c4b5678a..b2ad8090bd10 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -79,6 +79,10 @@ struct davinci_nand_pdata { /* platform_data */
79 79
80 /* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */ 80 /* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
81 unsigned options; 81 unsigned options;
82
83 /* Main and mirror bbt descriptor overrides */
84 struct nand_bbt_descr *bbt_td;
85 struct nand_bbt_descr *bbt_md;
82}; 86};
83 87
84#endif /* __ARCH_ARM_DAVINCI_NAND_H */ 88#endif /* __ARCH_ARM_DAVINCI_NAND_H */
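Hypothetical board code exercising the new bbt_td/bbt_md overrides; my_bbt_main_descr and my_bbt_mirror_descr stand in for board-specific struct nand_bbt_descr definitions:

#include <linux/mtd/nand.h>
#include <mach/nand.h>

/* defined elsewhere by the board; layout of the descriptors is board-specific */
extern struct nand_bbt_descr my_bbt_main_descr;
extern struct nand_bbt_descr my_bbt_mirror_descr;

static struct davinci_nand_pdata example_nand_pdata = {
	.options = NAND_USE_FLASH_BBT,		/* keep the bad-block table in flash */
	.bbt_td	 = &my_bbt_main_descr,		/* main table descriptor override */
	.bbt_md	 = &my_bbt_mirror_descr,	/* mirror table descriptor override */
};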
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
index 83f31cd0a274..62d17421e48c 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
@@ -5,9 +5,6 @@
5#ifndef __ASM_ARCH_EP93XX_KEYPAD_H 5#ifndef __ASM_ARCH_EP93XX_KEYPAD_H
6#define __ASM_ARCH_EP93XX_KEYPAD_H 6#define __ASM_ARCH_EP93XX_KEYPAD_H
7 7
8#define MAX_MATRIX_KEY_ROWS (8)
9#define MAX_MATRIX_KEY_COLS (8)
10
11/* flags for the ep93xx_keypad driver */ 8/* flags for the ep93xx_keypad driver */
12#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */ 9#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
13#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */ 10#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
@@ -18,8 +15,6 @@
18 15
19/** 16/**
20 * struct ep93xx_keypad_platform_data - platform specific device structure 17 * struct ep93xx_keypad_platform_data - platform specific device structure
21 * @matrix_key_rows: number of rows in the keypad matrix
22 * @matrix_key_cols: number of columns in the keypad matrix
23 * @matrix_key_map: array of keycodes defining the keypad matrix 18 * @matrix_key_map: array of keycodes defining the keypad matrix
24 * @matrix_key_map_size: ARRAY_SIZE(matrix_key_map) 19 * @matrix_key_map_size: ARRAY_SIZE(matrix_key_map)
25 * @debounce: debounce start count; terminal count is 0xff 20 * @debounce: debounce start count; terminal count is 0xff
@@ -27,8 +22,6 @@
27 * @flags: see above 22 * @flags: see above
28 */ 23 */
29struct ep93xx_keypad_platform_data { 24struct ep93xx_keypad_platform_data {
30 unsigned int matrix_key_rows;
31 unsigned int matrix_key_cols;
32 unsigned int *matrix_key_map; 25 unsigned int *matrix_key_map;
33 int matrix_key_map_size; 26 int matrix_key_map_size;
34 unsigned int debounce; 27 unsigned int debounce;
@@ -36,7 +29,7 @@ struct ep93xx_keypad_platform_data {
36 unsigned int flags; 29 unsigned int flags;
37}; 30};
38 31
39/* macro for creating the matrix_key_map table */ 32#define EP93XX_MATRIX_ROWS (8)
40#define KEY(row, col, val) (((row) << 28) | ((col) << 24) | (val)) 33#define EP93XX_MATRIX_COLS (8)
41 34
42#endif /* __ASM_ARCH_EP93XX_KEYPAD_H */ 35#endif /* __ASM_ARCH_EP93XX_KEYPAD_H */
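An illustrative fragment of board platform data after this change, assuming the driver now takes key codes in the generic matrix-keypad KEY() encoding from <linux/input/matrix_keypad.h>; key choices and the debounce value are arbitrary:

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <mach/ep93xx_keypad.h>

static unsigned int example_keymap[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_ENTER),
};

static struct ep93xx_keypad_platform_data example_keypad_pdata = {
	.matrix_key_map      = example_keymap,
	.matrix_key_map_size = ARRAY_SIZE(example_keymap),
	.debounce            = 0xa0,	/* debounce start count; terminal count is 0xff */
};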
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 116394484e71..9438bf6613a3 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -18,6 +18,7 @@
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
20#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
21#include <linux/mtd/onenand.h>
21#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
22#include <linux/io.h> 23#include <linux/io.h>
23#include <asm/sizes.h> 24#include <asm/sizes.h>
@@ -149,7 +150,7 @@ static struct mtd_partition nhk8815_onenand_partitions[] = {
149 } 150 }
150}; 151};
151 152
152static struct flash_platform_data nhk8815_onenand_data = { 153static struct onenand_platform_data nhk8815_onenand_data = {
153 .parts = nhk8815_onenand_partitions, 154 .parts = nhk8815_onenand_partitions,
154 .nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions), 155 .nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions),
155}; 156};
@@ -163,7 +164,7 @@ static struct resource nhk8815_onenand_resource[] = {
163}; 164};
164 165
165static struct platform_device nhk8815_onenand_device = { 166static struct platform_device nhk8815_onenand_device = {
166 .name = "onenand", 167 .name = "onenand-flash",
167 .id = -1, 168 .id = -1,
168 .dev = { 169 .dev = {
169 .platform_data = &nhk8815_onenand_data, 170 .platform_data = &nhk8815_onenand_data,
@@ -174,10 +175,10 @@ static struct platform_device nhk8815_onenand_device = {
174 175
175static void __init nhk8815_onenand_init(void) 176static void __init nhk8815_onenand_init(void)
176{ 177{
177#ifdef CONFIG_ONENAND 178#ifdef CONFIG_MTD_ONENAND
178 /* Set up SMCS0 for OneNand */ 179 /* Set up SMCS0 for OneNand */
179 writel(0x000030db, FSMC_BCR0); 180 writel(0x000030db, FSMC_BCR(0));
180 writel(0x02100551, FSMC_BTR0); 181 writel(0x02100551, FSMC_BTR(0));
181#endif 182#endif
182} 183}
183 184
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index feb0e54a91de..038f24d47023 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
66 struct irqaction *action; 66 struct irqaction *action;
67 irqreturn_t action_ret; 67 irqreturn_t action_ret;
68 68
69 spin_lock(&desc->lock); 69 raw_spin_lock(&desc->lock);
70 70
71 BUG_ON(desc->status & IRQ_INPROGRESS); 71 BUG_ON(desc->status & IRQ_INPROGRESS);
72 72
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
78 goto out_mask; 78 goto out_mask;
79 79
80 desc->status |= IRQ_INPROGRESS; 80 desc->status |= IRQ_INPROGRESS;
81 spin_unlock(&desc->lock); 81 raw_spin_unlock(&desc->lock);
82 82
83 action_ret = handle_IRQ_event(irq, action); 83 action_ret = handle_IRQ_event(irq, action);
84 84
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
87 * Maybe this function should go to kernel/irq/chip.c? */ 87 * Maybe this function should go to kernel/irq/chip.c? */
88 note_interrupt(irq, desc, action_ret); 88 note_interrupt(irq, desc, action_ret);
89 89
90 spin_lock(&desc->lock); 90 raw_spin_lock(&desc->lock);
91 desc->status &= ~IRQ_INPROGRESS; 91 desc->status &= ~IRQ_INPROGRESS;
92 92
93 if (desc->status & IRQ_DISABLED) 93 if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
97 /* ack unconditionally to unmask lower prio irqs */ 97 /* ack unconditionally to unmask lower prio irqs */
98 desc->chip->ack(irq); 98 desc->chip->ack(irq);
99 99
100 spin_unlock(&desc->lock); 100 raw_spin_unlock(&desc->lock);
101} 101}
102#define handle_irq handle_prio_irq 102#define handle_irq handle_prio_irq
103#endif 103#endif
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
index f76d6ff4aeb9..0b4a3a03071f 100644
--- a/arch/arm/mach-s3c2442/mach-gta02.c
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -268,6 +268,9 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
268 268
269 .batteries = gta02_batteries, 269 .batteries = gta02_batteries,
270 .num_batteries = ARRAY_SIZE(gta02_batteries), 270 .num_batteries = ARRAY_SIZE(gta02_batteries),
271
272 .charger_reference_current_ma = 1000,
273
271 .reg_init_data = { 274 .reg_init_data = {
272 [PCF50633_REGULATOR_AUTO] = { 275 [PCF50633_REGULATOR_AUTO] = {
273 .constraints = { 276 .constraints = {
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
new file mode 100644
index 000000000000..f4cfee9c7d28
--- /dev/null
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -0,0 +1,281 @@
1/*
2 *
3 * include/linux/coh901318.h
4 *
5 *
6 * Copyright (C) 2007-2009 ST-Ericsson
7 * License terms: GNU General Public License (GPL) version 2
8 * DMA driver for COH 901 318
9 * Author: Per Friden <per.friden@stericsson.com>
10 */
11
12#ifndef COH901318_H
13#define COH901318_H
14
15#include <linux/device.h>
16#include <linux/dmaengine.h>
17
18#define MAX_DMA_PACKET_SIZE_SHIFT 11
19#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
20
21/**
22 * struct coh901318_lli - linked list item for DMAC
23 * @control: control settings for DMAC
24 * @src_addr: transfer source address
25 * @dst_addr: transfer destination address
26 * @link_addr: physical address to next lli
 27 * @virt_link_addr: virtual address of next lli (only used by pool_free)
28 * @phy_this: physical address of current lli (only used by pool_free)
29 */
30struct coh901318_lli {
31 u32 control;
32 dma_addr_t src_addr;
33 dma_addr_t dst_addr;
34 dma_addr_t link_addr;
35
36 void *virt_link_addr;
37 dma_addr_t phy_this;
38};
39/**
40 * struct coh901318_params - parameters for DMAC configuration
41 * @config: DMA config register
42 * @ctrl_lli_last: DMA control register for the last lli in the list
43 * @ctrl_lli: DMA control register for an lli
44 * @ctrl_lli_chained: DMA control register for a chained lli
45 */
46struct coh901318_params {
47 u32 config;
48 u32 ctrl_lli_last;
49 u32 ctrl_lli;
50 u32 ctrl_lli_chained;
51};
52/**
53 * struct coh_dma_channel - dma channel base
54 * @name: ascii name of dma channel
55 * @number: channel id number
 56 * @desc_nbr_max: number of preallocated descriptors
 57 * @priority_high: priority of channel; 0 means low priority, otherwise high.
58 * @param: configuration parameters
 59 * @dev_addr: physical address of peripheral connected to channel
60 */
61struct coh_dma_channel {
62 const char name[32];
63 const int number;
64 const int desc_nbr_max;
65 const int priority_high;
66 const struct coh901318_params param;
67 const dma_addr_t dev_addr;
68};
69
70/**
71 * dma_access_memory_state_t - register dma for memory access
72 *
73 * @dev: The dma device
74 * @active: 1 means dma intends to access memory
 75 * 0 means dma won't access memory
76 */
77typedef void (*dma_access_memory_state_t)(struct device *dev,
78 bool active);
79
80/**
81 * struct powersave - DMA power save structure
82 * @lock: lock protecting data in this struct
83 * @started_channels: bit mask indicating active dma channels
84 */
85struct powersave {
86 spinlock_t lock;
87 u64 started_channels;
88};
89/**
90 * struct coh901318_platform - platform arch structure
91 * @chans_slave: specifying dma slave channels
92 * @chans_memcpy: specifying dma memcpy channels
 93 * @access_memory_state: requesting DMA memory access (on / off)
94 * @chan_conf: dma channel configurations
 95 * @max_channels: max number of dma channels
96 */
97struct coh901318_platform {
98 const int *chans_slave;
99 const int *chans_memcpy;
100 const dma_access_memory_state_t access_memory_state;
101 const struct coh_dma_channel *chan_conf;
102 const int max_channels;
103};
104
105/**
106 * coh901318_get_bytes_left() - Get number of bytes left on a current transfer
107 * @chan: dma channel handle
108 * return number of bytes left, or negative on error
109 */
110u32 coh901318_get_bytes_left(struct dma_chan *chan);
111
112/**
113 * coh901318_stop() - Stops dma transfer
114 * @chan: dma channel handle
115 * return 0 on success otherwise negative value
116 */
117void coh901318_stop(struct dma_chan *chan);
118
119/**
120 * coh901318_continue() - Resumes a stopped dma transfer
121 * @chan: dma channel handle
122 * return 0 on success otherwise negative value
123 */
124void coh901318_continue(struct dma_chan *chan);
125
126/**
127 * coh901318_filter_id() - DMA channel filter function
128 * @chan: dma channel handle
 129 * @chan_id: id of dma channel to be filtered out
130 *
 131 * When passed to dma_request_channel() it specifies which channel id is requested
132 */
133bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
134
135/*
136 * DMA Controller - this access the static mappings of the coh901318 dma.
137 *
138 */
139
140#define COH901318_MOD32_MASK (0x1F)
141#define COH901318_WORD_MASK (0xFFFFFFFF)
142/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
143#define COH901318_INT_STATUS1 (0x0000)
144#define COH901318_INT_STATUS2 (0x0004)
145/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
146#define COH901318_TC_INT_STATUS1 (0x0008)
147#define COH901318_TC_INT_STATUS2 (0x000C)
148/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
149#define COH901318_TC_INT_CLEAR1 (0x0010)
150#define COH901318_TC_INT_CLEAR2 (0x0014)
151/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
152#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
153#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
154/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
155#define COH901318_BE_INT_STATUS1 (0x0020)
156#define COH901318_BE_INT_STATUS2 (0x0024)
157/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
158#define COH901318_BE_INT_CLEAR1 (0x0028)
159#define COH901318_BE_INT_CLEAR2 (0x002C)
160/* RAW_BE_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
161#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
162#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
163
164/*
165 * CX_CFG - Channel Configuration Registers 32bit (R/W)
166 */
167#define COH901318_CX_CFG (0x0100)
168#define COH901318_CX_CFG_SPACING (0x04)
 169/* Channel enable activates the dma job */
170#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
171#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
172/* Request Mode */
173#define COH901318_CX_CFG_RM_MASK (0x00000006)
174#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
175#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
176#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
177#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
178#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
179/* Linked channel request field. RM must == 11 */
180#define COH901318_CX_CFG_LCRF_SHIFT 3
181#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
182#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
183/* Terminal Counter Interrupt Request Mask */
184#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
185#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
186/* Bus Error interrupt Mask */
187#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
188#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
189
190/*
191 * CX_STAT - Channel Status Registers 32bit (R/-)
192 */
193#define COH901318_CX_STAT (0x0200)
194#define COH901318_CX_STAT_SPACING (0x04)
195#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
196#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
197#define COH901318_CX_STAT_ACTIVE (0x00000002)
198#define COH901318_CX_STAT_ENABLED (0x00000001)
199
200/*
201 * CX_CTRL - Channel Control Registers 32bit (R/W)
202 */
203#define COH901318_CX_CTRL (0x0400)
204#define COH901318_CX_CTRL_SPACING (0x10)
205/* Transfer Count Enable */
206#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
207#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
208/* Transfer Count Value 0 - 4095 */
209#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
210/* Burst count */
211#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
212#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
213#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
214#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
215#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
216#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
217#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
218#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
219#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
220/* Source bus size */
221#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
222#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
223#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
224#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
225/* Source address increment */
226#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
227#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
228/* Destination Bus Size */
229#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
230#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
231#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
232#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
233/* Destination address increment */
234#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
235#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
236/* Master Mode (Master2 is only connected to MSL) */
237#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
238#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
239#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
240#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
241#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
242/* Terminal Count flag to PER enable */
243#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
244#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
245/* Terminal Count flags to CPU enable */
246#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
247#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
248/* Hand shake to peripheral */
249#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
250#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
251#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
252#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
253/* DMA mode */
254#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
255#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
256#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
257#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
258/* Primary Request Data Destination */
259#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
260#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
261#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
262
263/*
264 * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
265 */
266#define COH901318_CX_SRC_ADDR (0x0404)
267#define COH901318_CX_SRC_ADDR_SPACING (0x10)
268
269/*
270 * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
271 */
272#define COH901318_CX_DST_ADDR (0x0408)
273#define COH901318_CX_DST_ADDR_SPACING (0x10)
274
275/*
276 * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
277 */
278#define COH901318_CX_LNK_ADDR (0x040C)
279#define COH901318_CX_LNK_ADDR_SPACING (0x10)
280#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
281#endif /* COH901318_H */
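
For reference, a dmaengine client would typically use coh901318_filter_id() together with dma_request_channel(); a minimal sketch, assuming an illustrative channel number (39) and function name — only dma_cap_zero(), dma_cap_set() and dma_request_channel() are the real dmaengine API:

#include <linux/dmaengine.h>
#include <mach/coh901318.h>

/* Request a specific COH 901 318 channel; 39 is an illustrative id only. */
static struct dma_chan *example_request_coh_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter matches the channel whose id equals the passed cookie. */
	return dma_request_channel(mask, coh901318_filter_id,
				   (void *)(unsigned long)39);
}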
diff --git a/arch/arm/plat-mxc/include/mach/mxc_nand.h b/arch/arm/plat-mxc/include/mach/mxc_nand.h
index 2b972df22d12..5d2d21d414e0 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_nand.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_nand.h
@@ -22,6 +22,7 @@
22 22
23struct mxc_nand_platform_data { 23struct mxc_nand_platform_data {
24 int width; /* data bus width in bytes */ 24 int width; /* data bus width in bytes */
 25 int hw_ecc; /* 0 to suppress hardware ECC */ 25 int hw_ecc:1; /* 0 to suppress hardware ECC */
26 int flash_bbt:1; /* set to 1 to use a flash based bbt */
26}; 27};
27#endif /* __ASM_ARCH_NAND_H */ 28#endif /* __ASM_ARCH_NAND_H */
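
For context, the new flash_bbt flag travels through ordinary platform data; a board file could wire it up roughly as below (a minimal sketch — the device name and values are illustrative, not taken from any particular board):

#include <linux/platform_device.h>
#include <mach/mxc_nand.h>

static struct mxc_nand_platform_data example_nand_pdata = {
	.width		= 1,	/* 8-bit data bus (width is in bytes) */
	.hw_ecc		= 1,	/* use the controller's hardware ECC */
	.flash_bbt	= 1,	/* keep the bad block table in flash */
};

static struct platform_device example_nand_device = {
	.name	= "mxc_nand",	/* driver name assumed for illustration */
	.id	= 0,
	.dev	= {
		.platform_data = &example_nand_pdata,
	},
};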
diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c
index 6c768b71ad64..53fcef7c5201 100644
--- a/arch/arm/plat-omap/debug-leds.c
+++ b/arch/arm/plat-omap/debug-leds.c
@@ -293,7 +293,7 @@ static int fpga_resume_noirq(struct device *dev)
293 return 0; 293 return 0;
294} 294}
295 295
296static struct dev_pm_ops fpga_dev_pm_ops = { 296static const struct dev_pm_ops fpga_dev_pm_ops = {
297 .suspend_noirq = fpga_suspend_noirq, 297 .suspend_noirq = fpga_suspend_noirq,
298 .resume_noirq = fpga_resume_noirq, 298 .resume_noirq = fpga_resume_noirq,
299}; 299};
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 055160e0620e..04846811d0aa 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1431,7 +1431,7 @@ static int omap_mpuio_resume_noirq(struct device *dev)
1431 return 0; 1431 return 0;
1432} 1432}
1433 1433
1434static struct dev_pm_ops omap_mpuio_dev_pm_ops = { 1434static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
1435 .suspend_noirq = omap_mpuio_suspend_noirq, 1435 .suspend_noirq = omap_mpuio_suspend_noirq,
1436 .resume_noirq = omap_mpuio_resume_noirq, 1436 .resume_noirq = omap_mpuio_resume_noirq,
1437}; 1437};
diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h
index 065985978413..226147b7e026 100644
--- a/arch/arm/plat-s3c/include/plat/nand.h
+++ b/arch/arm/plat-s3c/include/plat/nand.h
@@ -17,6 +17,7 @@
17 * Setting this flag will allow the kernel to 17 * Setting this flag will allow the kernel to
18 * look for it at boot time and also skip the NAND 18 * look for it at boot time and also skip the NAND
19 * scan. 19 * scan.
20 * @options: Default value to set into 'struct nand_chip' options.
20 * @nr_chips: Number of chips in this set 21 * @nr_chips: Number of chips in this set
21 * @nr_partitions: Number of partitions pointed to by @partitions 22 * @nr_partitions: Number of partitions pointed to by @partitions
22 * @name: Name of set (optional) 23 * @name: Name of set (optional)
@@ -31,6 +32,7 @@ struct s3c2410_nand_set {
31 unsigned int disable_ecc:1; 32 unsigned int disable_ecc:1;
32 unsigned int flash_bbt:1; 33 unsigned int flash_bbt:1;
33 34
35 unsigned int options;
34 int nr_chips; 36 int nr_chips;
35 int nr_partitions; 37 int nr_partitions;
36 char *name; 38 char *name;
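
As a rough illustration of where the new @options field ends up, a board could pass default nand_chip options through its set; everything below is a sketch, and NAND_NO_AUTOINCR is just one example flag from <linux/mtd/nand.h> of this period:

#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <plat/nand.h>

static struct mtd_partition example_parts[] = {
	{
		.name	= "rootfs",
		.offset	= 0,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct s3c2410_nand_set example_nand_sets[] = {
	[0] = {
		.name		= "example-nand",
		.nr_chips	= 1,
		.flash_bbt	= 1,			/* use a flash based bbt */
		.options	= NAND_NO_AUTOINCR,	/* copied into nand_chip->options */
		.nr_partitions	= ARRAY_SIZE(example_parts),
		.partitions	= example_parts,
	},
};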
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index d856354f4272..f2b319333184 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -112,6 +112,11 @@ config CPU_AT32AP7002
112 bool 112 bool
113 select CPU_AT32AP700X 113 select CPU_AT32AP700X
114 114
115# AP700X boards
116config BOARD_ATNGW100_COMMON
117 bool
118 select CPU_AT32AP7000
119
115choice 120choice
116 prompt "AVR32 board type" 121 prompt "AVR32 board type"
117 default BOARD_ATSTK1000 122 default BOARD_ATSTK1000
@@ -119,9 +124,13 @@ choice
119config BOARD_ATSTK1000 124config BOARD_ATSTK1000
120 bool "ATSTK1000 evaluation board" 125 bool "ATSTK1000 evaluation board"
121 126
122config BOARD_ATNGW100 127config BOARD_ATNGW100_MKI
123 bool "ATNGW100 Network Gateway" 128 bool "ATNGW100 Network Gateway"
124 select CPU_AT32AP7000 129 select BOARD_ATNGW100_COMMON
130
131config BOARD_ATNGW100_MKII
132 bool "ATNGW100 mkII Network Gateway"
133 select BOARD_ATNGW100_COMMON
125 134
126config BOARD_HAMMERHEAD 135config BOARD_HAMMERHEAD
127 bool "Hammerhead board" 136 bool "Hammerhead board"
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index c21a3290d542..ead8a75203a9 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -32,7 +32,7 @@ head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
32head-y += arch/avr32/kernel/head.o 32head-y += arch/avr32/kernel/head.o
33core-y += $(machdirs) 33core-y += $(machdirs)
34core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/ 34core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
35core-$(CONFIG_BOARD_ATNGW100) += arch/avr32/boards/atngw100/ 35core-$(CONFIG_BOARD_ATNGW100_COMMON) += arch/avr32/boards/atngw100/
36core-$(CONFIG_BOARD_HAMMERHEAD) += arch/avr32/boards/hammerhead/ 36core-$(CONFIG_BOARD_HAMMERHEAD) += arch/avr32/boards/hammerhead/
37core-$(CONFIG_BOARD_FAVR_32) += arch/avr32/boards/favr-32/ 37core-$(CONFIG_BOARD_FAVR_32) += arch/avr32/boards/favr-32/
38core-$(CONFIG_BOARD_MERISC) += arch/avr32/boards/merisc/ 38core-$(CONFIG_BOARD_MERISC) += arch/avr32/boards/merisc/
diff --git a/arch/avr32/boards/atngw100/Kconfig b/arch/avr32/boards/atngw100/Kconfig
index be27a0218ab4..4e55617ade2d 100644
--- a/arch/avr32/boards/atngw100/Kconfig
+++ b/arch/avr32/boards/atngw100/Kconfig
@@ -1,6 +1,17 @@
1# NGW100 customization 1# NGW100 customization
2 2
3if BOARD_ATNGW100 3if BOARD_ATNGW100_COMMON
4
5config BOARD_ATNGW100_MKII_LCD
6 bool "Enable ATNGW100 mkII LCD interface"
7 depends on BOARD_ATNGW100_MKII
8 help
9 This enables the LCD controller (LCDC) in the AT32AP7000. Since the
10 LCDC is multiplexed with MACB1 (LAN) Ethernet port, only one can be
11 enabled at a time.
12
13 This choice enables the LCDC and disables the MACB1 interface marked
14 LAN on the PCB.
4 15
5choice 16choice
6 prompt "Select an NGW100 add-on board to support" 17 prompt "Select an NGW100 add-on board to support"
@@ -11,15 +22,11 @@ config BOARD_ATNGW100_ADDON_NONE
11 22
12config BOARD_ATNGW100_EVKLCD10X 23config BOARD_ATNGW100_EVKLCD10X
13 bool "EVKLCD10X addon board" 24 bool "EVKLCD10X addon board"
25 depends on BOARD_ATNGW100_MKI || BOARD_ATNGW100_MKII_LCD
14 help 26 help
15 This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA) 27 This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA)
16 addon board for the NGW100. By enabling this the LCD controller and 28 addon board for the NGW100 and NGW100 mkII. By enabling this the LCD
 17 AC97 controller are added as platform devices. 29 controller and AC97 controller are added as platform devices.
18
19 This choice disables the detect pin and the write-protect pin for the
20 MCI platform device, since it conflicts with the LCD platform device.
21 The MCI pins can be reenabled by editing the "add device function" but
22 this may break the setup for other displays that use these pins.
23 30
24config BOARD_ATNGW100_MRMT 31config BOARD_ATNGW100_MRMT
25 bool "Mediama RMT1/2 add-on board" 32 bool "Mediama RMT1/2 add-on board"
@@ -55,4 +62,4 @@ if BOARD_ATNGW100_MRMT
55source "arch/avr32/boards/atngw100/Kconfig_mrmt" 62source "arch/avr32/boards/atngw100/Kconfig_mrmt"
56endif 63endif
57 64
58endif # BOARD_ATNGW100 65endif # BOARD_ATNGW100_COMMON
diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c
index 00337112c5ac..20388750d564 100644
--- a/arch/avr32/boards/atngw100/evklcd10x.c
+++ b/arch/avr32/boards/atngw100/evklcd10x.c
@@ -164,7 +164,12 @@ static int __init atevklcd10x_init(void)
164 164
165 at32_add_device_lcdc(0, &atevklcd10x_lcdc_data, 165 at32_add_device_lcdc(0, &atevklcd10x_lcdc_data,
166 fbmem_start, fbmem_size, 166 fbmem_start, fbmem_size,
167 ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL); 167#ifdef CONFIG_BOARD_ATNGW100_MKII
168 ATMEL_LCDC_PRI_18BIT | ATMEL_LCDC_PC_DVAL
169#else
170 ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL
171#endif
172 );
168 173
169 at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH); 174 at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH);
170 175
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index bf78e516a85f..7919be311f4a 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -302,6 +302,7 @@ static int __init mrmt1_init(void)
302 at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), 302 at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ),
303 GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); 303 GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH);
304 set_irq_type( AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING ); 304 set_irq_type( AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING );
305 at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info));
305 spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info)); 306 spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info));
306#endif 307#endif
307 308
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index bc299fbbeb4e..8c6a2440e345 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -20,6 +20,7 @@
20#include <linux/leds.h> 20#include <linux/leds.h>
21#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
22#include <linux/atmel-mci.h> 22#include <linux/atmel-mci.h>
23#include <linux/usb/atmel_usba_udc.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/setup.h> 26#include <asm/setup.h>
@@ -36,6 +37,75 @@ unsigned long at32_board_osc_rates[3] = {
36 [2] = 12000000, /* 12 MHz on osc1 */ 37 [2] = 12000000, /* 12 MHz on osc1 */
37}; 38};
38 39
40/*
41 * The ATNGW100 mkII is very similar to the ATNGW100. Both have the AT32AP7000
42 * chip on board; the difference is that the ATNGW100 mkII has 128 MB 32-bit
43 * SDRAM (the ATNGW100 has 32 MB 16-bit SDRAM) and 256 MB 16-bit NAND flash
44 * (the ATNGW100 has none.)
45 *
46 * The RAM difference is handled by the boot loader, so the only difference we
 47 * end up handling here is the NAND flash, EBI pin reservation and whether the LCDC or
48 * MACB1 should be enabled.
49 */
50#ifdef CONFIG_BOARD_ATNGW100_MKII
51#include <linux/mtd/partitions.h>
52#include <mach/smc.h>
53
54static struct smc_timing nand_timing __initdata = {
55 .ncs_read_setup = 0,
56 .nrd_setup = 10,
57 .ncs_write_setup = 0,
58 .nwe_setup = 10,
59
60 .ncs_read_pulse = 30,
61 .nrd_pulse = 15,
62 .ncs_write_pulse = 30,
63 .nwe_pulse = 15,
64
65 .read_cycle = 30,
66 .write_cycle = 30,
67
68 .ncs_read_recover = 0,
69 .nrd_recover = 15,
70 .ncs_write_recover = 0,
71 /* WE# high -> RE# low min 60 ns */
72 .nwe_recover = 50,
73};
74
75static struct smc_config nand_config __initdata = {
76 .bus_width = 2,
77 .nrd_controlled = 1,
78 .nwe_controlled = 1,
79 .nwait_mode = 0,
80 .byte_write = 0,
81 .tdf_cycles = 2,
82 .tdf_mode = 0,
83};
84
85static struct mtd_partition nand_partitions[] = {
86 {
87 .name = "main",
88 .offset = 0x00000000,
89 .size = MTDPART_SIZ_FULL,
90 },
91};
92
93static struct mtd_partition *nand_part_info(int size, int *num_partitions)
94{
95 *num_partitions = ARRAY_SIZE(nand_partitions);
96 return nand_partitions;
97}
98
99static struct atmel_nand_data atngw100mkii_nand_data __initdata = {
100 .cle = 21,
101 .ale = 22,
102 .rdy_pin = GPIO_PIN_PB(28),
103 .enable_pin = GPIO_PIN_PE(23),
104 .bus_width_16 = true,
105 .partition_info = nand_part_info,
106};
107#endif
108
39/* Initialized by bootloader-specific startup code. */ 109/* Initialized by bootloader-specific startup code. */
40struct tag *bootloader_tags __initdata; 110struct tag *bootloader_tags __initdata;
41 111
@@ -56,9 +126,9 @@ static struct spi_board_info spi0_board_info[] __initdata = {
56static struct mci_platform_data __initdata mci0_data = { 126static struct mci_platform_data __initdata mci0_data = {
57 .slot[0] = { 127 .slot[0] = {
58 .bus_width = 4, 128 .bus_width = 4,
59#if defined(CONFIG_BOARD_ATNGW100_EVKLCD10X) || defined(CONFIG_BOARD_ATNGW100_MRMT1) 129#if defined(CONFIG_BOARD_ATNGW100_MKII)
60 .detect_pin = GPIO_PIN_NONE, 130 .detect_pin = GPIO_PIN_PC(25),
61 .wp_pin = GPIO_PIN_NONE, 131 .wp_pin = GPIO_PIN_PE(22),
62#else 132#else
63 .detect_pin = GPIO_PIN_PC(25), 133 .detect_pin = GPIO_PIN_PC(25),
64 .wp_pin = GPIO_PIN_PE(0), 134 .wp_pin = GPIO_PIN_PE(0),
@@ -66,6 +136,14 @@ static struct mci_platform_data __initdata mci0_data = {
66 }, 136 },
67}; 137};
68 138
139static struct usba_platform_data atngw100_usba_data __initdata = {
140#if defined(CONFIG_BOARD_ATNGW100_MKII)
141 .vbus_pin = GPIO_PIN_PE(26),
142#else
143 .vbus_pin = -ENODEV,
144#endif
145};
146
69/* 147/*
70 * The next two functions should go away as the boot loader is 148 * The next two functions should go away as the boot loader is
71 * supposed to initialize the macb address registers with a valid 149 * supposed to initialize the macb address registers with a valid
@@ -173,18 +251,27 @@ static int __init atngw100_init(void)
173 unsigned i; 251 unsigned i;
174 252
175 /* 253 /*
176 * ATNGW100 uses 16-bit SDRAM interface, so we don't need to 254 * ATNGW100 mkII uses 32-bit SDRAM interface. Reserve the
177 * reserve any pins for it. 255 * SDRAM-specific pins so that nobody messes with them.
178 */ 256 */
257#ifdef CONFIG_BOARD_ATNGW100_MKII
258 at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
259
260 smc_set_timing(&nand_config, &nand_timing);
261 smc_set_configuration(3, &nand_config);
262 at32_add_device_nand(0, &atngw100mkii_nand_data);
263#endif
179 264
180 at32_add_device_usart(0); 265 at32_add_device_usart(0);
181 266
182 set_hw_addr(at32_add_device_eth(0, &eth_data[0])); 267 set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
268#ifndef CONFIG_BOARD_ATNGW100_MKII_LCD
183 set_hw_addr(at32_add_device_eth(1, &eth_data[1])); 269 set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
270#endif
184 271
185 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); 272 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
186 at32_add_device_mci(0, &mci0_data); 273 at32_add_device_mci(0, &mci0_data);
187 at32_add_device_usba(0, NULL); 274 at32_add_device_usba(0, &atngw100_usba_data);
188 275
189 for (i = 0; i < ARRAY_SIZE(ngw_leds); i++) { 276 for (i = 0; i < ARRAY_SIZE(ngw_leds); i++) {
190 at32_select_gpio(ngw_leds[i].gpio, 277 at32_select_gpio(ngw_leds[i].gpio,
@@ -194,10 +281,14 @@ static int __init atngw100_init(void)
194 281
195 /* all these i2c/smbus pins should have external pullups for 282 /* all these i2c/smbus pins should have external pullups for
196 * open-drain sharing among all I2C devices. SDA and SCL do; 283 * open-drain sharing among all I2C devices. SDA and SCL do;
197 * PB28/EXTINT3 doesn't; it should be SMBALERT# (for PMBus), 284 * PB28/EXTINT3 (ATNGW100) and PE21 (ATNGW100 mkII) doesn't; it should
198 * but it's not available off-board. 285 * be SMBALERT# (for PMBus), but it's not available off-board.
199 */ 286 */
287#ifdef CONFIG_BOARD_ATNGW100_MKII
288 at32_select_periph(GPIO_PIOE_BASE, 1 << 21, 0, AT32_GPIOF_PULLUP);
289#else
200 at32_select_periph(GPIO_PIOB_BASE, 1 << 28, 0, AT32_GPIOF_PULLUP); 290 at32_select_periph(GPIO_PIOB_BASE, 1 << 28, 0, AT32_GPIOF_PULLUP);
291#endif
201 at32_select_gpio(i2c_gpio_data.sda_pin, 292 at32_select_gpio(i2c_gpio_data.sda_pin,
202 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH); 293 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
203 at32_select_gpio(i2c_gpio_data.scl_pin, 294 at32_select_gpio(i2c_gpio_data.scl_pin,
@@ -211,14 +302,22 @@ postcore_initcall(atngw100_init);
211 302
212static int __init atngw100_arch_init(void) 303static int __init atngw100_arch_init(void)
213{ 304{
 214 /* PB30 is the otherwise unused jumper on the mainboard, with an 305 /* PB30 (ATNGW100) or PE30 (ATNGW100 mkII) is the otherwise unused
215 * external pullup; the jumper grounds it. Use it however you 306 * jumper on the mainboard, with an external pullup; the jumper grounds
216 * like, including letting U-Boot or Linux tweak boot sequences. 307 * it. Use it however you like, including letting U-Boot or Linux tweak
308 * boot sequences.
217 */ 309 */
310#ifdef CONFIG_BOARD_ATNGW100_MKII
311 at32_select_gpio(GPIO_PIN_PE(30), 0);
312 gpio_request(GPIO_PIN_PE(30), "j15");
313 gpio_direction_input(GPIO_PIN_PE(30));
314 gpio_export(GPIO_PIN_PE(30), false);
315#else
218 at32_select_gpio(GPIO_PIN_PB(30), 0); 316 at32_select_gpio(GPIO_PIN_PB(30), 0);
219 gpio_request(GPIO_PIN_PB(30), "j15"); 317 gpio_request(GPIO_PIN_PB(30), "j15");
220 gpio_direction_input(GPIO_PIN_PB(30)); 318 gpio_direction_input(GPIO_PIN_PB(30));
221 gpio_export(GPIO_PIN_PB(30), false); 319 gpio_export(GPIO_PIN_PB(30), false);
320#endif
222 321
223 /* set_irq_type() after the arch_initcall for EIC has run, and 322 /* set_irq_type() after the arch_initcall for EIC has run, and
224 * before the I2C subsystem could try using this IRQ. 323 * before the I2C subsystem could try using this IRQ.
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 574aca975334..32205c9d37d4 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27-rc1 3# Linux kernel version: 2.6.32-rc5
4# Tue Aug 5 16:00:47 2008 4# Thu Oct 29 09:39:22 2009
5# 5#
6CONFIG_AVR32=y 6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y 7CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y 21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y 22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
24 25
25# 26#
26# General setup 27# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
34CONFIG_SYSVIPC=y 35CONFIG_SYSVIPC=y
35CONFIG_SYSVIPC_SYSCTL=y 36CONFIG_SYSVIPC_SYSCTL=y
36CONFIG_POSIX_MQUEUE=y 37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
37CONFIG_BSD_PROCESS_ACCT=y 39CONFIG_BSD_PROCESS_ACCT=y
38CONFIG_BSD_PROCESS_ACCT_V3=y 40CONFIG_BSD_PROCESS_ACCT_V3=y
39# CONFIG_TASKSTATS is not set 41# CONFIG_TASKSTATS is not set
40# CONFIG_AUDIT is not set 42# CONFIG_AUDIT is not set
43
44#
45# RCU Subsystem
46#
47CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set
49# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set
52# CONFIG_TREE_RCU_TRACE is not set
41# CONFIG_IKCONFIG is not set 53# CONFIG_IKCONFIG is not set
42CONFIG_LOG_BUF_SHIFT=14 54CONFIG_LOG_BUF_SHIFT=14
43# CONFIG_CGROUPS is not set
44# CONFIG_GROUP_SCHED is not set 55# CONFIG_GROUP_SCHED is not set
56# CONFIG_CGROUPS is not set
45CONFIG_SYSFS_DEPRECATED=y 57CONFIG_SYSFS_DEPRECATED=y
46CONFIG_SYSFS_DEPRECATED_V2=y 58CONFIG_SYSFS_DEPRECATED_V2=y
47# CONFIG_RELAY is not set 59# CONFIG_RELAY is not set
48# CONFIG_NAMESPACES is not set 60# CONFIG_NAMESPACES is not set
49CONFIG_BLK_DEV_INITRD=y 61CONFIG_BLK_DEV_INITRD=y
50CONFIG_INITRAMFS_SOURCE="" 62CONFIG_INITRAMFS_SOURCE=""
63CONFIG_RD_GZIP=y
64# CONFIG_RD_BZIP2 is not set
65# CONFIG_RD_LZMA is not set
51CONFIG_CC_OPTIMIZE_FOR_SIZE=y 66CONFIG_CC_OPTIMIZE_FOR_SIZE=y
52CONFIG_SYSCTL=y 67CONFIG_SYSCTL=y
68CONFIG_ANON_INODES=y
53CONFIG_EMBEDDED=y 69CONFIG_EMBEDDED=y
54# CONFIG_SYSCTL_SYSCALL is not set 70# CONFIG_SYSCTL_SYSCALL is not set
55CONFIG_KALLSYMS=y 71CONFIG_KALLSYMS=y
@@ -59,38 +75,40 @@ CONFIG_HOTPLUG=y
59CONFIG_PRINTK=y 75CONFIG_PRINTK=y
60CONFIG_BUG=y 76CONFIG_BUG=y
61CONFIG_ELF_CORE=y 77CONFIG_ELF_CORE=y
62# CONFIG_COMPAT_BRK is not set
63# CONFIG_BASE_FULL is not set 78# CONFIG_BASE_FULL is not set
64CONFIG_FUTEX=y 79CONFIG_FUTEX=y
65CONFIG_ANON_INODES=y
66CONFIG_EPOLL=y 80CONFIG_EPOLL=y
67CONFIG_SIGNALFD=y 81CONFIG_SIGNALFD=y
68CONFIG_TIMERFD=y 82CONFIG_TIMERFD=y
69CONFIG_EVENTFD=y 83CONFIG_EVENTFD=y
70CONFIG_SHMEM=y 84CONFIG_SHMEM=y
85CONFIG_AIO=y
86
87#
88# Kernel Performance Events And Counters
89#
71CONFIG_VM_EVENT_COUNTERS=y 90CONFIG_VM_EVENT_COUNTERS=y
72CONFIG_SLUB_DEBUG=y 91CONFIG_SLUB_DEBUG=y
92# CONFIG_COMPAT_BRK is not set
73# CONFIG_SLAB is not set 93# CONFIG_SLAB is not set
74CONFIG_SLUB=y 94CONFIG_SLUB=y
75# CONFIG_SLOB is not set 95# CONFIG_SLOB is not set
76CONFIG_PROFILING=y 96CONFIG_PROFILING=y
77# CONFIG_MARKERS is not set 97CONFIG_TRACEPOINTS=y
78CONFIG_OPROFILE=m 98CONFIG_OPROFILE=m
79CONFIG_HAVE_OPROFILE=y 99CONFIG_HAVE_OPROFILE=y
80CONFIG_KPROBES=y 100CONFIG_KPROBES=y
81# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
82# CONFIG_HAVE_IOREMAP_PROT is not set
83CONFIG_HAVE_KPROBES=y 101CONFIG_HAVE_KPROBES=y
84# CONFIG_HAVE_KRETPROBES is not set
85# CONFIG_HAVE_ARCH_TRACEHOOK is not set
86# CONFIG_HAVE_DMA_ATTRS is not set
87# CONFIG_USE_GENERIC_SMP_HELPERS is not set
88CONFIG_HAVE_CLK=y 102CONFIG_HAVE_CLK=y
89CONFIG_PROC_PAGE_MONITOR=y 103
104#
105# GCOV-based kernel profiling
106#
107# CONFIG_GCOV_KERNEL is not set
108CONFIG_SLOW_WORK=y
90# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 109# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
91CONFIG_SLABINFO=y 110CONFIG_SLABINFO=y
92CONFIG_RT_MUTEXES=y 111CONFIG_RT_MUTEXES=y
93# CONFIG_TINY_SHMEM is not set
94CONFIG_BASE_SMALL=1 112CONFIG_BASE_SMALL=1
95CONFIG_MODULES=y 113CONFIG_MODULES=y
96# CONFIG_MODULE_FORCE_LOAD is not set 114# CONFIG_MODULE_FORCE_LOAD is not set
@@ -98,11 +116,8 @@ CONFIG_MODULE_UNLOAD=y
98CONFIG_MODULE_FORCE_UNLOAD=y 116CONFIG_MODULE_FORCE_UNLOAD=y
99# CONFIG_MODVERSIONS is not set 117# CONFIG_MODVERSIONS is not set
100# CONFIG_MODULE_SRCVERSION_ALL is not set 118# CONFIG_MODULE_SRCVERSION_ALL is not set
101CONFIG_KMOD=y
102CONFIG_BLOCK=y 119CONFIG_BLOCK=y
103# CONFIG_LBD is not set 120CONFIG_LBDAF=y
104# CONFIG_BLK_DEV_IO_TRACE is not set
105# CONFIG_LSF is not set
106# CONFIG_BLK_DEV_BSG is not set 121# CONFIG_BLK_DEV_BSG is not set
107# CONFIG_BLK_DEV_INTEGRITY is not set 122# CONFIG_BLK_DEV_INTEGRITY is not set
108 123
@@ -118,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
118CONFIG_DEFAULT_CFQ=y 133CONFIG_DEFAULT_CFQ=y
119# CONFIG_DEFAULT_NOOP is not set 134# CONFIG_DEFAULT_NOOP is not set
120CONFIG_DEFAULT_IOSCHED="cfq" 135CONFIG_DEFAULT_IOSCHED="cfq"
121CONFIG_CLASSIC_RCU=y 136CONFIG_FREEZER=y
122 137
123# 138#
124# System Type and features 139# System Type and features
@@ -133,8 +148,23 @@ CONFIG_PERFORMANCE_COUNTERS=y
133CONFIG_PLATFORM_AT32AP=y 148CONFIG_PLATFORM_AT32AP=y
134CONFIG_CPU_AT32AP700X=y 149CONFIG_CPU_AT32AP700X=y
135CONFIG_CPU_AT32AP7000=y 150CONFIG_CPU_AT32AP7000=y
151CONFIG_BOARD_ATNGW100_COMMON=y
136# CONFIG_BOARD_ATSTK1000 is not set 152# CONFIG_BOARD_ATSTK1000 is not set
137CONFIG_BOARD_ATNGW100=y 153CONFIG_BOARD_ATNGW100_MKI=y
154# CONFIG_BOARD_ATNGW100_MKII is not set
155# CONFIG_BOARD_HAMMERHEAD is not set
156# CONFIG_BOARD_FAVR_32 is not set
157# CONFIG_BOARD_MERISC is not set
158# CONFIG_BOARD_MIMC200 is not set
159# CONFIG_BOARD_ATSTK1002 is not set
160# CONFIG_BOARD_ATSTK1003 is not set
161# CONFIG_BOARD_ATSTK1004 is not set
162# CONFIG_BOARD_ATSTK1006 is not set
163# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set
164# CONFIG_BOARD_ATSTK1000_J2_RGB is not set
165CONFIG_BOARD_ATNGW100_ADDON_NONE=y
166# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set
167# CONFIG_BOARD_ATNGW100_MRMT is not set
138CONFIG_LOADER_U_BOOT=y 168CONFIG_LOADER_U_BOOT=y
139 169
140# 170#
@@ -150,7 +180,7 @@ CONFIG_PREEMPT_NONE=y
150# CONFIG_PREEMPT_VOLUNTARY is not set 180# CONFIG_PREEMPT_VOLUNTARY is not set
151# CONFIG_PREEMPT is not set 181# CONFIG_PREEMPT is not set
152CONFIG_QUICKLIST=y 182CONFIG_QUICKLIST=y
153# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set 183# CONFIG_HAVE_ARCH_BOOTMEM is not set
154# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set 184# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
155# CONFIG_NEED_NODE_MEMMAP_SIZE is not set 185# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
156CONFIG_ARCH_FLATMEM_ENABLE=y 186CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -162,14 +192,16 @@ CONFIG_FLATMEM_MANUAL=y
162# CONFIG_SPARSEMEM_MANUAL is not set 192# CONFIG_SPARSEMEM_MANUAL is not set
163CONFIG_FLATMEM=y 193CONFIG_FLATMEM=y
164CONFIG_FLAT_NODE_MEM_MAP=y 194CONFIG_FLAT_NODE_MEM_MAP=y
165# CONFIG_SPARSEMEM_STATIC is not set
166# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
167CONFIG_PAGEFLAGS_EXTENDED=y 195CONFIG_PAGEFLAGS_EXTENDED=y
168CONFIG_SPLIT_PTLOCK_CPUS=4 196CONFIG_SPLIT_PTLOCK_CPUS=4
169# CONFIG_RESOURCES_64BIT is not set 197# CONFIG_PHYS_ADDR_T_64BIT is not set
170CONFIG_ZONE_DMA_FLAG=0 198CONFIG_ZONE_DMA_FLAG=0
171CONFIG_NR_QUICK=2 199CONFIG_NR_QUICK=2
172CONFIG_VIRT_TO_BUS=y 200CONFIG_VIRT_TO_BUS=y
201CONFIG_HAVE_MLOCK=y
202CONFIG_HAVE_MLOCKED_PAGE_BIT=y
203# CONFIG_KSM is not set
204CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
173# CONFIG_OWNERSHIP_TRACE is not set 205# CONFIG_OWNERSHIP_TRACE is not set
174CONFIG_NMI_DEBUGGING=y 206CONFIG_NMI_DEBUGGING=y
175# CONFIG_HZ_100 is not set 207# CONFIG_HZ_100 is not set
@@ -177,7 +209,7 @@ CONFIG_HZ_250=y
177# CONFIG_HZ_300 is not set 209# CONFIG_HZ_300 is not set
178# CONFIG_HZ_1000 is not set 210# CONFIG_HZ_1000 is not set
179CONFIG_HZ=250 211CONFIG_HZ=250
180# CONFIG_SCHED_HRTICK is not set 212CONFIG_SCHED_HRTICK=y
181CONFIG_CMDLINE="" 213CONFIG_CMDLINE=""
182 214
183# 215#
@@ -188,6 +220,7 @@ CONFIG_PM=y
188CONFIG_PM_SLEEP=y 220CONFIG_PM_SLEEP=y
189CONFIG_SUSPEND=y 221CONFIG_SUSPEND=y
190CONFIG_SUSPEND_FREEZER=y 222CONFIG_SUSPEND_FREEZER=y
223# CONFIG_PM_RUNTIME is not set
191CONFIG_ARCH_SUSPEND_POSSIBLE=y 224CONFIG_ARCH_SUSPEND_POSSIBLE=y
192 225
193# 226#
@@ -219,6 +252,8 @@ CONFIG_CPU_FREQ_AT32AP=y
219# Executable file formats 252# Executable file formats
220# 253#
221CONFIG_BINFMT_ELF=y 254CONFIG_BINFMT_ELF=y
255# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
256# CONFIG_HAVE_AOUT is not set
222# CONFIG_BINFMT_MISC is not set 257# CONFIG_BINFMT_MISC is not set
223CONFIG_NET=y 258CONFIG_NET=y
224 259
@@ -271,7 +306,6 @@ CONFIG_INET_TCP_DIAG=y
271CONFIG_TCP_CONG_CUBIC=y 306CONFIG_TCP_CONG_CUBIC=y
272CONFIG_DEFAULT_TCP_CONG="cubic" 307CONFIG_DEFAULT_TCP_CONG="cubic"
273# CONFIG_TCP_MD5SIG is not set 308# CONFIG_TCP_MD5SIG is not set
274# CONFIG_IP_VS is not set
275CONFIG_IPV6=y 309CONFIG_IPV6=y
276# CONFIG_IPV6_PRIVACY is not set 310# CONFIG_IPV6_PRIVACY is not set
277# CONFIG_IPV6_ROUTER_PREF is not set 311# CONFIG_IPV6_ROUTER_PREF is not set
@@ -314,10 +348,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
314CONFIG_NETFILTER_XT_MATCH_MARK=m 348CONFIG_NETFILTER_XT_MATCH_MARK=m
315CONFIG_NETFILTER_XT_MATCH_POLICY=m 349CONFIG_NETFILTER_XT_MATCH_POLICY=m
316CONFIG_NETFILTER_XT_MATCH_STATE=m 350CONFIG_NETFILTER_XT_MATCH_STATE=m
351# CONFIG_IP_VS is not set
317 352
318# 353#
319# IP: Netfilter Configuration 354# IP: Netfilter Configuration
320# 355#
356CONFIG_NF_DEFRAG_IPV4=m
321CONFIG_NF_CONNTRACK_IPV4=m 357CONFIG_NF_CONNTRACK_IPV4=m
322CONFIG_NF_CONNTRACK_PROC_COMPAT=y 358CONFIG_NF_CONNTRACK_PROC_COMPAT=y
323CONFIG_IP_NF_IPTABLES=m 359CONFIG_IP_NF_IPTABLES=m
@@ -343,16 +379,18 @@ CONFIG_IP_NF_MANGLE=m
343CONFIG_NF_CONNTRACK_IPV6=m 379CONFIG_NF_CONNTRACK_IPV6=m
344CONFIG_IP6_NF_IPTABLES=m 380CONFIG_IP6_NF_IPTABLES=m
345CONFIG_IP6_NF_MATCH_IPV6HEADER=m 381CONFIG_IP6_NF_MATCH_IPV6HEADER=m
346CONFIG_IP6_NF_FILTER=m
347CONFIG_IP6_NF_TARGET_LOG=m 382CONFIG_IP6_NF_TARGET_LOG=m
383CONFIG_IP6_NF_FILTER=m
348CONFIG_IP6_NF_TARGET_REJECT=m 384CONFIG_IP6_NF_TARGET_REJECT=m
349CONFIG_IP6_NF_MANGLE=m 385CONFIG_IP6_NF_MANGLE=m
350# CONFIG_IP_DCCP is not set 386# CONFIG_IP_DCCP is not set
351# CONFIG_IP_SCTP is not set 387# CONFIG_IP_SCTP is not set
388# CONFIG_RDS is not set
352# CONFIG_TIPC is not set 389# CONFIG_TIPC is not set
353# CONFIG_ATM is not set 390# CONFIG_ATM is not set
354CONFIG_STP=m 391CONFIG_STP=m
355CONFIG_BRIDGE=m 392CONFIG_BRIDGE=m
393# CONFIG_NET_DSA is not set
356CONFIG_VLAN_8021Q=m 394CONFIG_VLAN_8021Q=m
357# CONFIG_VLAN_8021Q_GVRP is not set 395# CONFIG_VLAN_8021Q_GVRP is not set
358# CONFIG_DECNET is not set 396# CONFIG_DECNET is not set
@@ -364,26 +402,33 @@ CONFIG_LLC=m
364# CONFIG_LAPB is not set 402# CONFIG_LAPB is not set
365# CONFIG_ECONET is not set 403# CONFIG_ECONET is not set
366# CONFIG_WAN_ROUTER is not set 404# CONFIG_WAN_ROUTER is not set
405# CONFIG_PHONET is not set
406# CONFIG_IEEE802154 is not set
367# CONFIG_NET_SCHED is not set 407# CONFIG_NET_SCHED is not set
408# CONFIG_DCB is not set
368 409
369# 410#
370# Network testing 411# Network testing
371# 412#
372# CONFIG_NET_PKTGEN is not set 413# CONFIG_NET_PKTGEN is not set
373# CONFIG_NET_TCPPROBE is not set 414# CONFIG_NET_TCPPROBE is not set
415# CONFIG_NET_DROP_MONITOR is not set
374# CONFIG_HAMRADIO is not set 416# CONFIG_HAMRADIO is not set
375# CONFIG_CAN is not set 417# CONFIG_CAN is not set
376# CONFIG_IRDA is not set 418# CONFIG_IRDA is not set
377# CONFIG_BT is not set 419# CONFIG_BT is not set
378# CONFIG_AF_RXRPC is not set 420# CONFIG_AF_RXRPC is not set
421CONFIG_WIRELESS=y
422# CONFIG_CFG80211 is not set
423CONFIG_CFG80211_DEFAULT_PS_VALUE=0
424# CONFIG_WIRELESS_OLD_REGULATORY is not set
425# CONFIG_WIRELESS_EXT is not set
426# CONFIG_LIB80211 is not set
379 427
380# 428#
381# Wireless 429# CFG80211 needs to be enabled for MAC80211
382# 430#
383# CONFIG_CFG80211 is not set 431# CONFIG_WIMAX is not set
384# CONFIG_WIRELESS_EXT is not set
385# CONFIG_MAC80211 is not set
386# CONFIG_IEEE80211 is not set
387# CONFIG_RFKILL is not set 432# CONFIG_RFKILL is not set
388# CONFIG_NET_9P is not set 433# CONFIG_NET_9P is not set
389 434
@@ -395,6 +440,7 @@ CONFIG_LLC=m
395# Generic Driver Options 440# Generic Driver Options
396# 441#
397CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 442CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
443# CONFIG_DEVTMPFS is not set
398CONFIG_STANDALONE=y 444CONFIG_STANDALONE=y
399# CONFIG_PREVENT_FIRMWARE_BUILD is not set 445# CONFIG_PREVENT_FIRMWARE_BUILD is not set
400# CONFIG_FW_LOADER is not set 446# CONFIG_FW_LOADER is not set
@@ -404,6 +450,7 @@ CONFIG_STANDALONE=y
404# CONFIG_CONNECTOR is not set 450# CONFIG_CONNECTOR is not set
405CONFIG_MTD=y 451CONFIG_MTD=y
406# CONFIG_MTD_DEBUG is not set 452# CONFIG_MTD_DEBUG is not set
453# CONFIG_MTD_TESTS is not set
407# CONFIG_MTD_CONCAT is not set 454# CONFIG_MTD_CONCAT is not set
408CONFIG_MTD_PARTITIONS=y 455CONFIG_MTD_PARTITIONS=y
409# CONFIG_MTD_REDBOOT_PARTS is not set 456# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -453,16 +500,17 @@ CONFIG_MTD_CFI_UTIL=y
453# 500#
454# CONFIG_MTD_COMPLEX_MAPPINGS is not set 501# CONFIG_MTD_COMPLEX_MAPPINGS is not set
455CONFIG_MTD_PHYSMAP=y 502CONFIG_MTD_PHYSMAP=y
456CONFIG_MTD_PHYSMAP_START=0x80000000 503# CONFIG_MTD_PHYSMAP_COMPAT is not set
457CONFIG_MTD_PHYSMAP_LEN=0x0
458CONFIG_MTD_PHYSMAP_BANKWIDTH=2
459# CONFIG_MTD_PLATRAM is not set 504# CONFIG_MTD_PLATRAM is not set
460 505
461# 506#
462# Self-contained MTD device drivers 507# Self-contained MTD device drivers
463# 508#
464CONFIG_MTD_DATAFLASH=y 509CONFIG_MTD_DATAFLASH=y
510# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
511# CONFIG_MTD_DATAFLASH_OTP is not set
465# CONFIG_MTD_M25P80 is not set 512# CONFIG_MTD_M25P80 is not set
513# CONFIG_MTD_SST25L is not set
466# CONFIG_MTD_SLRAM is not set 514# CONFIG_MTD_SLRAM is not set
467# CONFIG_MTD_PHRAM is not set 515# CONFIG_MTD_PHRAM is not set
468# CONFIG_MTD_MTDRAM is not set 516# CONFIG_MTD_MTDRAM is not set
@@ -478,9 +526,22 @@ CONFIG_MTD_DATAFLASH=y
478# CONFIG_MTD_ONENAND is not set 526# CONFIG_MTD_ONENAND is not set
479 527
480# 528#
529# LPDDR flash memory drivers
530#
531# CONFIG_MTD_LPDDR is not set
532
533#
481# UBI - Unsorted block images 534# UBI - Unsorted block images
482# 535#
483# CONFIG_MTD_UBI is not set 536CONFIG_MTD_UBI=y
537CONFIG_MTD_UBI_WL_THRESHOLD=4096
538CONFIG_MTD_UBI_BEB_RESERVE=1
539# CONFIG_MTD_UBI_GLUEBI is not set
540
541#
542# UBI debugging options
543#
544# CONFIG_MTD_UBI_DEBUG is not set
484# CONFIG_PARPORT is not set 545# CONFIG_PARPORT is not set
485CONFIG_BLK_DEV=y 546CONFIG_BLK_DEV=y
486# CONFIG_BLK_DEV_COW_COMMON is not set 547# CONFIG_BLK_DEV_COW_COMMON is not set
@@ -498,10 +559,20 @@ CONFIG_MISC_DEVICES=y
498CONFIG_ATMEL_TCLIB=y 559CONFIG_ATMEL_TCLIB=y
499CONFIG_ATMEL_TCB_CLKSRC=y 560CONFIG_ATMEL_TCB_CLKSRC=y
500CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 561CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
501# CONFIG_EEPROM_93CX6 is not set 562# CONFIG_ICS932S401 is not set
502# CONFIG_ATMEL_SSC is not set 563# CONFIG_ATMEL_SSC is not set
503# CONFIG_ENCLOSURE_SERVICES is not set 564# CONFIG_ENCLOSURE_SERVICES is not set
504# CONFIG_HAVE_IDE is not set 565# CONFIG_ISL29003 is not set
566# CONFIG_C2PORT is not set
567
568#
569# EEPROM support
570#
571CONFIG_EEPROM_AT24=m
572# CONFIG_EEPROM_AT25 is not set
573# CONFIG_EEPROM_LEGACY is not set
574# CONFIG_EEPROM_MAX6875 is not set
575# CONFIG_EEPROM_93CX6 is not set
505 576
506# 577#
507# SCSI device support 578# SCSI device support
@@ -534,26 +605,37 @@ CONFIG_PHYLIB=y
534# CONFIG_BROADCOM_PHY is not set 605# CONFIG_BROADCOM_PHY is not set
535# CONFIG_ICPLUS_PHY is not set 606# CONFIG_ICPLUS_PHY is not set
536# CONFIG_REALTEK_PHY is not set 607# CONFIG_REALTEK_PHY is not set
608# CONFIG_NATIONAL_PHY is not set
609# CONFIG_STE10XP is not set
610# CONFIG_LSI_ET1011C_PHY is not set
537# CONFIG_FIXED_PHY is not set 611# CONFIG_FIXED_PHY is not set
538# CONFIG_MDIO_BITBANG is not set 612# CONFIG_MDIO_BITBANG is not set
539CONFIG_NET_ETHERNET=y 613CONFIG_NET_ETHERNET=y
540# CONFIG_MII is not set 614# CONFIG_MII is not set
541CONFIG_MACB=y 615CONFIG_MACB=y
542# CONFIG_ENC28J60 is not set 616# CONFIG_ENC28J60 is not set
617# CONFIG_ETHOC is not set
618# CONFIG_DNET is not set
543# CONFIG_IBM_NEW_EMAC_ZMII is not set 619# CONFIG_IBM_NEW_EMAC_ZMII is not set
544# CONFIG_IBM_NEW_EMAC_RGMII is not set 620# CONFIG_IBM_NEW_EMAC_RGMII is not set
545# CONFIG_IBM_NEW_EMAC_TAH is not set 621# CONFIG_IBM_NEW_EMAC_TAH is not set
546# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 622# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
623# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
624# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
625# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
547# CONFIG_B44 is not set 626# CONFIG_B44 is not set
627# CONFIG_KS8842 is not set
628# CONFIG_KS8851 is not set
629# CONFIG_KS8851_MLL is not set
548# CONFIG_NETDEV_1000 is not set 630# CONFIG_NETDEV_1000 is not set
549# CONFIG_NETDEV_10000 is not set 631# CONFIG_NETDEV_10000 is not set
632CONFIG_WLAN=y
633# CONFIG_WLAN_PRE80211 is not set
634# CONFIG_WLAN_80211 is not set
550 635
551# 636#
552# Wireless LAN 637# Enable WiMAX (Networking options) to see the WiMAX drivers
553# 638#
554# CONFIG_WLAN_PRE80211 is not set
555# CONFIG_WLAN_80211 is not set
556# CONFIG_IWLWIFI_LEDS is not set
557# CONFIG_WAN is not set 639# CONFIG_WAN is not set
558CONFIG_PPP=m 640CONFIG_PPP=m
559# CONFIG_PPP_MULTILINK is not set 641# CONFIG_PPP_MULTILINK is not set
@@ -603,9 +685,11 @@ CONFIG_SERIAL_ATMEL=y
603CONFIG_SERIAL_ATMEL_CONSOLE=y 685CONFIG_SERIAL_ATMEL_CONSOLE=y
604CONFIG_SERIAL_ATMEL_PDC=y 686CONFIG_SERIAL_ATMEL_PDC=y
605# CONFIG_SERIAL_ATMEL_TTYAT is not set 687# CONFIG_SERIAL_ATMEL_TTYAT is not set
688# CONFIG_SERIAL_MAX3100 is not set
606CONFIG_SERIAL_CORE=y 689CONFIG_SERIAL_CORE=y
607CONFIG_SERIAL_CORE_CONSOLE=y 690CONFIG_SERIAL_CORE_CONSOLE=y
608CONFIG_UNIX98_PTYS=y 691CONFIG_UNIX98_PTYS=y
692# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
609# CONFIG_LEGACY_PTYS is not set 693# CONFIG_LEGACY_PTYS is not set
610# CONFIG_IPMI_HANDLER is not set 694# CONFIG_IPMI_HANDLER is not set
611# CONFIG_HW_RANDOM is not set 695# CONFIG_HW_RANDOM is not set
@@ -614,7 +698,9 @@ CONFIG_UNIX98_PTYS=y
614# CONFIG_TCG_TPM is not set 698# CONFIG_TCG_TPM is not set
615CONFIG_I2C=m 699CONFIG_I2C=m
616CONFIG_I2C_BOARDINFO=y 700CONFIG_I2C_BOARDINFO=y
701CONFIG_I2C_COMPAT=y
617CONFIG_I2C_CHARDEV=m 702CONFIG_I2C_CHARDEV=m
703CONFIG_I2C_HELPER_AUTO=y
618CONFIG_I2C_ALGOBIT=m 704CONFIG_I2C_ALGOBIT=m
619 705
620# 706#
@@ -624,6 +710,7 @@ CONFIG_I2C_ALGOBIT=m
624# 710#
625# I2C system bus drivers (mostly embedded / system-on-chip) 711# I2C system bus drivers (mostly embedded / system-on-chip)
626# 712#
713# CONFIG_I2C_DESIGNWARE is not set
627CONFIG_I2C_GPIO=m 714CONFIG_I2C_GPIO=m
628# CONFIG_I2C_OCORES is not set 715# CONFIG_I2C_OCORES is not set
629# CONFIG_I2C_SIMTEC is not set 716# CONFIG_I2C_SIMTEC is not set
@@ -644,14 +731,6 @@ CONFIG_I2C_GPIO=m
644# Miscellaneous I2C Chip support 731# Miscellaneous I2C Chip support
645# 732#
646# CONFIG_DS1682 is not set 733# CONFIG_DS1682 is not set
647CONFIG_EEPROM_AT24=m
648# CONFIG_EEPROM_LEGACY is not set
649# CONFIG_SENSORS_PCF8574 is not set
650# CONFIG_PCF8575 is not set
651# CONFIG_SENSORS_PCA9539 is not set
652# CONFIG_SENSORS_PCF8591 is not set
653# CONFIG_TPS65010 is not set
654# CONFIG_SENSORS_MAX6875 is not set
655# CONFIG_SENSORS_TSL2550 is not set 734# CONFIG_SENSORS_TSL2550 is not set
656# CONFIG_I2C_DEBUG_CORE is not set 735# CONFIG_I2C_DEBUG_CORE is not set
657# CONFIG_I2C_DEBUG_ALGO is not set 736# CONFIG_I2C_DEBUG_ALGO is not set
@@ -666,19 +745,28 @@ CONFIG_SPI_MASTER=y
666# 745#
667CONFIG_SPI_ATMEL=y 746CONFIG_SPI_ATMEL=y
668# CONFIG_SPI_BITBANG is not set 747# CONFIG_SPI_BITBANG is not set
748# CONFIG_SPI_GPIO is not set
669 749
670# 750#
671# SPI Protocol Masters 751# SPI Protocol Masters
672# 752#
673# CONFIG_EEPROM_AT25 is not set
674CONFIG_SPI_SPIDEV=m 753CONFIG_SPI_SPIDEV=m
675# CONFIG_SPI_TLE62X0 is not set 754# CONFIG_SPI_TLE62X0 is not set
755
756#
757# PPS support
758#
759# CONFIG_PPS is not set
676CONFIG_ARCH_REQUIRE_GPIOLIB=y 760CONFIG_ARCH_REQUIRE_GPIOLIB=y
677CONFIG_GPIOLIB=y 761CONFIG_GPIOLIB=y
678# CONFIG_DEBUG_GPIO is not set 762# CONFIG_DEBUG_GPIO is not set
679CONFIG_GPIO_SYSFS=y 763CONFIG_GPIO_SYSFS=y
680 764
681# 765#
766# Memory mapped GPIO expanders:
767#
768
769#
682# I2C GPIO expanders: 770# I2C GPIO expanders:
683# 771#
684# CONFIG_GPIO_MAX732X is not set 772# CONFIG_GPIO_MAX732X is not set
@@ -694,11 +782,15 @@ CONFIG_GPIO_SYSFS=y
694# 782#
695# CONFIG_GPIO_MAX7301 is not set 783# CONFIG_GPIO_MAX7301 is not set
696# CONFIG_GPIO_MCP23S08 is not set 784# CONFIG_GPIO_MCP23S08 is not set
785# CONFIG_GPIO_MC33880 is not set
786
787#
788# AC97 GPIO expanders:
789#
697# CONFIG_W1 is not set 790# CONFIG_W1 is not set
698# CONFIG_POWER_SUPPLY is not set 791# CONFIG_POWER_SUPPLY is not set
699# CONFIG_HWMON is not set 792# CONFIG_HWMON is not set
700# CONFIG_THERMAL is not set 793# CONFIG_THERMAL is not set
701# CONFIG_THERMAL_HWMON is not set
702CONFIG_WATCHDOG=y 794CONFIG_WATCHDOG=y
703# CONFIG_WATCHDOG_NOWAYOUT is not set 795# CONFIG_WATCHDOG_NOWAYOUT is not set
704 796
@@ -707,11 +799,11 @@ CONFIG_WATCHDOG=y
707# 799#
708# CONFIG_SOFT_WATCHDOG is not set 800# CONFIG_SOFT_WATCHDOG is not set
709CONFIG_AT32AP700X_WDT=y 801CONFIG_AT32AP700X_WDT=y
802CONFIG_SSB_POSSIBLE=y
710 803
711# 804#
712# Sonics Silicon Backplane 805# Sonics Silicon Backplane
713# 806#
714CONFIG_SSB_POSSIBLE=y
715# CONFIG_SSB is not set 807# CONFIG_SSB is not set
716 808
717# 809#
@@ -720,22 +812,17 @@ CONFIG_SSB_POSSIBLE=y
720# CONFIG_MFD_CORE is not set 812# CONFIG_MFD_CORE is not set
721# CONFIG_MFD_SM501 is not set 813# CONFIG_MFD_SM501 is not set
722# CONFIG_HTC_PASIC3 is not set 814# CONFIG_HTC_PASIC3 is not set
723 815# CONFIG_TPS65010 is not set
724# 816# CONFIG_MFD_TMIO is not set
725# Multimedia devices 817# CONFIG_MFD_WM8400 is not set
726# 818# CONFIG_MFD_WM831X is not set
727 819# CONFIG_MFD_WM8350_I2C is not set
728# 820# CONFIG_MFD_PCF50633 is not set
729# Multimedia core support 821# CONFIG_MFD_MC13783 is not set
730# 822# CONFIG_AB3100_CORE is not set
731# CONFIG_VIDEO_DEV is not set 823# CONFIG_EZX_PCAP is not set
732# CONFIG_DVB_CORE is not set 824# CONFIG_REGULATOR is not set
733# CONFIG_VIDEO_MEDIA is not set 825# CONFIG_MEDIA_SUPPORT is not set
734
735#
736# Multimedia drivers
737#
738# CONFIG_DAB is not set
739 826
740# 827#
741# Graphics support 828# Graphics support
@@ -756,32 +843,43 @@ CONFIG_USB_SUPPORT=y
756# CONFIG_USB_ARCH_HAS_EHCI is not set 843# CONFIG_USB_ARCH_HAS_EHCI is not set
757# CONFIG_USB_OTG_WHITELIST is not set 844# CONFIG_USB_OTG_WHITELIST is not set
758# CONFIG_USB_OTG_BLACKLIST_HUB is not set 845# CONFIG_USB_OTG_BLACKLIST_HUB is not set
846# CONFIG_USB_GADGET_MUSB_HDRC is not set
759 847
760# 848#
761# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 849# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
762# 850#
763CONFIG_USB_GADGET=y 851CONFIG_USB_GADGET=y
764# CONFIG_USB_GADGET_DEBUG is not set 852# CONFIG_USB_GADGET_DEBUG is not set
765# CONFIG_USB_GADGET_DEBUG_FILES is not set 853# CONFIG_USB_GADGET_DEBUG_FILES is not set
854# CONFIG_USB_GADGET_DEBUG_FS is not set
855CONFIG_USB_GADGET_VBUS_DRAW=2
766CONFIG_USB_GADGET_SELECTED=y 856CONFIG_USB_GADGET_SELECTED=y
767# CONFIG_USB_GADGET_AMD5536UDC is not set 857# CONFIG_USB_GADGET_AT91 is not set
768CONFIG_USB_GADGET_ATMEL_USBA=y 858CONFIG_USB_GADGET_ATMEL_USBA=y
769CONFIG_USB_ATMEL_USBA=y 859CONFIG_USB_ATMEL_USBA=y
770# CONFIG_USB_GADGET_FSL_USB2 is not set 860# CONFIG_USB_GADGET_FSL_USB2 is not set
771# CONFIG_USB_GADGET_NET2280 is not set
772# CONFIG_USB_GADGET_PXA25X is not set
773# CONFIG_USB_GADGET_M66592 is not set
774# CONFIG_USB_GADGET_PXA27X is not set
775# CONFIG_USB_GADGET_GOKU is not set
776# CONFIG_USB_GADGET_LH7A40X is not set 861# CONFIG_USB_GADGET_LH7A40X is not set
777# CONFIG_USB_GADGET_OMAP is not set 862# CONFIG_USB_GADGET_OMAP is not set
863# CONFIG_USB_GADGET_PXA25X is not set
864# CONFIG_USB_GADGET_R8A66597 is not set
865# CONFIG_USB_GADGET_PXA27X is not set
866# CONFIG_USB_GADGET_S3C_HSOTG is not set
867# CONFIG_USB_GADGET_IMX is not set
778# CONFIG_USB_GADGET_S3C2410 is not set 868# CONFIG_USB_GADGET_S3C2410 is not set
779# CONFIG_USB_GADGET_AT91 is not set 869# CONFIG_USB_GADGET_M66592 is not set
870# CONFIG_USB_GADGET_AMD5536UDC is not set
871# CONFIG_USB_GADGET_FSL_QE is not set
872# CONFIG_USB_GADGET_CI13XXX is not set
873# CONFIG_USB_GADGET_NET2280 is not set
874# CONFIG_USB_GADGET_GOKU is not set
875# CONFIG_USB_GADGET_LANGWELL is not set
780# CONFIG_USB_GADGET_DUMMY_HCD is not set 876# CONFIG_USB_GADGET_DUMMY_HCD is not set
781CONFIG_USB_GADGET_DUALSPEED=y 877CONFIG_USB_GADGET_DUALSPEED=y
782CONFIG_USB_ZERO=m 878CONFIG_USB_ZERO=m
879# CONFIG_USB_AUDIO is not set
783CONFIG_USB_ETH=m 880CONFIG_USB_ETH=m
784CONFIG_USB_ETH_RNDIS=y 881CONFIG_USB_ETH_RNDIS=y
882# CONFIG_USB_ETH_EEM is not set
785CONFIG_USB_GADGETFS=m 883CONFIG_USB_GADGETFS=m
786CONFIG_USB_FILE_STORAGE=m 884CONFIG_USB_FILE_STORAGE=m
787# CONFIG_USB_FILE_STORAGE_TEST is not set 885# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -789,12 +887,18 @@ CONFIG_USB_G_SERIAL=m
789# CONFIG_USB_MIDI_GADGET is not set 887# CONFIG_USB_MIDI_GADGET is not set
790# CONFIG_USB_G_PRINTER is not set 888# CONFIG_USB_G_PRINTER is not set
791CONFIG_USB_CDC_COMPOSITE=m 889CONFIG_USB_CDC_COMPOSITE=m
890
891#
892# OTG and related infrastructure
893#
894# CONFIG_USB_GPIO_VBUS is not set
895# CONFIG_NOP_USB_XCEIV is not set
792CONFIG_MMC=y 896CONFIG_MMC=y
793# CONFIG_MMC_DEBUG is not set 897# CONFIG_MMC_DEBUG is not set
794# CONFIG_MMC_UNSAFE_RESUME is not set 898# CONFIG_MMC_UNSAFE_RESUME is not set
795 899
796# 900#
797# MMC/SD Card Drivers 901# MMC/SD/SDIO Card Drivers
798# 902#
799CONFIG_MMC_BLOCK=y 903CONFIG_MMC_BLOCK=y
800CONFIG_MMC_BLOCK_BOUNCE=y 904CONFIG_MMC_BLOCK_BOUNCE=y
@@ -802,10 +906,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y
802CONFIG_MMC_TEST=m 906CONFIG_MMC_TEST=m
803 907
804# 908#
805# MMC/SD Host Controller Drivers 909# MMC/SD/SDIO Host Controller Drivers
806# 910#
807# CONFIG_MMC_SDHCI is not set 911# CONFIG_MMC_SDHCI is not set
912# CONFIG_MMC_AT91 is not set
808CONFIG_MMC_ATMELMCI=y 913CONFIG_MMC_ATMELMCI=y
914# CONFIG_MMC_ATMELMCI_DMA is not set
809CONFIG_MMC_SPI=m 915CONFIG_MMC_SPI=m
810# CONFIG_MEMSTICK is not set 916# CONFIG_MEMSTICK is not set
811CONFIG_NEW_LEDS=y 917CONFIG_NEW_LEDS=y
@@ -815,7 +921,11 @@ CONFIG_LEDS_CLASS=y
815# LED drivers 921# LED drivers
816# 922#
817CONFIG_LEDS_GPIO=y 923CONFIG_LEDS_GPIO=y
924CONFIG_LEDS_GPIO_PLATFORM=y
925# CONFIG_LEDS_LP3944 is not set
818# CONFIG_LEDS_PCA955X is not set 926# CONFIG_LEDS_PCA955X is not set
927# CONFIG_LEDS_DAC124S085 is not set
928# CONFIG_LEDS_BD2802 is not set
819 929
820# 930#
821# LED Triggers 931# LED Triggers
@@ -823,7 +933,13 @@ CONFIG_LEDS_GPIO=y
823CONFIG_LEDS_TRIGGERS=y 933CONFIG_LEDS_TRIGGERS=y
824CONFIG_LEDS_TRIGGER_TIMER=y 934CONFIG_LEDS_TRIGGER_TIMER=y
825CONFIG_LEDS_TRIGGER_HEARTBEAT=y 935CONFIG_LEDS_TRIGGER_HEARTBEAT=y
936# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
937# CONFIG_LEDS_TRIGGER_GPIO is not set
826CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 938CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
939
940#
941# iptables trigger is under Netfilter config (LED target)
942#
827# CONFIG_ACCESSIBILITY is not set 943# CONFIG_ACCESSIBILITY is not set
828CONFIG_RTC_LIB=y 944CONFIG_RTC_LIB=y
829CONFIG_RTC_CLASS=y 945CONFIG_RTC_CLASS=y
@@ -855,25 +971,33 @@ CONFIG_RTC_INTF_DEV=y
855# CONFIG_RTC_DRV_M41T80 is not set 971# CONFIG_RTC_DRV_M41T80 is not set
856# CONFIG_RTC_DRV_S35390A is not set 972# CONFIG_RTC_DRV_S35390A is not set
857# CONFIG_RTC_DRV_FM3130 is not set 973# CONFIG_RTC_DRV_FM3130 is not set
974# CONFIG_RTC_DRV_RX8581 is not set
975# CONFIG_RTC_DRV_RX8025 is not set
858 976
859# 977#
860# SPI RTC drivers 978# SPI RTC drivers
861# 979#
862# CONFIG_RTC_DRV_M41T94 is not set 980# CONFIG_RTC_DRV_M41T94 is not set
863# CONFIG_RTC_DRV_DS1305 is not set 981# CONFIG_RTC_DRV_DS1305 is not set
982# CONFIG_RTC_DRV_DS1390 is not set
864# CONFIG_RTC_DRV_MAX6902 is not set 983# CONFIG_RTC_DRV_MAX6902 is not set
865# CONFIG_RTC_DRV_R9701 is not set 984# CONFIG_RTC_DRV_R9701 is not set
866# CONFIG_RTC_DRV_RS5C348 is not set 985# CONFIG_RTC_DRV_RS5C348 is not set
986# CONFIG_RTC_DRV_DS3234 is not set
987# CONFIG_RTC_DRV_PCF2123 is not set
867 988
868# 989#
869# Platform RTC drivers 990# Platform RTC drivers
870# 991#
992# CONFIG_RTC_DRV_DS1286 is not set
871# CONFIG_RTC_DRV_DS1511 is not set 993# CONFIG_RTC_DRV_DS1511 is not set
872# CONFIG_RTC_DRV_DS1553 is not set 994# CONFIG_RTC_DRV_DS1553 is not set
873# CONFIG_RTC_DRV_DS1742 is not set 995# CONFIG_RTC_DRV_DS1742 is not set
874# CONFIG_RTC_DRV_STK17TA8 is not set 996# CONFIG_RTC_DRV_STK17TA8 is not set
875# CONFIG_RTC_DRV_M48T86 is not set 997# CONFIG_RTC_DRV_M48T86 is not set
998# CONFIG_RTC_DRV_M48T35 is not set
876# CONFIG_RTC_DRV_M48T59 is not set 999# CONFIG_RTC_DRV_M48T59 is not set
1000# CONFIG_RTC_DRV_BQ4802 is not set
877# CONFIG_RTC_DRV_V3020 is not set 1001# CONFIG_RTC_DRV_V3020 is not set
878 1002
879# 1003#
@@ -892,24 +1016,38 @@ CONFIG_DMA_ENGINE=y
892# DMA Clients 1016# DMA Clients
893# 1017#
894# CONFIG_NET_DMA is not set 1018# CONFIG_NET_DMA is not set
1019# CONFIG_ASYNC_TX_DMA is not set
895# CONFIG_DMATEST is not set 1020# CONFIG_DMATEST is not set
1021# CONFIG_AUXDISPLAY is not set
896# CONFIG_UIO is not set 1022# CONFIG_UIO is not set
897 1023
898# 1024#
1025# TI VLYNQ
1026#
1027# CONFIG_STAGING is not set
1028
1029#
899# File systems 1030# File systems
900# 1031#
901CONFIG_EXT2_FS=m 1032CONFIG_EXT2_FS=y
902# CONFIG_EXT2_FS_XATTR is not set 1033# CONFIG_EXT2_FS_XATTR is not set
903# CONFIG_EXT2_FS_XIP is not set 1034# CONFIG_EXT2_FS_XIP is not set
904CONFIG_EXT3_FS=m 1035CONFIG_EXT3_FS=y
1036# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
905# CONFIG_EXT3_FS_XATTR is not set 1037# CONFIG_EXT3_FS_XATTR is not set
906# CONFIG_EXT4DEV_FS is not set 1038# CONFIG_EXT4_FS is not set
907CONFIG_JBD=m 1039CONFIG_JBD=y
1040# CONFIG_JBD_DEBUG is not set
908# CONFIG_REISERFS_FS is not set 1041# CONFIG_REISERFS_FS is not set
909# CONFIG_JFS_FS is not set 1042# CONFIG_JFS_FS is not set
910# CONFIG_FS_POSIX_ACL is not set 1043# CONFIG_FS_POSIX_ACL is not set
911# CONFIG_XFS_FS is not set 1044# CONFIG_XFS_FS is not set
1045# CONFIG_GFS2_FS is not set
912# CONFIG_OCFS2_FS is not set 1046# CONFIG_OCFS2_FS is not set
1047# CONFIG_BTRFS_FS is not set
1048# CONFIG_NILFS2_FS is not set
1049CONFIG_FILE_LOCKING=y
1050CONFIG_FSNOTIFY=y
913# CONFIG_DNOTIFY is not set 1051# CONFIG_DNOTIFY is not set
914CONFIG_INOTIFY=y 1052CONFIG_INOTIFY=y
915CONFIG_INOTIFY_USER=y 1053CONFIG_INOTIFY_USER=y
@@ -917,6 +1055,12 @@ CONFIG_INOTIFY_USER=y
917# CONFIG_AUTOFS_FS is not set 1055# CONFIG_AUTOFS_FS is not set
918# CONFIG_AUTOFS4_FS is not set 1056# CONFIG_AUTOFS4_FS is not set
919CONFIG_FUSE_FS=m 1057CONFIG_FUSE_FS=m
1058# CONFIG_CUSE is not set
1059
1060#
1061# Caches
1062#
1063# CONFIG_FSCACHE is not set
920 1064
921# 1065#
922# CD-ROM/DVD Filesystems 1066# CD-ROM/DVD Filesystems
@@ -940,15 +1084,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
940CONFIG_PROC_FS=y 1084CONFIG_PROC_FS=y
941# CONFIG_PROC_KCORE is not set 1085# CONFIG_PROC_KCORE is not set
942CONFIG_PROC_SYSCTL=y 1086CONFIG_PROC_SYSCTL=y
1087CONFIG_PROC_PAGE_MONITOR=y
943CONFIG_SYSFS=y 1088CONFIG_SYSFS=y
944CONFIG_TMPFS=y 1089CONFIG_TMPFS=y
945# CONFIG_TMPFS_POSIX_ACL is not set 1090# CONFIG_TMPFS_POSIX_ACL is not set
946# CONFIG_HUGETLB_PAGE is not set 1091# CONFIG_HUGETLB_PAGE is not set
947CONFIG_CONFIGFS_FS=m 1092CONFIG_CONFIGFS_FS=m
948 1093CONFIG_MISC_FILESYSTEMS=y
949#
950# Miscellaneous filesystems
951#
952# CONFIG_ADFS_FS is not set 1094# CONFIG_ADFS_FS is not set
953# CONFIG_AFFS_FS is not set 1095# CONFIG_AFFS_FS is not set
954# CONFIG_HFS_FS is not set 1096# CONFIG_HFS_FS is not set
@@ -967,7 +1109,9 @@ CONFIG_JFFS2_ZLIB=y
967# CONFIG_JFFS2_LZO is not set 1109# CONFIG_JFFS2_LZO is not set
968CONFIG_JFFS2_RTIME=y 1110CONFIG_JFFS2_RTIME=y
969# CONFIG_JFFS2_RUBIN is not set 1111# CONFIG_JFFS2_RUBIN is not set
1112# CONFIG_UBIFS_FS is not set
970# CONFIG_CRAMFS is not set 1113# CONFIG_CRAMFS is not set
1114# CONFIG_SQUASHFS is not set
971# CONFIG_VXFS_FS is not set 1115# CONFIG_VXFS_FS is not set
972# CONFIG_MINIX_FS is not set 1116# CONFIG_MINIX_FS is not set
973# CONFIG_OMFS_FS is not set 1117# CONFIG_OMFS_FS is not set
@@ -975,7 +1119,9 @@ CONFIG_JFFS2_RTIME=y
975# CONFIG_QNX4FS_FS is not set 1119# CONFIG_QNX4FS_FS is not set
976# CONFIG_ROMFS_FS is not set 1120# CONFIG_ROMFS_FS is not set
977# CONFIG_SYSV_FS is not set 1121# CONFIG_SYSV_FS is not set
978# CONFIG_UFS_FS is not set 1122CONFIG_UFS_FS=y
1123# CONFIG_UFS_FS_WRITE is not set
1124# CONFIG_UFS_DEBUG is not set
979CONFIG_NETWORK_FILESYSTEMS=y 1125CONFIG_NETWORK_FILESYSTEMS=y
980CONFIG_NFS_FS=y 1126CONFIG_NFS_FS=y
981CONFIG_NFS_V3=y 1127CONFIG_NFS_V3=y
@@ -1060,14 +1206,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1060CONFIG_ENABLE_MUST_CHECK=y 1206CONFIG_ENABLE_MUST_CHECK=y
1061CONFIG_FRAME_WARN=1024 1207CONFIG_FRAME_WARN=1024
1062CONFIG_MAGIC_SYSRQ=y 1208CONFIG_MAGIC_SYSRQ=y
1209# CONFIG_STRIP_ASM_SYMS is not set
1063# CONFIG_UNUSED_SYMBOLS is not set 1210# CONFIG_UNUSED_SYMBOLS is not set
1064# CONFIG_DEBUG_FS is not set 1211CONFIG_DEBUG_FS=y
1065# CONFIG_HEADERS_CHECK is not set 1212# CONFIG_HEADERS_CHECK is not set
1066CONFIG_DEBUG_KERNEL=y 1213CONFIG_DEBUG_KERNEL=y
1067# CONFIG_DEBUG_SHIRQ is not set 1214# CONFIG_DEBUG_SHIRQ is not set
1068CONFIG_DETECT_SOFTLOCKUP=y 1215CONFIG_DETECT_SOFTLOCKUP=y
1069# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1216# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1070CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1217CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1218CONFIG_DETECT_HUNG_TASK=y
1219# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1220CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1071CONFIG_SCHED_DEBUG=y 1221CONFIG_SCHED_DEBUG=y
1072# CONFIG_SCHEDSTATS is not set 1222# CONFIG_SCHEDSTATS is not set
1073# CONFIG_TIMER_STATS is not set 1223# CONFIG_TIMER_STATS is not set
@@ -1083,6 +1233,7 @@ CONFIG_SCHED_DEBUG=y
1083# CONFIG_LOCK_STAT is not set 1233# CONFIG_LOCK_STAT is not set
1084# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1234# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1085# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1235# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1236CONFIG_STACKTRACE=y
1086# CONFIG_DEBUG_KOBJECT is not set 1237# CONFIG_DEBUG_KOBJECT is not set
1087CONFIG_DEBUG_BUGVERBOSE=y 1238CONFIG_DEBUG_BUGVERBOSE=y
1088# CONFIG_DEBUG_INFO is not set 1239# CONFIG_DEBUG_INFO is not set
@@ -1091,13 +1242,39 @@ CONFIG_DEBUG_BUGVERBOSE=y
1091# CONFIG_DEBUG_MEMORY_INIT is not set 1242# CONFIG_DEBUG_MEMORY_INIT is not set
1092# CONFIG_DEBUG_LIST is not set 1243# CONFIG_DEBUG_LIST is not set
1093# CONFIG_DEBUG_SG is not set 1244# CONFIG_DEBUG_SG is not set
1245# CONFIG_DEBUG_NOTIFIERS is not set
1246# CONFIG_DEBUG_CREDENTIALS is not set
1094CONFIG_FRAME_POINTER=y 1247CONFIG_FRAME_POINTER=y
1095# CONFIG_BOOT_PRINTK_DELAY is not set 1248# CONFIG_BOOT_PRINTK_DELAY is not set
1096# CONFIG_RCU_TORTURE_TEST is not set 1249# CONFIG_RCU_TORTURE_TEST is not set
1250# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1097# CONFIG_KPROBES_SANITY_TEST is not set 1251# CONFIG_KPROBES_SANITY_TEST is not set
1098# CONFIG_BACKTRACE_SELF_TEST is not set 1252# CONFIG_BACKTRACE_SELF_TEST is not set
1253# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1254# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1099# CONFIG_LKDTM is not set 1255# CONFIG_LKDTM is not set
1100# CONFIG_FAULT_INJECTION is not set 1256# CONFIG_FAULT_INJECTION is not set
1257# CONFIG_PAGE_POISONING is not set
1258CONFIG_NOP_TRACER=y
1259CONFIG_RING_BUFFER=y
1260CONFIG_EVENT_TRACING=y
1261CONFIG_CONTEXT_SWITCH_TRACER=y
1262CONFIG_RING_BUFFER_ALLOW_SWAP=y
1263CONFIG_TRACING=y
1264CONFIG_TRACING_SUPPORT=y
1265CONFIG_FTRACE=y
1266# CONFIG_IRQSOFF_TRACER is not set
1267# CONFIG_SCHED_TRACER is not set
1268# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1269# CONFIG_BOOT_TRACER is not set
1270CONFIG_BRANCH_PROFILE_NONE=y
1271# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1272# CONFIG_PROFILE_ALL_BRANCHES is not set
1273# CONFIG_KMEMTRACE is not set
1274# CONFIG_WORKQUEUE_TRACER is not set
1275# CONFIG_BLK_DEV_IO_TRACE is not set
1276# CONFIG_RING_BUFFER_BENCHMARK is not set
1277# CONFIG_DYNAMIC_DEBUG is not set
1101# CONFIG_SAMPLES is not set 1278# CONFIG_SAMPLES is not set
1102 1279
1103# 1280#
@@ -1105,19 +1282,30 @@ CONFIG_FRAME_POINTER=y
1105# 1282#
1106# CONFIG_KEYS is not set 1283# CONFIG_KEYS is not set
1107# CONFIG_SECURITY is not set 1284# CONFIG_SECURITY is not set
1285# CONFIG_SECURITYFS is not set
1108# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1286# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1109CONFIG_CRYPTO=y 1287CONFIG_CRYPTO=y
1110 1288
1111# 1289#
1112# Crypto core or helper 1290# Crypto core or helper
1113# 1291#
1292# CONFIG_CRYPTO_FIPS is not set
1114CONFIG_CRYPTO_ALGAPI=y 1293CONFIG_CRYPTO_ALGAPI=y
1294CONFIG_CRYPTO_ALGAPI2=y
1115CONFIG_CRYPTO_AEAD=y 1295CONFIG_CRYPTO_AEAD=y
1296CONFIG_CRYPTO_AEAD2=y
1116CONFIG_CRYPTO_BLKCIPHER=y 1297CONFIG_CRYPTO_BLKCIPHER=y
1298CONFIG_CRYPTO_BLKCIPHER2=y
1117CONFIG_CRYPTO_HASH=y 1299CONFIG_CRYPTO_HASH=y
1300CONFIG_CRYPTO_HASH2=y
1301CONFIG_CRYPTO_RNG=m
1302CONFIG_CRYPTO_RNG2=y
1303CONFIG_CRYPTO_PCOMP=y
1118CONFIG_CRYPTO_MANAGER=y 1304CONFIG_CRYPTO_MANAGER=y
1305CONFIG_CRYPTO_MANAGER2=y
1119# CONFIG_CRYPTO_GF128MUL is not set 1306# CONFIG_CRYPTO_GF128MUL is not set
1120# CONFIG_CRYPTO_NULL is not set 1307# CONFIG_CRYPTO_NULL is not set
1308CONFIG_CRYPTO_WORKQUEUE=y
1121# CONFIG_CRYPTO_CRYPTD is not set 1309# CONFIG_CRYPTO_CRYPTD is not set
1122CONFIG_CRYPTO_AUTHENC=y 1310CONFIG_CRYPTO_AUTHENC=y
1123# CONFIG_CRYPTO_TEST is not set 1311# CONFIG_CRYPTO_TEST is not set
@@ -1145,11 +1333,13 @@ CONFIG_CRYPTO_PCBC=m
1145# 1333#
1146CONFIG_CRYPTO_HMAC=y 1334CONFIG_CRYPTO_HMAC=y
1147# CONFIG_CRYPTO_XCBC is not set 1335# CONFIG_CRYPTO_XCBC is not set
1336# CONFIG_CRYPTO_VMAC is not set
1148 1337
1149# 1338#
1150# Digest 1339# Digest
1151# 1340#
1152# CONFIG_CRYPTO_CRC32C is not set 1341# CONFIG_CRYPTO_CRC32C is not set
1342# CONFIG_CRYPTO_GHASH is not set
1153# CONFIG_CRYPTO_MD4 is not set 1343# CONFIG_CRYPTO_MD4 is not set
1154CONFIG_CRYPTO_MD5=y 1344CONFIG_CRYPTO_MD5=y
1155# CONFIG_CRYPTO_MICHAEL_MIC is not set 1345# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1166,7 +1356,7 @@ CONFIG_CRYPTO_SHA1=y
1166# 1356#
1167# Ciphers 1357# Ciphers
1168# 1358#
1169# CONFIG_CRYPTO_AES is not set 1359CONFIG_CRYPTO_AES=m
1170# CONFIG_CRYPTO_ANUBIS is not set 1360# CONFIG_CRYPTO_ANUBIS is not set
1171CONFIG_CRYPTO_ARC4=m 1361CONFIG_CRYPTO_ARC4=m
1172# CONFIG_CRYPTO_BLOWFISH is not set 1362# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1186,15 +1376,21 @@ CONFIG_CRYPTO_DES=y
1186# Compression 1376# Compression
1187# 1377#
1188CONFIG_CRYPTO_DEFLATE=y 1378CONFIG_CRYPTO_DEFLATE=y
1379# CONFIG_CRYPTO_ZLIB is not set
1189# CONFIG_CRYPTO_LZO is not set 1380# CONFIG_CRYPTO_LZO is not set
1381
1382#
1383# Random Number Generation
1384#
1385CONFIG_CRYPTO_ANSI_CPRNG=m
1190CONFIG_CRYPTO_HW=y 1386CONFIG_CRYPTO_HW=y
1387CONFIG_BINARY_PRINTF=y
1191 1388
1192# 1389#
1193# Library routines 1390# Library routines
1194# 1391#
1195CONFIG_BITREVERSE=y 1392CONFIG_BITREVERSE=y
1196# CONFIG_GENERIC_FIND_FIRST_BIT is not set 1393CONFIG_GENERIC_FIND_LAST_BIT=y
1197# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1198CONFIG_CRC_CCITT=m 1394CONFIG_CRC_CCITT=m
1199# CONFIG_CRC16 is not set 1395# CONFIG_CRC16 is not set
1200# CONFIG_CRC_T10DIF is not set 1396# CONFIG_CRC_T10DIF is not set
@@ -1204,8 +1400,9 @@ CONFIG_CRC7=m
1204# CONFIG_LIBCRC32C is not set 1400# CONFIG_LIBCRC32C is not set
1205CONFIG_ZLIB_INFLATE=y 1401CONFIG_ZLIB_INFLATE=y
1206CONFIG_ZLIB_DEFLATE=y 1402CONFIG_ZLIB_DEFLATE=y
1403CONFIG_DECOMPRESS_GZIP=y
1207CONFIG_GENERIC_ALLOCATOR=y 1404CONFIG_GENERIC_ALLOCATOR=y
1208CONFIG_PLIST=y
1209CONFIG_HAS_IOMEM=y 1405CONFIG_HAS_IOMEM=y
1210CONFIG_HAS_IOPORT=y 1406CONFIG_HAS_IOPORT=y
1211CONFIG_HAS_DMA=y 1407CONFIG_HAS_DMA=y
1408CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 86a45b5c9d0d..c732cc397ad0 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.25.6 3# Linux kernel version: 2.6.32-rc5
4# Wed Jun 18 16:06:32 2008 4# Thu Oct 29 09:36:39 2009
5# 5#
6CONFIG_AVR32=y 6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y 7CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y 21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y 22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
24 25
25# 26#
26# General setup 27# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
34CONFIG_SYSVIPC=y 35CONFIG_SYSVIPC=y
35CONFIG_SYSVIPC_SYSCTL=y 36CONFIG_SYSVIPC_SYSCTL=y
36CONFIG_POSIX_MQUEUE=y 37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
37CONFIG_BSD_PROCESS_ACCT=y 39CONFIG_BSD_PROCESS_ACCT=y
38CONFIG_BSD_PROCESS_ACCT_V3=y 40CONFIG_BSD_PROCESS_ACCT_V3=y
39# CONFIG_TASKSTATS is not set 41# CONFIG_TASKSTATS is not set
40# CONFIG_AUDIT is not set 42# CONFIG_AUDIT is not set
43
44#
45# RCU Subsystem
46#
47CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set
49# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set
52# CONFIG_TREE_RCU_TRACE is not set
41# CONFIG_IKCONFIG is not set 53# CONFIG_IKCONFIG is not set
42CONFIG_LOG_BUF_SHIFT=14 54CONFIG_LOG_BUF_SHIFT=14
43# CONFIG_CGROUPS is not set
44# CONFIG_GROUP_SCHED is not set 55# CONFIG_GROUP_SCHED is not set
56# CONFIG_CGROUPS is not set
45CONFIG_SYSFS_DEPRECATED=y 57CONFIG_SYSFS_DEPRECATED=y
46CONFIG_SYSFS_DEPRECATED_V2=y 58CONFIG_SYSFS_DEPRECATED_V2=y
47# CONFIG_RELAY is not set 59# CONFIG_RELAY is not set
48# CONFIG_NAMESPACES is not set 60# CONFIG_NAMESPACES is not set
49CONFIG_BLK_DEV_INITRD=y 61CONFIG_BLK_DEV_INITRD=y
50CONFIG_INITRAMFS_SOURCE="" 62CONFIG_INITRAMFS_SOURCE=""
63CONFIG_RD_GZIP=y
64# CONFIG_RD_BZIP2 is not set
65# CONFIG_RD_LZMA is not set
51CONFIG_CC_OPTIMIZE_FOR_SIZE=y 66CONFIG_CC_OPTIMIZE_FOR_SIZE=y
52CONFIG_SYSCTL=y 67CONFIG_SYSCTL=y
68CONFIG_ANON_INODES=y
53CONFIG_EMBEDDED=y 69CONFIG_EMBEDDED=y
54# CONFIG_SYSCTL_SYSCALL is not set 70# CONFIG_SYSCTL_SYSCALL is not set
55CONFIG_KALLSYMS=y 71CONFIG_KALLSYMS=y
@@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y
59CONFIG_PRINTK=y 75CONFIG_PRINTK=y
60CONFIG_BUG=y 76CONFIG_BUG=y
61CONFIG_ELF_CORE=y 77CONFIG_ELF_CORE=y
62# CONFIG_COMPAT_BRK is not set
63# CONFIG_BASE_FULL is not set 78# CONFIG_BASE_FULL is not set
64CONFIG_FUTEX=y 79CONFIG_FUTEX=y
65CONFIG_ANON_INODES=y
66CONFIG_EPOLL=y 80CONFIG_EPOLL=y
67CONFIG_SIGNALFD=y 81CONFIG_SIGNALFD=y
68CONFIG_TIMERFD=y 82CONFIG_TIMERFD=y
69CONFIG_EVENTFD=y 83CONFIG_EVENTFD=y
70CONFIG_SHMEM=y 84CONFIG_SHMEM=y
85CONFIG_AIO=y
86
87#
88# Kernel Performance Events And Counters
89#
71CONFIG_VM_EVENT_COUNTERS=y 90CONFIG_VM_EVENT_COUNTERS=y
72CONFIG_SLUB_DEBUG=y 91CONFIG_SLUB_DEBUG=y
92# CONFIG_COMPAT_BRK is not set
73# CONFIG_SLAB is not set 93# CONFIG_SLAB is not set
74CONFIG_SLUB=y 94CONFIG_SLUB=y
75# CONFIG_SLOB is not set 95# CONFIG_SLOB is not set
76CONFIG_PROFILING=y 96CONFIG_PROFILING=y
77# CONFIG_MARKERS is not set 97CONFIG_TRACEPOINTS=y
78CONFIG_OPROFILE=m 98CONFIG_OPROFILE=m
79CONFIG_HAVE_OPROFILE=y 99CONFIG_HAVE_OPROFILE=y
80CONFIG_KPROBES=y 100CONFIG_KPROBES=y
81CONFIG_HAVE_KPROBES=y 101CONFIG_HAVE_KPROBES=y
82# CONFIG_HAVE_KRETPROBES is not set 102CONFIG_HAVE_CLK=y
83CONFIG_PROC_PAGE_MONITOR=y 103
104#
105# GCOV-based kernel profiling
106#
107# CONFIG_GCOV_KERNEL is not set
108CONFIG_SLOW_WORK=y
109# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
84CONFIG_SLABINFO=y 110CONFIG_SLABINFO=y
85CONFIG_RT_MUTEXES=y 111CONFIG_RT_MUTEXES=y
86# CONFIG_TINY_SHMEM is not set
87CONFIG_BASE_SMALL=1 112CONFIG_BASE_SMALL=1
88CONFIG_MODULES=y 113CONFIG_MODULES=y
114# CONFIG_MODULE_FORCE_LOAD is not set
89CONFIG_MODULE_UNLOAD=y 115CONFIG_MODULE_UNLOAD=y
90CONFIG_MODULE_FORCE_UNLOAD=y 116CONFIG_MODULE_FORCE_UNLOAD=y
91# CONFIG_MODVERSIONS is not set 117# CONFIG_MODVERSIONS is not set
92# CONFIG_MODULE_SRCVERSION_ALL is not set 118# CONFIG_MODULE_SRCVERSION_ALL is not set
93CONFIG_KMOD=y
94CONFIG_BLOCK=y 119CONFIG_BLOCK=y
95# CONFIG_LBD is not set 120CONFIG_LBDAF=y
96# CONFIG_BLK_DEV_IO_TRACE is not set
97# CONFIG_LSF is not set
98# CONFIG_BLK_DEV_BSG is not set 121# CONFIG_BLK_DEV_BSG is not set
122# CONFIG_BLK_DEV_INTEGRITY is not set
99 123
100# 124#
101# IO Schedulers 125# IO Schedulers
@@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
109CONFIG_DEFAULT_CFQ=y 133CONFIG_DEFAULT_CFQ=y
110# CONFIG_DEFAULT_NOOP is not set 134# CONFIG_DEFAULT_NOOP is not set
111CONFIG_DEFAULT_IOSCHED="cfq" 135CONFIG_DEFAULT_IOSCHED="cfq"
112CONFIG_CLASSIC_RCU=y 136CONFIG_FREEZER=y
113 137
114# 138#
115# System Type and features 139# System Type and features
@@ -124,13 +148,26 @@ CONFIG_PERFORMANCE_COUNTERS=y
124CONFIG_PLATFORM_AT32AP=y 148CONFIG_PLATFORM_AT32AP=y
125CONFIG_CPU_AT32AP700X=y 149CONFIG_CPU_AT32AP700X=y
126CONFIG_CPU_AT32AP7000=y 150CONFIG_CPU_AT32AP7000=y
151CONFIG_BOARD_ATNGW100_COMMON=y
127# CONFIG_BOARD_ATSTK1000 is not set 152# CONFIG_BOARD_ATSTK1000 is not set
128CONFIG_BOARD_ATNGW100=y 153CONFIG_BOARD_ATNGW100_MKI=y
154# CONFIG_BOARD_ATNGW100_MKII is not set
155# CONFIG_BOARD_HAMMERHEAD is not set
156# CONFIG_BOARD_FAVR_32 is not set
157# CONFIG_BOARD_MERISC is not set
158# CONFIG_BOARD_MIMC200 is not set
159# CONFIG_BOARD_ATSTK1002 is not set
160# CONFIG_BOARD_ATSTK1003 is not set
161# CONFIG_BOARD_ATSTK1004 is not set
162# CONFIG_BOARD_ATSTK1006 is not set
163# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set
164# CONFIG_BOARD_ATSTK1000_J2_RGB is not set
165# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
129CONFIG_BOARD_ATNGW100_EVKLCD10X=y 166CONFIG_BOARD_ATNGW100_EVKLCD10X=y
167# CONFIG_BOARD_ATNGW100_MRMT is not set
130CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y 168CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
131# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set 169# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set
132# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set 170# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
133CONFIG_BOARD_ATNGW100_I2C_GPIO=y
134CONFIG_LOADER_U_BOOT=y 171CONFIG_LOADER_U_BOOT=y
135 172
136# 173#
@@ -139,14 +176,14 @@ CONFIG_LOADER_U_BOOT=y
139# CONFIG_AP700X_32_BIT_SMC is not set 176# CONFIG_AP700X_32_BIT_SMC is not set
140CONFIG_AP700X_16_BIT_SMC=y 177CONFIG_AP700X_16_BIT_SMC=y
141# CONFIG_AP700X_8_BIT_SMC is not set 178# CONFIG_AP700X_8_BIT_SMC is not set
142CONFIG_GPIO_DEV=y
143CONFIG_LOAD_ADDRESS=0x10000000 179CONFIG_LOAD_ADDRESS=0x10000000
144CONFIG_ENTRY_ADDRESS=0x90000000 180CONFIG_ENTRY_ADDRESS=0x90000000
145CONFIG_PHYS_OFFSET=0x10000000 181CONFIG_PHYS_OFFSET=0x10000000
146CONFIG_PREEMPT_NONE=y 182CONFIG_PREEMPT_NONE=y
147# CONFIG_PREEMPT_VOLUNTARY is not set 183# CONFIG_PREEMPT_VOLUNTARY is not set
148# CONFIG_PREEMPT is not set 184# CONFIG_PREEMPT is not set
149# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set 185CONFIG_QUICKLIST=y
186# CONFIG_HAVE_ARCH_BOOTMEM is not set
150# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set 187# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
151# CONFIG_NEED_NODE_MEMMAP_SIZE is not set 188# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
152CONFIG_ARCH_FLATMEM_ENABLE=y 189CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -158,33 +195,36 @@ CONFIG_FLATMEM_MANUAL=y
158# CONFIG_SPARSEMEM_MANUAL is not set 195# CONFIG_SPARSEMEM_MANUAL is not set
159CONFIG_FLATMEM=y 196CONFIG_FLATMEM=y
160CONFIG_FLAT_NODE_MEM_MAP=y 197CONFIG_FLAT_NODE_MEM_MAP=y
161# CONFIG_SPARSEMEM_STATIC is not set 198CONFIG_PAGEFLAGS_EXTENDED=y
162# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
163CONFIG_SPLIT_PTLOCK_CPUS=4 199CONFIG_SPLIT_PTLOCK_CPUS=4
164# CONFIG_RESOURCES_64BIT is not set 200# CONFIG_PHYS_ADDR_T_64BIT is not set
165CONFIG_ZONE_DMA_FLAG=0 201CONFIG_ZONE_DMA_FLAG=0
202CONFIG_NR_QUICK=2
166CONFIG_VIRT_TO_BUS=y 203CONFIG_VIRT_TO_BUS=y
204CONFIG_HAVE_MLOCK=y
205CONFIG_HAVE_MLOCKED_PAGE_BIT=y
206# CONFIG_KSM is not set
207CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
167# CONFIG_OWNERSHIP_TRACE is not set 208# CONFIG_OWNERSHIP_TRACE is not set
168CONFIG_NMI_DEBUGGING=y 209CONFIG_NMI_DEBUGGING=y
169CONFIG_DW_DMAC=y
170# CONFIG_HZ_100 is not set 210# CONFIG_HZ_100 is not set
171CONFIG_HZ_250=y 211CONFIG_HZ_250=y
172# CONFIG_HZ_300 is not set 212# CONFIG_HZ_300 is not set
173# CONFIG_HZ_1000 is not set 213# CONFIG_HZ_1000 is not set
174CONFIG_HZ=250 214CONFIG_HZ=250
175# CONFIG_SCHED_HRTICK is not set 215CONFIG_SCHED_HRTICK=y
176CONFIG_CMDLINE="" 216CONFIG_CMDLINE=""
177 217
178# 218#
179# Power management options 219# Power management options
180# 220#
181CONFIG_ARCH_SUSPEND_POSSIBLE=y
182CONFIG_PM=y 221CONFIG_PM=y
183# CONFIG_PM_LEGACY is not set
184# CONFIG_PM_DEBUG is not set 222# CONFIG_PM_DEBUG is not set
185CONFIG_PM_SLEEP=y 223CONFIG_PM_SLEEP=y
186CONFIG_SUSPEND=y 224CONFIG_SUSPEND=y
187CONFIG_SUSPEND_FREEZER=y 225CONFIG_SUSPEND_FREEZER=y
226# CONFIG_PM_RUNTIME is not set
227CONFIG_ARCH_SUSPEND_POSSIBLE=y
188 228
189# 229#
190# CPU Frequency scaling 230# CPU Frequency scaling
@@ -194,6 +234,7 @@ CONFIG_CPU_FREQ_TABLE=y
194# CONFIG_CPU_FREQ_DEBUG is not set 234# CONFIG_CPU_FREQ_DEBUG is not set
195# CONFIG_CPU_FREQ_STAT is not set 235# CONFIG_CPU_FREQ_STAT is not set
196# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set 236# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
237# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
197# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set 238# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
198CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 239CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
199# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set 240# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
@@ -214,11 +255,9 @@ CONFIG_CPU_FREQ_AT32AP=y
214# Executable file formats 255# Executable file formats
215# 256#
216CONFIG_BINFMT_ELF=y 257CONFIG_BINFMT_ELF=y
258# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
259# CONFIG_HAVE_AOUT is not set
217# CONFIG_BINFMT_MISC is not set 260# CONFIG_BINFMT_MISC is not set
218
219#
220# Networking
221#
222CONFIG_NET=y 261CONFIG_NET=y
223 262
224# 263#
@@ -232,6 +271,7 @@ CONFIG_XFRM_USER=y
232# CONFIG_XFRM_SUB_POLICY is not set 271# CONFIG_XFRM_SUB_POLICY is not set
233# CONFIG_XFRM_MIGRATE is not set 272# CONFIG_XFRM_MIGRATE is not set
234# CONFIG_XFRM_STATISTICS is not set 273# CONFIG_XFRM_STATISTICS is not set
274CONFIG_XFRM_IPCOMP=y
235CONFIG_NET_KEY=y 275CONFIG_NET_KEY=y
236# CONFIG_NET_KEY_MIGRATE is not set 276# CONFIG_NET_KEY_MIGRATE is not set
237CONFIG_INET=y 277CONFIG_INET=y
@@ -269,7 +309,6 @@ CONFIG_INET_TCP_DIAG=y
269CONFIG_TCP_CONG_CUBIC=y 309CONFIG_TCP_CONG_CUBIC=y
270CONFIG_DEFAULT_TCP_CONG="cubic" 310CONFIG_DEFAULT_TCP_CONG="cubic"
271# CONFIG_TCP_MD5SIG is not set 311# CONFIG_TCP_MD5SIG is not set
272# CONFIG_IP_VS is not set
273CONFIG_IPV6=y 312CONFIG_IPV6=y
274# CONFIG_IPV6_PRIVACY is not set 313# CONFIG_IPV6_PRIVACY is not set
275# CONFIG_IPV6_ROUTER_PREF is not set 314# CONFIG_IPV6_ROUTER_PREF is not set
@@ -285,8 +324,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
285CONFIG_INET6_XFRM_MODE_BEET=y 324CONFIG_INET6_XFRM_MODE_BEET=y
286# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 325# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
287CONFIG_IPV6_SIT=y 326CONFIG_IPV6_SIT=y
327CONFIG_IPV6_NDISC_NODETYPE=y
288# CONFIG_IPV6_TUNNEL is not set 328# CONFIG_IPV6_TUNNEL is not set
289# CONFIG_IPV6_MULTIPLE_TABLES is not set 329# CONFIG_IPV6_MULTIPLE_TABLES is not set
330# CONFIG_IPV6_MROUTE is not set
290# CONFIG_NETWORK_SECMARK is not set 331# CONFIG_NETWORK_SECMARK is not set
291CONFIG_NETFILTER=y 332CONFIG_NETFILTER=y
292# CONFIG_NETFILTER_DEBUG is not set 333# CONFIG_NETFILTER_DEBUG is not set
@@ -310,10 +351,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
310CONFIG_NETFILTER_XT_MATCH_MARK=m 351CONFIG_NETFILTER_XT_MATCH_MARK=m
311CONFIG_NETFILTER_XT_MATCH_POLICY=m 352CONFIG_NETFILTER_XT_MATCH_POLICY=m
312CONFIG_NETFILTER_XT_MATCH_STATE=m 353CONFIG_NETFILTER_XT_MATCH_STATE=m
354# CONFIG_IP_VS is not set
313 355
314# 356#
315# IP: Netfilter Configuration 357# IP: Netfilter Configuration
316# 358#
359CONFIG_NF_DEFRAG_IPV4=m
317CONFIG_NF_CONNTRACK_IPV4=m 360CONFIG_NF_CONNTRACK_IPV4=m
318CONFIG_NF_CONNTRACK_PROC_COMPAT=y 361CONFIG_NF_CONNTRACK_PROC_COMPAT=y
319CONFIG_IP_NF_IPTABLES=m 362CONFIG_IP_NF_IPTABLES=m
@@ -339,16 +382,20 @@ CONFIG_IP_NF_MANGLE=m
339CONFIG_NF_CONNTRACK_IPV6=m 382CONFIG_NF_CONNTRACK_IPV6=m
340CONFIG_IP6_NF_IPTABLES=m 383CONFIG_IP6_NF_IPTABLES=m
341CONFIG_IP6_NF_MATCH_IPV6HEADER=m 384CONFIG_IP6_NF_MATCH_IPV6HEADER=m
342CONFIG_IP6_NF_FILTER=m
343CONFIG_IP6_NF_TARGET_LOG=m 385CONFIG_IP6_NF_TARGET_LOG=m
386CONFIG_IP6_NF_FILTER=m
344CONFIG_IP6_NF_TARGET_REJECT=m 387CONFIG_IP6_NF_TARGET_REJECT=m
345CONFIG_IP6_NF_MANGLE=m 388CONFIG_IP6_NF_MANGLE=m
346# CONFIG_IP_DCCP is not set 389# CONFIG_IP_DCCP is not set
347# CONFIG_IP_SCTP is not set 390# CONFIG_IP_SCTP is not set
391# CONFIG_RDS is not set
348# CONFIG_TIPC is not set 392# CONFIG_TIPC is not set
349# CONFIG_ATM is not set 393# CONFIG_ATM is not set
394CONFIG_STP=m
350CONFIG_BRIDGE=m 395CONFIG_BRIDGE=m
396# CONFIG_NET_DSA is not set
351CONFIG_VLAN_8021Q=m 397CONFIG_VLAN_8021Q=m
398# CONFIG_VLAN_8021Q_GVRP is not set
352# CONFIG_DECNET is not set 399# CONFIG_DECNET is not set
353CONFIG_LLC=m 400CONFIG_LLC=m
354# CONFIG_LLC2 is not set 401# CONFIG_LLC2 is not set
@@ -358,26 +405,33 @@ CONFIG_LLC=m
358# CONFIG_LAPB is not set 405# CONFIG_LAPB is not set
359# CONFIG_ECONET is not set 406# CONFIG_ECONET is not set
360# CONFIG_WAN_ROUTER is not set 407# CONFIG_WAN_ROUTER is not set
408# CONFIG_PHONET is not set
409# CONFIG_IEEE802154 is not set
361# CONFIG_NET_SCHED is not set 410# CONFIG_NET_SCHED is not set
411# CONFIG_DCB is not set
362 412
363# 413#
364# Network testing 414# Network testing
365# 415#
366# CONFIG_NET_PKTGEN is not set 416# CONFIG_NET_PKTGEN is not set
367# CONFIG_NET_TCPPROBE is not set 417# CONFIG_NET_TCPPROBE is not set
418# CONFIG_NET_DROP_MONITOR is not set
368# CONFIG_HAMRADIO is not set 419# CONFIG_HAMRADIO is not set
369# CONFIG_CAN is not set 420# CONFIG_CAN is not set
370# CONFIG_IRDA is not set 421# CONFIG_IRDA is not set
371# CONFIG_BT is not set 422# CONFIG_BT is not set
372# CONFIG_AF_RXRPC is not set 423# CONFIG_AF_RXRPC is not set
424CONFIG_WIRELESS=y
425# CONFIG_CFG80211 is not set
426CONFIG_CFG80211_DEFAULT_PS_VALUE=0
427# CONFIG_WIRELESS_OLD_REGULATORY is not set
428# CONFIG_WIRELESS_EXT is not set
429# CONFIG_LIB80211 is not set
373 430
374# 431#
375# Wireless 432# CFG80211 needs to be enabled for MAC80211
376# 433#
377# CONFIG_CFG80211 is not set 434# CONFIG_WIMAX is not set
378# CONFIG_WIRELESS_EXT is not set
379# CONFIG_MAC80211 is not set
380# CONFIG_IEEE80211 is not set
381# CONFIG_RFKILL is not set 435# CONFIG_RFKILL is not set
382# CONFIG_NET_9P is not set 436# CONFIG_NET_9P is not set
383 437
@@ -389,6 +443,7 @@ CONFIG_LLC=m
389# Generic Driver Options 443# Generic Driver Options
390# 444#
391CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 445CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
446# CONFIG_DEVTMPFS is not set
392CONFIG_STANDALONE=y 447CONFIG_STANDALONE=y
393# CONFIG_PREVENT_FIRMWARE_BUILD is not set 448# CONFIG_PREVENT_FIRMWARE_BUILD is not set
394# CONFIG_FW_LOADER is not set 449# CONFIG_FW_LOADER is not set
@@ -398,10 +453,12 @@ CONFIG_STANDALONE=y
398# CONFIG_CONNECTOR is not set 453# CONFIG_CONNECTOR is not set
399CONFIG_MTD=y 454CONFIG_MTD=y
400# CONFIG_MTD_DEBUG is not set 455# CONFIG_MTD_DEBUG is not set
456# CONFIG_MTD_TESTS is not set
401# CONFIG_MTD_CONCAT is not set 457# CONFIG_MTD_CONCAT is not set
402CONFIG_MTD_PARTITIONS=y 458CONFIG_MTD_PARTITIONS=y
403# CONFIG_MTD_REDBOOT_PARTS is not set 459# CONFIG_MTD_REDBOOT_PARTS is not set
404CONFIG_MTD_CMDLINE_PARTS=y 460CONFIG_MTD_CMDLINE_PARTS=y
461# CONFIG_MTD_AR7_PARTS is not set
405 462
406# 463#
407# User Modules And Translation Layers 464# User Modules And Translation Layers
@@ -446,16 +503,17 @@ CONFIG_MTD_CFI_UTIL=y
446# 503#
447# CONFIG_MTD_COMPLEX_MAPPINGS is not set 504# CONFIG_MTD_COMPLEX_MAPPINGS is not set
448CONFIG_MTD_PHYSMAP=y 505CONFIG_MTD_PHYSMAP=y
449CONFIG_MTD_PHYSMAP_START=0x80000000 506# CONFIG_MTD_PHYSMAP_COMPAT is not set
450CONFIG_MTD_PHYSMAP_LEN=0x0
451CONFIG_MTD_PHYSMAP_BANKWIDTH=2
452# CONFIG_MTD_PLATRAM is not set 507# CONFIG_MTD_PLATRAM is not set
453 508
454# 509#
455# Self-contained MTD device drivers 510# Self-contained MTD device drivers
456# 511#
457CONFIG_MTD_DATAFLASH=y 512CONFIG_MTD_DATAFLASH=y
513# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
514# CONFIG_MTD_DATAFLASH_OTP is not set
458# CONFIG_MTD_M25P80 is not set 515# CONFIG_MTD_M25P80 is not set
516# CONFIG_MTD_SST25L is not set
459# CONFIG_MTD_SLRAM is not set 517# CONFIG_MTD_SLRAM is not set
460# CONFIG_MTD_PHRAM is not set 518# CONFIG_MTD_PHRAM is not set
461# CONFIG_MTD_MTDRAM is not set 519# CONFIG_MTD_MTDRAM is not set
@@ -471,6 +529,11 @@ CONFIG_MTD_DATAFLASH=y
471# CONFIG_MTD_ONENAND is not set 529# CONFIG_MTD_ONENAND is not set
472 530
473# 531#
532# LPDDR flash memory drivers
533#
534# CONFIG_MTD_LPDDR is not set
535
536#
474# UBI - Unsorted block images 537# UBI - Unsorted block images
475# 538#
476CONFIG_MTD_UBI=y 539CONFIG_MTD_UBI=y
@@ -499,10 +562,20 @@ CONFIG_MISC_DEVICES=y
499CONFIG_ATMEL_TCLIB=y 562CONFIG_ATMEL_TCLIB=y
500CONFIG_ATMEL_TCB_CLKSRC=y 563CONFIG_ATMEL_TCB_CLKSRC=y
501CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 564CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
502# CONFIG_EEPROM_93CX6 is not set 565# CONFIG_ICS932S401 is not set
503# CONFIG_ATMEL_SSC is not set 566# CONFIG_ATMEL_SSC is not set
504# CONFIG_ENCLOSURE_SERVICES is not set 567# CONFIG_ENCLOSURE_SERVICES is not set
505# CONFIG_HAVE_IDE is not set 568# CONFIG_ISL29003 is not set
569# CONFIG_C2PORT is not set
570
571#
572# EEPROM support
573#
574# CONFIG_EEPROM_AT24 is not set
575# CONFIG_EEPROM_AT25 is not set
576# CONFIG_EEPROM_LEGACY is not set
577# CONFIG_EEPROM_MAX6875 is not set
578# CONFIG_EEPROM_93CX6 is not set
506 579
507# 580#
508# SCSI device support 581# SCSI device support
@@ -514,7 +587,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
514# CONFIG_ATA is not set 587# CONFIG_ATA is not set
515# CONFIG_MD is not set 588# CONFIG_MD is not set
516CONFIG_NETDEVICES=y 589CONFIG_NETDEVICES=y
517# CONFIG_NETDEVICES_MULTIQUEUE is not set
518# CONFIG_DUMMY is not set 590# CONFIG_DUMMY is not set
519# CONFIG_BONDING is not set 591# CONFIG_BONDING is not set
520# CONFIG_MACVLAN is not set 592# CONFIG_MACVLAN is not set
@@ -536,25 +608,37 @@ CONFIG_PHYLIB=y
536# CONFIG_BROADCOM_PHY is not set 608# CONFIG_BROADCOM_PHY is not set
537# CONFIG_ICPLUS_PHY is not set 609# CONFIG_ICPLUS_PHY is not set
538# CONFIG_REALTEK_PHY is not set 610# CONFIG_REALTEK_PHY is not set
611# CONFIG_NATIONAL_PHY is not set
612# CONFIG_STE10XP is not set
613# CONFIG_LSI_ET1011C_PHY is not set
539# CONFIG_FIXED_PHY is not set 614# CONFIG_FIXED_PHY is not set
540# CONFIG_MDIO_BITBANG is not set 615# CONFIG_MDIO_BITBANG is not set
541CONFIG_NET_ETHERNET=y 616CONFIG_NET_ETHERNET=y
542# CONFIG_MII is not set 617# CONFIG_MII is not set
543CONFIG_MACB=y 618CONFIG_MACB=y
544# CONFIG_ENC28J60 is not set 619# CONFIG_ENC28J60 is not set
620# CONFIG_ETHOC is not set
621# CONFIG_DNET is not set
545# CONFIG_IBM_NEW_EMAC_ZMII is not set 622# CONFIG_IBM_NEW_EMAC_ZMII is not set
546# CONFIG_IBM_NEW_EMAC_RGMII is not set 623# CONFIG_IBM_NEW_EMAC_RGMII is not set
547# CONFIG_IBM_NEW_EMAC_TAH is not set 624# CONFIG_IBM_NEW_EMAC_TAH is not set
548# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 625# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
626# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
627# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
628# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
549# CONFIG_B44 is not set 629# CONFIG_B44 is not set
630# CONFIG_KS8842 is not set
631# CONFIG_KS8851 is not set
632# CONFIG_KS8851_MLL is not set
550# CONFIG_NETDEV_1000 is not set 633# CONFIG_NETDEV_1000 is not set
551# CONFIG_NETDEV_10000 is not set 634# CONFIG_NETDEV_10000 is not set
635CONFIG_WLAN=y
636# CONFIG_WLAN_PRE80211 is not set
637# CONFIG_WLAN_80211 is not set
552 638
553# 639#
554# Wireless LAN 640# Enable WiMAX (Networking options) to see the WiMAX drivers
555# 641#
556# CONFIG_WLAN_PRE80211 is not set
557# CONFIG_WLAN_80211 is not set
558# CONFIG_WAN is not set 642# CONFIG_WAN is not set
559CONFIG_PPP=m 643CONFIG_PPP=m
560# CONFIG_PPP_MULTILINK is not set 644# CONFIG_PPP_MULTILINK is not set
@@ -598,15 +682,30 @@ CONFIG_INPUT_EVDEV=m
598# CONFIG_INPUT_TABLET is not set 682# CONFIG_INPUT_TABLET is not set
599CONFIG_INPUT_TOUCHSCREEN=y 683CONFIG_INPUT_TOUCHSCREEN=y
600# CONFIG_TOUCHSCREEN_ADS7846 is not set 684# CONFIG_TOUCHSCREEN_ADS7846 is not set
685# CONFIG_TOUCHSCREEN_AD7877 is not set
686# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
687# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
688# CONFIG_TOUCHSCREEN_AD7879 is not set
689# CONFIG_TOUCHSCREEN_EETI is not set
601# CONFIG_TOUCHSCREEN_FUJITSU is not set 690# CONFIG_TOUCHSCREEN_FUJITSU is not set
602# CONFIG_TOUCHSCREEN_GUNZE is not set 691# CONFIG_TOUCHSCREEN_GUNZE is not set
603# CONFIG_TOUCHSCREEN_ELO is not set 692# CONFIG_TOUCHSCREEN_ELO is not set
693# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
694# CONFIG_TOUCHSCREEN_MCS5000 is not set
604# CONFIG_TOUCHSCREEN_MTOUCH is not set 695# CONFIG_TOUCHSCREEN_MTOUCH is not set
696# CONFIG_TOUCHSCREEN_INEXIO is not set
605# CONFIG_TOUCHSCREEN_MK712 is not set 697# CONFIG_TOUCHSCREEN_MK712 is not set
606# CONFIG_TOUCHSCREEN_PENMOUNT is not set 698# CONFIG_TOUCHSCREEN_PENMOUNT is not set
607# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set 699# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
608# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 700# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
609# CONFIG_TOUCHSCREEN_UCB1400 is not set 701CONFIG_TOUCHSCREEN_WM97XX=m
702CONFIG_TOUCHSCREEN_WM9705=y
703CONFIG_TOUCHSCREEN_WM9712=y
704CONFIG_TOUCHSCREEN_WM9713=y
705# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
706# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
707# CONFIG_TOUCHSCREEN_TSC2007 is not set
708# CONFIG_TOUCHSCREEN_W90X900 is not set
610# CONFIG_INPUT_MISC is not set 709# CONFIG_INPUT_MISC is not set
611 710
612# 711#
@@ -619,9 +718,11 @@ CONFIG_INPUT_TOUCHSCREEN=y
619# Character devices 718# Character devices
620# 719#
621CONFIG_VT=y 720CONFIG_VT=y
721CONFIG_CONSOLE_TRANSLATIONS=y
622CONFIG_VT_CONSOLE=y 722CONFIG_VT_CONSOLE=y
623CONFIG_HW_CONSOLE=y 723CONFIG_HW_CONSOLE=y
624# CONFIG_VT_HW_CONSOLE_BINDING is not set 724# CONFIG_VT_HW_CONSOLE_BINDING is not set
725CONFIG_DEVKMEM=y
625# CONFIG_SERIAL_NONSTANDARD is not set 726# CONFIG_SERIAL_NONSTANDARD is not set
626 727
627# 728#
@@ -636,9 +737,11 @@ CONFIG_SERIAL_ATMEL=y
636CONFIG_SERIAL_ATMEL_CONSOLE=y 737CONFIG_SERIAL_ATMEL_CONSOLE=y
637CONFIG_SERIAL_ATMEL_PDC=y 738CONFIG_SERIAL_ATMEL_PDC=y
638# CONFIG_SERIAL_ATMEL_TTYAT is not set 739# CONFIG_SERIAL_ATMEL_TTYAT is not set
740# CONFIG_SERIAL_MAX3100 is not set
639CONFIG_SERIAL_CORE=y 741CONFIG_SERIAL_CORE=y
640CONFIG_SERIAL_CORE_CONSOLE=y 742CONFIG_SERIAL_CORE_CONSOLE=y
641CONFIG_UNIX98_PTYS=y 743CONFIG_UNIX98_PTYS=y
744# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
642# CONFIG_LEGACY_PTYS is not set 745# CONFIG_LEGACY_PTYS is not set
643# CONFIG_IPMI_HANDLER is not set 746# CONFIG_IPMI_HANDLER is not set
644# CONFIG_HW_RANDOM is not set 747# CONFIG_HW_RANDOM is not set
@@ -647,45 +750,44 @@ CONFIG_UNIX98_PTYS=y
647# CONFIG_TCG_TPM is not set 750# CONFIG_TCG_TPM is not set
648CONFIG_I2C=m 751CONFIG_I2C=m
649CONFIG_I2C_BOARDINFO=y 752CONFIG_I2C_BOARDINFO=y
753CONFIG_I2C_COMPAT=y
650CONFIG_I2C_CHARDEV=m 754CONFIG_I2C_CHARDEV=m
755CONFIG_I2C_HELPER_AUTO=y
756CONFIG_I2C_ALGOBIT=m
651 757
652# 758#
653# I2C Algorithms 759# I2C Hardware Bus support
654# 760#
655CONFIG_I2C_ALGOBIT=m
656# CONFIG_I2C_ALGOPCF is not set
657# CONFIG_I2C_ALGOPCA is not set
658 761
659# 762#
660# I2C Hardware Bus support 763# I2C system bus drivers (mostly embedded / system-on-chip)
661# 764#
662CONFIG_I2C_ATMELTWI=m 765# CONFIG_I2C_DESIGNWARE is not set
663CONFIG_I2C_GPIO=m 766CONFIG_I2C_GPIO=m
664# CONFIG_I2C_OCORES is not set 767# CONFIG_I2C_OCORES is not set
665# CONFIG_I2C_PARPORT_LIGHT is not set
666# CONFIG_I2C_SIMTEC is not set 768# CONFIG_I2C_SIMTEC is not set
769
770#
771# External I2C/SMBus adapter drivers
772#
773# CONFIG_I2C_PARPORT_LIGHT is not set
667# CONFIG_I2C_TAOS_EVM is not set 774# CONFIG_I2C_TAOS_EVM is not set
775
776#
777# Other I2C/SMBus bus drivers
778#
779# CONFIG_I2C_PCA_PLATFORM is not set
668# CONFIG_I2C_STUB is not set 780# CONFIG_I2C_STUB is not set
669 781
670# 782#
671# Miscellaneous I2C Chip support 783# Miscellaneous I2C Chip support
672# 784#
673# CONFIG_DS1682 is not set 785# CONFIG_DS1682 is not set
674# CONFIG_EEPROM_LEGACY is not set
675# CONFIG_SENSORS_PCF8574 is not set
676# CONFIG_PCF8575 is not set
677# CONFIG_SENSORS_PCF8591 is not set
678# CONFIG_TPS65010 is not set
679# CONFIG_SENSORS_MAX6875 is not set
680# CONFIG_SENSORS_TSL2550 is not set 786# CONFIG_SENSORS_TSL2550 is not set
681# CONFIG_I2C_DEBUG_CORE is not set 787# CONFIG_I2C_DEBUG_CORE is not set
682# CONFIG_I2C_DEBUG_ALGO is not set 788# CONFIG_I2C_DEBUG_ALGO is not set
683# CONFIG_I2C_DEBUG_BUS is not set 789# CONFIG_I2C_DEBUG_BUS is not set
684# CONFIG_I2C_DEBUG_CHIP is not set 790# CONFIG_I2C_DEBUG_CHIP is not set
685
686#
687# SPI support
688#
689CONFIG_SPI=y 791CONFIG_SPI=y
690# CONFIG_SPI_DEBUG is not set 792# CONFIG_SPI_DEBUG is not set
691CONFIG_SPI_MASTER=y 793CONFIG_SPI_MASTER=y
@@ -695,30 +797,48 @@ CONFIG_SPI_MASTER=y
695# 797#
696CONFIG_SPI_ATMEL=y 798CONFIG_SPI_ATMEL=y
697# CONFIG_SPI_BITBANG is not set 799# CONFIG_SPI_BITBANG is not set
800# CONFIG_SPI_GPIO is not set
698 801
699# 802#
700# SPI Protocol Masters 803# SPI Protocol Masters
701# 804#
702# CONFIG_EEPROM_AT25 is not set
703CONFIG_SPI_SPIDEV=m 805CONFIG_SPI_SPIDEV=m
704# CONFIG_SPI_TLE62X0 is not set 806# CONFIG_SPI_TLE62X0 is not set
705CONFIG_HAVE_GPIO_LIB=y
706 807
707# 808#
708# GPIO Support 809# PPS support
709# 810#
811# CONFIG_PPS is not set
812CONFIG_ARCH_REQUIRE_GPIOLIB=y
813CONFIG_GPIOLIB=y
710# CONFIG_DEBUG_GPIO is not set 814# CONFIG_DEBUG_GPIO is not set
815# CONFIG_GPIO_SYSFS is not set
816
817#
818# Memory mapped GPIO expanders:
819#
711 820
712# 821#
713# I2C GPIO expanders: 822# I2C GPIO expanders:
714# 823#
824# CONFIG_GPIO_MAX732X is not set
715# CONFIG_GPIO_PCA953X is not set 825# CONFIG_GPIO_PCA953X is not set
716# CONFIG_GPIO_PCF857X is not set 826# CONFIG_GPIO_PCF857X is not set
717 827
718# 828#
829# PCI GPIO expanders:
830#
831
832#
719# SPI GPIO expanders: 833# SPI GPIO expanders:
720# 834#
835# CONFIG_GPIO_MAX7301 is not set
721# CONFIG_GPIO_MCP23S08 is not set 836# CONFIG_GPIO_MCP23S08 is not set
837# CONFIG_GPIO_MC33880 is not set
838
839#
840# AC97 GPIO expanders:
841#
722# CONFIG_W1 is not set 842# CONFIG_W1 is not set
723# CONFIG_POWER_SUPPLY is not set 843# CONFIG_POWER_SUPPLY is not set
724# CONFIG_HWMON is not set 844# CONFIG_HWMON is not set
@@ -731,24 +851,31 @@ CONFIG_WATCHDOG=y
731# 851#
732# CONFIG_SOFT_WATCHDOG is not set 852# CONFIG_SOFT_WATCHDOG is not set
733CONFIG_AT32AP700X_WDT=y 853CONFIG_AT32AP700X_WDT=y
854CONFIG_SSB_POSSIBLE=y
734 855
735# 856#
736# Sonics Silicon Backplane 857# Sonics Silicon Backplane
737# 858#
738CONFIG_SSB_POSSIBLE=y
739# CONFIG_SSB is not set 859# CONFIG_SSB is not set
740 860
741# 861#
742# Multifunction device drivers 862# Multifunction device drivers
743# 863#
864# CONFIG_MFD_CORE is not set
744# CONFIG_MFD_SM501 is not set 865# CONFIG_MFD_SM501 is not set
745 866# CONFIG_HTC_PASIC3 is not set
746# 867# CONFIG_UCB1400_CORE is not set
747# Multimedia devices 868# CONFIG_TPS65010 is not set
748# 869# CONFIG_MFD_TMIO is not set
749# CONFIG_VIDEO_DEV is not set 870# CONFIG_MFD_WM8400 is not set
750# CONFIG_DVB_CORE is not set 871# CONFIG_MFD_WM831X is not set
751# CONFIG_DAB is not set 872# CONFIG_MFD_WM8350_I2C is not set
873# CONFIG_MFD_PCF50633 is not set
874# CONFIG_MFD_MC13783 is not set
875# CONFIG_AB3100_CORE is not set
876# CONFIG_EZX_PCAP is not set
877# CONFIG_REGULATOR is not set
878# CONFIG_MEDIA_SUPPORT is not set
752 879
753# 880#
754# Graphics support 881# Graphics support
@@ -758,6 +885,7 @@ CONFIG_SSB_POSSIBLE=y
758CONFIG_FB=y 885CONFIG_FB=y
759# CONFIG_FIRMWARE_EDID is not set 886# CONFIG_FIRMWARE_EDID is not set
760# CONFIG_FB_DDC is not set 887# CONFIG_FB_DDC is not set
888# CONFIG_FB_BOOT_VESA_SUPPORT is not set
761CONFIG_FB_CFB_FILLRECT=y 889CONFIG_FB_CFB_FILLRECT=y
762CONFIG_FB_CFB_COPYAREA=y 890CONFIG_FB_CFB_COPYAREA=y
763CONFIG_FB_CFB_IMAGEBLIT=y 891CONFIG_FB_CFB_IMAGEBLIT=y
@@ -765,8 +893,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y
765# CONFIG_FB_SYS_FILLRECT is not set 893# CONFIG_FB_SYS_FILLRECT is not set
766# CONFIG_FB_SYS_COPYAREA is not set 894# CONFIG_FB_SYS_COPYAREA is not set
767# CONFIG_FB_SYS_IMAGEBLIT is not set 895# CONFIG_FB_SYS_IMAGEBLIT is not set
896# CONFIG_FB_FOREIGN_ENDIAN is not set
768# CONFIG_FB_SYS_FOPS is not set 897# CONFIG_FB_SYS_FOPS is not set
769CONFIG_FB_DEFERRED_IO=y
770# CONFIG_FB_SVGALIB is not set 898# CONFIG_FB_SVGALIB is not set
771# CONFIG_FB_MACMODES is not set 899# CONFIG_FB_MACMODES is not set
772# CONFIG_FB_BACKLIGHT is not set 900# CONFIG_FB_BACKLIGHT is not set
@@ -779,6 +907,9 @@ CONFIG_FB_DEFERRED_IO=y
779# CONFIG_FB_S1D13XXX is not set 907# CONFIG_FB_S1D13XXX is not set
780CONFIG_FB_ATMEL=y 908CONFIG_FB_ATMEL=y
781# CONFIG_FB_VIRTUAL is not set 909# CONFIG_FB_VIRTUAL is not set
910# CONFIG_FB_METRONOME is not set
911# CONFIG_FB_MB862XX is not set
912# CONFIG_FB_BROADSHEET is not set
782# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 913# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
783 914
784# 915#
@@ -792,119 +923,124 @@ CONFIG_FB_ATMEL=y
792CONFIG_DUMMY_CONSOLE=y 923CONFIG_DUMMY_CONSOLE=y
793# CONFIG_FRAMEBUFFER_CONSOLE is not set 924# CONFIG_FRAMEBUFFER_CONSOLE is not set
794# CONFIG_LOGO is not set 925# CONFIG_LOGO is not set
795
796#
797# Sound
798#
799CONFIG_SOUND=y 926CONFIG_SOUND=y
800 927CONFIG_SOUND_OSS_CORE=y
801# 928CONFIG_SOUND_OSS_CORE_PRECLAIM=y
802# Advanced Linux Sound Architecture
803#
804CONFIG_SND=y 929CONFIG_SND=y
805CONFIG_SND_TIMER=m 930CONFIG_SND_TIMER=y
806CONFIG_SND_PCM=m 931CONFIG_SND_PCM=m
807# CONFIG_SND_SEQUENCER is not set 932# CONFIG_SND_SEQUENCER is not set
808CONFIG_SND_OSSEMUL=y 933CONFIG_SND_OSSEMUL=y
809CONFIG_SND_MIXER_OSS=m 934CONFIG_SND_MIXER_OSS=m
810CONFIG_SND_PCM_OSS=m 935CONFIG_SND_PCM_OSS=m
811CONFIG_SND_PCM_OSS_PLUGINS=y 936CONFIG_SND_PCM_OSS_PLUGINS=y
937CONFIG_SND_HRTIMER=y
812# CONFIG_SND_DYNAMIC_MINORS is not set 938# CONFIG_SND_DYNAMIC_MINORS is not set
813# CONFIG_SND_SUPPORT_OLD_API is not set 939# CONFIG_SND_SUPPORT_OLD_API is not set
814CONFIG_SND_VERBOSE_PROCFS=y 940CONFIG_SND_VERBOSE_PROCFS=y
815# CONFIG_SND_VERBOSE_PRINTK is not set 941# CONFIG_SND_VERBOSE_PRINTK is not set
816# CONFIG_SND_DEBUG is not set 942# CONFIG_SND_DEBUG is not set
817 943CONFIG_SND_VMASTER=y
818# 944# CONFIG_SND_RAWMIDI_SEQ is not set
819# Generic devices 945# CONFIG_SND_OPL3_LIB_SEQ is not set
820# 946# CONFIG_SND_OPL4_LIB_SEQ is not set
947# CONFIG_SND_SBAWE_SEQ is not set
948# CONFIG_SND_EMU10K1_SEQ is not set
821CONFIG_SND_AC97_CODEC=m 949CONFIG_SND_AC97_CODEC=m
822# CONFIG_SND_DUMMY is not set 950# CONFIG_SND_DRIVERS is not set
823# CONFIG_SND_MTPAV is not set
824# CONFIG_SND_SERIAL_U16550 is not set
825# CONFIG_SND_MPU401 is not set
826 951
827# 952#
828# AVR32 devices 953# Atmel devices (AVR32 and AT91)
829#
830CONFIG_SND_ATMEL_AC97=m
831
832#
833# SPI devices
834#
835
836#
837# System on Chip audio support
838# 954#
955# CONFIG_SND_ATMEL_ABDAC is not set
956CONFIG_SND_ATMEL_AC97C=m
957# CONFIG_SND_SPI is not set
839# CONFIG_SND_SOC is not set 958# CONFIG_SND_SOC is not set
840
841#
842# SoC Audio support for SuperH
843#
844
845#
846# ALSA SoC audio for Freescale SOCs
847#
848
849#
850# Open Sound System
851#
852# CONFIG_SOUND_PRIME is not set 959# CONFIG_SOUND_PRIME is not set
853CONFIG_AC97_BUS=m 960CONFIG_AC97_BUS=m
854CONFIG_HID_SUPPORT=y 961CONFIG_HID_SUPPORT=y
855CONFIG_HID=y 962CONFIG_HID=y
856# CONFIG_HID_DEBUG is not set
857# CONFIG_HIDRAW is not set 963# CONFIG_HIDRAW is not set
964# CONFIG_HID_PID is not set
965
966#
967# Special HID drivers
968#
858CONFIG_USB_SUPPORT=y 969CONFIG_USB_SUPPORT=y
859# CONFIG_USB_ARCH_HAS_HCD is not set 970# CONFIG_USB_ARCH_HAS_HCD is not set
860# CONFIG_USB_ARCH_HAS_OHCI is not set 971# CONFIG_USB_ARCH_HAS_OHCI is not set
861# CONFIG_USB_ARCH_HAS_EHCI is not set 972# CONFIG_USB_ARCH_HAS_EHCI is not set
973# CONFIG_USB_OTG_WHITELIST is not set
974# CONFIG_USB_OTG_BLACKLIST_HUB is not set
975# CONFIG_USB_GADGET_MUSB_HDRC is not set
862 976
863# 977#
864# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 978# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
865# 979#
866CONFIG_USB_GADGET=y 980CONFIG_USB_GADGET=y
867# CONFIG_USB_GADGET_DEBUG is not set 981# CONFIG_USB_GADGET_DEBUG is not set
868# CONFIG_USB_GADGET_DEBUG_FILES is not set 982# CONFIG_USB_GADGET_DEBUG_FILES is not set
983# CONFIG_USB_GADGET_DEBUG_FS is not set
984CONFIG_USB_GADGET_VBUS_DRAW=350
869CONFIG_USB_GADGET_SELECTED=y 985CONFIG_USB_GADGET_SELECTED=y
870# CONFIG_USB_GADGET_AMD5536UDC is not set 986# CONFIG_USB_GADGET_AT91 is not set
871CONFIG_USB_GADGET_ATMEL_USBA=y 987CONFIG_USB_GADGET_ATMEL_USBA=y
872CONFIG_USB_ATMEL_USBA=y 988CONFIG_USB_ATMEL_USBA=y
873# CONFIG_USB_GADGET_FSL_USB2 is not set 989# CONFIG_USB_GADGET_FSL_USB2 is not set
874# CONFIG_USB_GADGET_NET2280 is not set
875# CONFIG_USB_GADGET_PXA2XX is not set
876# CONFIG_USB_GADGET_M66592 is not set
877# CONFIG_USB_GADGET_GOKU is not set
878# CONFIG_USB_GADGET_LH7A40X is not set 990# CONFIG_USB_GADGET_LH7A40X is not set
879# CONFIG_USB_GADGET_OMAP is not set 991# CONFIG_USB_GADGET_OMAP is not set
992# CONFIG_USB_GADGET_PXA25X is not set
993# CONFIG_USB_GADGET_R8A66597 is not set
994# CONFIG_USB_GADGET_PXA27X is not set
995# CONFIG_USB_GADGET_S3C_HSOTG is not set
996# CONFIG_USB_GADGET_IMX is not set
880# CONFIG_USB_GADGET_S3C2410 is not set 997# CONFIG_USB_GADGET_S3C2410 is not set
881# CONFIG_USB_GADGET_AT91 is not set 998# CONFIG_USB_GADGET_M66592 is not set
999# CONFIG_USB_GADGET_AMD5536UDC is not set
1000# CONFIG_USB_GADGET_FSL_QE is not set
1001# CONFIG_USB_GADGET_CI13XXX is not set
1002# CONFIG_USB_GADGET_NET2280 is not set
1003# CONFIG_USB_GADGET_GOKU is not set
1004# CONFIG_USB_GADGET_LANGWELL is not set
882# CONFIG_USB_GADGET_DUMMY_HCD is not set 1005# CONFIG_USB_GADGET_DUMMY_HCD is not set
883CONFIG_USB_GADGET_DUALSPEED=y 1006CONFIG_USB_GADGET_DUALSPEED=y
884CONFIG_USB_ZERO=m 1007CONFIG_USB_ZERO=m
1008# CONFIG_USB_AUDIO is not set
885CONFIG_USB_ETH=m 1009CONFIG_USB_ETH=m
886CONFIG_USB_ETH_RNDIS=y 1010CONFIG_USB_ETH_RNDIS=y
1011# CONFIG_USB_ETH_EEM is not set
887CONFIG_USB_GADGETFS=m 1012CONFIG_USB_GADGETFS=m
888CONFIG_USB_FILE_STORAGE=m 1013CONFIG_USB_FILE_STORAGE=m
889# CONFIG_USB_FILE_STORAGE_TEST is not set 1014# CONFIG_USB_FILE_STORAGE_TEST is not set
890CONFIG_USB_G_SERIAL=m 1015CONFIG_USB_G_SERIAL=m
891# CONFIG_USB_MIDI_GADGET is not set 1016# CONFIG_USB_MIDI_GADGET is not set
892# CONFIG_USB_G_PRINTER is not set 1017# CONFIG_USB_G_PRINTER is not set
1018CONFIG_USB_CDC_COMPOSITE=m
1019
1020#
1021# OTG and related infrastructure
1022#
1023# CONFIG_USB_GPIO_VBUS is not set
1024# CONFIG_NOP_USB_XCEIV is not set
893CONFIG_MMC=y 1025CONFIG_MMC=y
894# CONFIG_MMC_DEBUG is not set 1026# CONFIG_MMC_DEBUG is not set
895# CONFIG_MMC_UNSAFE_RESUME is not set 1027# CONFIG_MMC_UNSAFE_RESUME is not set
896 1028
897# 1029#
898# MMC/SD Card Drivers 1030# MMC/SD/SDIO Card Drivers
899# 1031#
900CONFIG_MMC_BLOCK=y 1032CONFIG_MMC_BLOCK=y
901CONFIG_MMC_BLOCK_BOUNCE=y 1033CONFIG_MMC_BLOCK_BOUNCE=y
902# CONFIG_SDIO_UART is not set 1034# CONFIG_SDIO_UART is not set
1035# CONFIG_MMC_TEST is not set
903 1036
904# 1037#
905# MMC/SD Host Controller Drivers 1038# MMC/SD/SDIO Host Controller Drivers
906# 1039#
1040# CONFIG_MMC_SDHCI is not set
1041# CONFIG_MMC_AT91 is not set
907CONFIG_MMC_ATMELMCI=y 1042CONFIG_MMC_ATMELMCI=y
1043# CONFIG_MMC_ATMELMCI_DMA is not set
908# CONFIG_MMC_SPI is not set 1044# CONFIG_MMC_SPI is not set
909# CONFIG_MEMSTICK is not set 1045# CONFIG_MEMSTICK is not set
910CONFIG_NEW_LEDS=y 1046CONFIG_NEW_LEDS=y
@@ -913,7 +1049,13 @@ CONFIG_LEDS_CLASS=y
913# 1049#
914# LED drivers 1050# LED drivers
915# 1051#
1052# CONFIG_LEDS_PCA9532 is not set
916CONFIG_LEDS_GPIO=y 1053CONFIG_LEDS_GPIO=y
1054CONFIG_LEDS_GPIO_PLATFORM=y
1055# CONFIG_LEDS_LP3944 is not set
1056# CONFIG_LEDS_PCA955X is not set
1057# CONFIG_LEDS_DAC124S085 is not set
1058# CONFIG_LEDS_BD2802 is not set
917 1059
918# 1060#
919# LED Triggers 1061# LED Triggers
@@ -921,6 +1063,14 @@ CONFIG_LEDS_GPIO=y
921CONFIG_LEDS_TRIGGERS=y 1063CONFIG_LEDS_TRIGGERS=y
922CONFIG_LEDS_TRIGGER_TIMER=y 1064CONFIG_LEDS_TRIGGER_TIMER=y
923CONFIG_LEDS_TRIGGER_HEARTBEAT=y 1065CONFIG_LEDS_TRIGGER_HEARTBEAT=y
1066# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1067# CONFIG_LEDS_TRIGGER_GPIO is not set
1068# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1069
1070#
1071# iptables trigger is under Netfilter config (LED target)
1072#
1073# CONFIG_ACCESSIBILITY is not set
924CONFIG_RTC_LIB=y 1074CONFIG_RTC_LIB=y
925CONFIG_RTC_CLASS=y 1075CONFIG_RTC_CLASS=y
926CONFIG_RTC_HCTOSYS=y 1076CONFIG_RTC_HCTOSYS=y
@@ -950,51 +1100,84 @@ CONFIG_RTC_INTF_DEV=y
950# CONFIG_RTC_DRV_PCF8583 is not set 1100# CONFIG_RTC_DRV_PCF8583 is not set
951# CONFIG_RTC_DRV_M41T80 is not set 1101# CONFIG_RTC_DRV_M41T80 is not set
952# CONFIG_RTC_DRV_S35390A is not set 1102# CONFIG_RTC_DRV_S35390A is not set
1103# CONFIG_RTC_DRV_FM3130 is not set
1104# CONFIG_RTC_DRV_RX8581 is not set
1105# CONFIG_RTC_DRV_RX8025 is not set
953 1106
954# 1107#
955# SPI RTC drivers 1108# SPI RTC drivers
956# 1109#
1110# CONFIG_RTC_DRV_M41T94 is not set
1111# CONFIG_RTC_DRV_DS1305 is not set
1112# CONFIG_RTC_DRV_DS1390 is not set
957# CONFIG_RTC_DRV_MAX6902 is not set 1113# CONFIG_RTC_DRV_MAX6902 is not set
958# CONFIG_RTC_DRV_R9701 is not set 1114# CONFIG_RTC_DRV_R9701 is not set
959# CONFIG_RTC_DRV_RS5C348 is not set 1115# CONFIG_RTC_DRV_RS5C348 is not set
1116# CONFIG_RTC_DRV_DS3234 is not set
1117# CONFIG_RTC_DRV_PCF2123 is not set
960 1118
961# 1119#
962# Platform RTC drivers 1120# Platform RTC drivers
963# 1121#
1122# CONFIG_RTC_DRV_DS1286 is not set
964# CONFIG_RTC_DRV_DS1511 is not set 1123# CONFIG_RTC_DRV_DS1511 is not set
965# CONFIG_RTC_DRV_DS1553 is not set 1124# CONFIG_RTC_DRV_DS1553 is not set
966# CONFIG_RTC_DRV_DS1742 is not set 1125# CONFIG_RTC_DRV_DS1742 is not set
967# CONFIG_RTC_DRV_STK17TA8 is not set 1126# CONFIG_RTC_DRV_STK17TA8 is not set
968# CONFIG_RTC_DRV_M48T86 is not set 1127# CONFIG_RTC_DRV_M48T86 is not set
1128# CONFIG_RTC_DRV_M48T35 is not set
969# CONFIG_RTC_DRV_M48T59 is not set 1129# CONFIG_RTC_DRV_M48T59 is not set
1130# CONFIG_RTC_DRV_BQ4802 is not set
970# CONFIG_RTC_DRV_V3020 is not set 1131# CONFIG_RTC_DRV_V3020 is not set
971 1132
972# 1133#
973# on-CPU RTC drivers 1134# on-CPU RTC drivers
974# 1135#
975CONFIG_RTC_DRV_AT32AP700X=y 1136CONFIG_RTC_DRV_AT32AP700X=y
1137CONFIG_DMADEVICES=y
976 1138
977# 1139#
978# Userspace I/O 1140# DMA Devices
979# 1141#
1142CONFIG_DW_DMAC=y
1143CONFIG_DMA_ENGINE=y
1144
1145#
1146# DMA Clients
1147#
1148# CONFIG_NET_DMA is not set
1149# CONFIG_ASYNC_TX_DMA is not set
1150# CONFIG_DMATEST is not set
1151# CONFIG_AUXDISPLAY is not set
980# CONFIG_UIO is not set 1152# CONFIG_UIO is not set
981 1153
982# 1154#
1155# TI VLYNQ
1156#
1157# CONFIG_STAGING is not set
1158
1159#
983# File systems 1160# File systems
984# 1161#
985CONFIG_EXT2_FS=y 1162CONFIG_EXT2_FS=y
986# CONFIG_EXT2_FS_XATTR is not set 1163# CONFIG_EXT2_FS_XATTR is not set
987# CONFIG_EXT2_FS_XIP is not set 1164# CONFIG_EXT2_FS_XIP is not set
988CONFIG_EXT3_FS=y 1165CONFIG_EXT3_FS=y
1166# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
989# CONFIG_EXT3_FS_XATTR is not set 1167# CONFIG_EXT3_FS_XATTR is not set
990# CONFIG_EXT4DEV_FS is not set 1168# CONFIG_EXT4_FS is not set
991CONFIG_JBD=y 1169CONFIG_JBD=y
1170# CONFIG_JBD_DEBUG is not set
992# CONFIG_REISERFS_FS is not set 1171# CONFIG_REISERFS_FS is not set
993# CONFIG_JFS_FS is not set 1172# CONFIG_JFS_FS is not set
994# CONFIG_FS_POSIX_ACL is not set 1173# CONFIG_FS_POSIX_ACL is not set
995# CONFIG_XFS_FS is not set 1174# CONFIG_XFS_FS is not set
996# CONFIG_GFS2_FS is not set 1175# CONFIG_GFS2_FS is not set
997# CONFIG_OCFS2_FS is not set 1176# CONFIG_OCFS2_FS is not set
1177# CONFIG_BTRFS_FS is not set
1178# CONFIG_NILFS2_FS is not set
1179CONFIG_FILE_LOCKING=y
1180CONFIG_FSNOTIFY=y
998# CONFIG_DNOTIFY is not set 1181# CONFIG_DNOTIFY is not set
999CONFIG_INOTIFY=y 1182CONFIG_INOTIFY=y
1000CONFIG_INOTIFY_USER=y 1183CONFIG_INOTIFY_USER=y
@@ -1002,6 +1185,12 @@ CONFIG_INOTIFY_USER=y
1002# CONFIG_AUTOFS_FS is not set 1185# CONFIG_AUTOFS_FS is not set
1003# CONFIG_AUTOFS4_FS is not set 1186# CONFIG_AUTOFS4_FS is not set
1004CONFIG_FUSE_FS=m 1187CONFIG_FUSE_FS=m
1188# CONFIG_CUSE is not set
1189
1190#
1191# Caches
1192#
1193# CONFIG_FSCACHE is not set
1005 1194
1006# 1195#
1007# CD-ROM/DVD Filesystems 1196# CD-ROM/DVD Filesystems
@@ -1025,15 +1214,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1025CONFIG_PROC_FS=y 1214CONFIG_PROC_FS=y
1026# CONFIG_PROC_KCORE is not set 1215# CONFIG_PROC_KCORE is not set
1027CONFIG_PROC_SYSCTL=y 1216CONFIG_PROC_SYSCTL=y
1217CONFIG_PROC_PAGE_MONITOR=y
1028CONFIG_SYSFS=y 1218CONFIG_SYSFS=y
1029CONFIG_TMPFS=y 1219CONFIG_TMPFS=y
1030# CONFIG_TMPFS_POSIX_ACL is not set 1220# CONFIG_TMPFS_POSIX_ACL is not set
1031# CONFIG_HUGETLB_PAGE is not set 1221# CONFIG_HUGETLB_PAGE is not set
1032CONFIG_CONFIGFS_FS=y 1222CONFIG_CONFIGFS_FS=y
1033 1223CONFIG_MISC_FILESYSTEMS=y
1034#
1035# Miscellaneous filesystems
1036#
1037# CONFIG_ADFS_FS is not set 1224# CONFIG_ADFS_FS is not set
1038# CONFIG_AFFS_FS is not set 1225# CONFIG_AFFS_FS is not set
1039# CONFIG_HFS_FS is not set 1226# CONFIG_HFS_FS is not set
@@ -1059,8 +1246,10 @@ CONFIG_UBIFS_FS_LZO=y
1059CONFIG_UBIFS_FS_ZLIB=y 1246CONFIG_UBIFS_FS_ZLIB=y
1060# CONFIG_UBIFS_FS_DEBUG is not set 1247# CONFIG_UBIFS_FS_DEBUG is not set
1061# CONFIG_CRAMFS is not set 1248# CONFIG_CRAMFS is not set
1249# CONFIG_SQUASHFS is not set
1062# CONFIG_VXFS_FS is not set 1250# CONFIG_VXFS_FS is not set
1063# CONFIG_MINIX_FS is not set 1251# CONFIG_MINIX_FS is not set
1252# CONFIG_OMFS_FS is not set
1064# CONFIG_HPFS_FS is not set 1253# CONFIG_HPFS_FS is not set
1065# CONFIG_QNX4FS_FS is not set 1254# CONFIG_QNX4FS_FS is not set
1066# CONFIG_ROMFS_FS is not set 1255# CONFIG_ROMFS_FS is not set
@@ -1071,19 +1260,16 @@ CONFIG_NFS_FS=y
1071CONFIG_NFS_V3=y 1260CONFIG_NFS_V3=y
1072# CONFIG_NFS_V3_ACL is not set 1261# CONFIG_NFS_V3_ACL is not set
1073# CONFIG_NFS_V4 is not set 1262# CONFIG_NFS_V4 is not set
1074# CONFIG_NFS_DIRECTIO is not set 1263CONFIG_ROOT_NFS=y
1075CONFIG_NFSD=m 1264CONFIG_NFSD=m
1076CONFIG_NFSD_V3=y 1265CONFIG_NFSD_V3=y
1077# CONFIG_NFSD_V3_ACL is not set 1266# CONFIG_NFSD_V3_ACL is not set
1078# CONFIG_NFSD_V4 is not set 1267# CONFIG_NFSD_V4 is not set
1079CONFIG_NFSD_TCP=y
1080CONFIG_ROOT_NFS=y
1081CONFIG_LOCKD=y 1268CONFIG_LOCKD=y
1082CONFIG_LOCKD_V4=y 1269CONFIG_LOCKD_V4=y
1083CONFIG_EXPORTFS=m 1270CONFIG_EXPORTFS=m
1084CONFIG_NFS_COMMON=y 1271CONFIG_NFS_COMMON=y
1085CONFIG_SUNRPC=y 1272CONFIG_SUNRPC=y
1086# CONFIG_SUNRPC_BIND34 is not set
1087# CONFIG_RPCSEC_GSS_KRB5 is not set 1273# CONFIG_RPCSEC_GSS_KRB5 is not set
1088# CONFIG_RPCSEC_GSS_SPKM3 is not set 1274# CONFIG_RPCSEC_GSS_SPKM3 is not set
1089CONFIG_SMB_FS=m 1275CONFIG_SMB_FS=m
@@ -1151,16 +1337,24 @@ CONFIG_NLS_UTF8=m
1151# CONFIG_PRINTK_TIME is not set 1337# CONFIG_PRINTK_TIME is not set
1152CONFIG_ENABLE_WARN_DEPRECATED=y 1338CONFIG_ENABLE_WARN_DEPRECATED=y
1153CONFIG_ENABLE_MUST_CHECK=y 1339CONFIG_ENABLE_MUST_CHECK=y
1340CONFIG_FRAME_WARN=1024
1154CONFIG_MAGIC_SYSRQ=y 1341CONFIG_MAGIC_SYSRQ=y
1342# CONFIG_STRIP_ASM_SYMS is not set
1155# CONFIG_UNUSED_SYMBOLS is not set 1343# CONFIG_UNUSED_SYMBOLS is not set
1156# CONFIG_DEBUG_FS is not set 1344CONFIG_DEBUG_FS=y
1157# CONFIG_HEADERS_CHECK is not set 1345# CONFIG_HEADERS_CHECK is not set
1158CONFIG_DEBUG_KERNEL=y 1346CONFIG_DEBUG_KERNEL=y
1159# CONFIG_DEBUG_SHIRQ is not set 1347# CONFIG_DEBUG_SHIRQ is not set
1160CONFIG_DETECT_SOFTLOCKUP=y 1348CONFIG_DETECT_SOFTLOCKUP=y
1349# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1350CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1351CONFIG_DETECT_HUNG_TASK=y
1352# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1353CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1161CONFIG_SCHED_DEBUG=y 1354CONFIG_SCHED_DEBUG=y
1162# CONFIG_SCHEDSTATS is not set 1355# CONFIG_SCHEDSTATS is not set
1163# CONFIG_TIMER_STATS is not set 1356# CONFIG_TIMER_STATS is not set
1357# CONFIG_DEBUG_OBJECTS is not set
1164# CONFIG_SLUB_DEBUG_ON is not set 1358# CONFIG_SLUB_DEBUG_ON is not set
1165# CONFIG_SLUB_STATS is not set 1359# CONFIG_SLUB_STATS is not set
1166# CONFIG_DEBUG_RT_MUTEXES is not set 1360# CONFIG_DEBUG_RT_MUTEXES is not set
@@ -1172,19 +1366,48 @@ CONFIG_SCHED_DEBUG=y
1172# CONFIG_LOCK_STAT is not set 1366# CONFIG_LOCK_STAT is not set
1173# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1367# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1174# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1368# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1369CONFIG_STACKTRACE=y
1175# CONFIG_DEBUG_KOBJECT is not set 1370# CONFIG_DEBUG_KOBJECT is not set
1176CONFIG_DEBUG_BUGVERBOSE=y 1371CONFIG_DEBUG_BUGVERBOSE=y
1177# CONFIG_DEBUG_INFO is not set 1372# CONFIG_DEBUG_INFO is not set
1178# CONFIG_DEBUG_VM is not set 1373# CONFIG_DEBUG_VM is not set
1374# CONFIG_DEBUG_WRITECOUNT is not set
1375# CONFIG_DEBUG_MEMORY_INIT is not set
1179# CONFIG_DEBUG_LIST is not set 1376# CONFIG_DEBUG_LIST is not set
1180# CONFIG_DEBUG_SG is not set 1377# CONFIG_DEBUG_SG is not set
1378# CONFIG_DEBUG_NOTIFIERS is not set
1379# CONFIG_DEBUG_CREDENTIALS is not set
1181CONFIG_FRAME_POINTER=y 1380CONFIG_FRAME_POINTER=y
1182# CONFIG_BOOT_PRINTK_DELAY is not set 1381# CONFIG_BOOT_PRINTK_DELAY is not set
1183# CONFIG_RCU_TORTURE_TEST is not set 1382# CONFIG_RCU_TORTURE_TEST is not set
1383# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1184# CONFIG_KPROBES_SANITY_TEST is not set 1384# CONFIG_KPROBES_SANITY_TEST is not set
1185# CONFIG_BACKTRACE_SELF_TEST is not set 1385# CONFIG_BACKTRACE_SELF_TEST is not set
1386# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1387# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1186# CONFIG_LKDTM is not set 1388# CONFIG_LKDTM is not set
1187# CONFIG_FAULT_INJECTION is not set 1389# CONFIG_FAULT_INJECTION is not set
1390# CONFIG_PAGE_POISONING is not set
1391CONFIG_NOP_TRACER=y
1392CONFIG_RING_BUFFER=y
1393CONFIG_EVENT_TRACING=y
1394CONFIG_CONTEXT_SWITCH_TRACER=y
1395CONFIG_RING_BUFFER_ALLOW_SWAP=y
1396CONFIG_TRACING=y
1397CONFIG_TRACING_SUPPORT=y
1398CONFIG_FTRACE=y
1399# CONFIG_IRQSOFF_TRACER is not set
1400# CONFIG_SCHED_TRACER is not set
1401# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1402# CONFIG_BOOT_TRACER is not set
1403CONFIG_BRANCH_PROFILE_NONE=y
1404# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1405# CONFIG_PROFILE_ALL_BRANCHES is not set
1406# CONFIG_KMEMTRACE is not set
1407# CONFIG_WORKQUEUE_TRACER is not set
1408# CONFIG_BLK_DEV_IO_TRACE is not set
1409# CONFIG_RING_BUFFER_BENCHMARK is not set
1410# CONFIG_DYNAMIC_DEBUG is not set
1188# CONFIG_SAMPLES is not set 1411# CONFIG_SAMPLES is not set
1189 1412
1190# 1413#
@@ -1192,63 +1415,118 @@ CONFIG_FRAME_POINTER=y
1192# 1415#
1193# CONFIG_KEYS is not set 1416# CONFIG_KEYS is not set
1194# CONFIG_SECURITY is not set 1417# CONFIG_SECURITY is not set
1418# CONFIG_SECURITYFS is not set
1195# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1419# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1196CONFIG_CRYPTO=y 1420CONFIG_CRYPTO=y
1421
1422#
1423# Crypto core or helper
1424#
1425# CONFIG_CRYPTO_FIPS is not set
1197CONFIG_CRYPTO_ALGAPI=y 1426CONFIG_CRYPTO_ALGAPI=y
1427CONFIG_CRYPTO_ALGAPI2=y
1198CONFIG_CRYPTO_AEAD=y 1428CONFIG_CRYPTO_AEAD=y
1429CONFIG_CRYPTO_AEAD2=y
1199CONFIG_CRYPTO_BLKCIPHER=y 1430CONFIG_CRYPTO_BLKCIPHER=y
1200# CONFIG_CRYPTO_SEQIV is not set 1431CONFIG_CRYPTO_BLKCIPHER2=y
1201CONFIG_CRYPTO_HASH=y 1432CONFIG_CRYPTO_HASH=y
1433CONFIG_CRYPTO_HASH2=y
1434CONFIG_CRYPTO_RNG=m
1435CONFIG_CRYPTO_RNG2=y
1436CONFIG_CRYPTO_PCOMP=y
1202CONFIG_CRYPTO_MANAGER=y 1437CONFIG_CRYPTO_MANAGER=y
1438CONFIG_CRYPTO_MANAGER2=y
1439# CONFIG_CRYPTO_GF128MUL is not set
1440# CONFIG_CRYPTO_NULL is not set
1441CONFIG_CRYPTO_WORKQUEUE=y
1442# CONFIG_CRYPTO_CRYPTD is not set
1443CONFIG_CRYPTO_AUTHENC=y
1444# CONFIG_CRYPTO_TEST is not set
1445
1446#
1447# Authenticated Encryption with Associated Data
1448#
1449# CONFIG_CRYPTO_CCM is not set
1450# CONFIG_CRYPTO_GCM is not set
1451# CONFIG_CRYPTO_SEQIV is not set
1452
1453#
1454# Block modes
1455#
1456CONFIG_CRYPTO_CBC=y
1457# CONFIG_CRYPTO_CTR is not set
1458# CONFIG_CRYPTO_CTS is not set
1459CONFIG_CRYPTO_ECB=m
1460# CONFIG_CRYPTO_LRW is not set
1461# CONFIG_CRYPTO_PCBC is not set
1462# CONFIG_CRYPTO_XTS is not set
1463
1464#
1465# Hash modes
1466#
1203CONFIG_CRYPTO_HMAC=y 1467CONFIG_CRYPTO_HMAC=y
1204# CONFIG_CRYPTO_XCBC is not set 1468# CONFIG_CRYPTO_XCBC is not set
1205# CONFIG_CRYPTO_NULL is not set 1469# CONFIG_CRYPTO_VMAC is not set
1470
1471#
1472# Digest
1473#
1474# CONFIG_CRYPTO_CRC32C is not set
1475# CONFIG_CRYPTO_GHASH is not set
1206# CONFIG_CRYPTO_MD4 is not set 1476# CONFIG_CRYPTO_MD4 is not set
1207CONFIG_CRYPTO_MD5=y 1477CONFIG_CRYPTO_MD5=y
1478# CONFIG_CRYPTO_MICHAEL_MIC is not set
1479# CONFIG_CRYPTO_RMD128 is not set
1480# CONFIG_CRYPTO_RMD160 is not set
1481# CONFIG_CRYPTO_RMD256 is not set
1482# CONFIG_CRYPTO_RMD320 is not set
1208CONFIG_CRYPTO_SHA1=y 1483CONFIG_CRYPTO_SHA1=y
1209# CONFIG_CRYPTO_SHA256 is not set 1484# CONFIG_CRYPTO_SHA256 is not set
1210# CONFIG_CRYPTO_SHA512 is not set 1485# CONFIG_CRYPTO_SHA512 is not set
1211# CONFIG_CRYPTO_WP512 is not set
1212# CONFIG_CRYPTO_TGR192 is not set 1486# CONFIG_CRYPTO_TGR192 is not set
1213# CONFIG_CRYPTO_GF128MUL is not set 1487# CONFIG_CRYPTO_WP512 is not set
1214CONFIG_CRYPTO_ECB=m 1488
1215CONFIG_CRYPTO_CBC=y 1489#
1216# CONFIG_CRYPTO_PCBC is not set 1490# Ciphers
1217# CONFIG_CRYPTO_LRW is not set 1491#
1218# CONFIG_CRYPTO_XTS is not set 1492CONFIG_CRYPTO_AES=m
1219# CONFIG_CRYPTO_CTR is not set 1493# CONFIG_CRYPTO_ANUBIS is not set
1220# CONFIG_CRYPTO_GCM is not set 1494CONFIG_CRYPTO_ARC4=m
1221# CONFIG_CRYPTO_CCM is not set
1222# CONFIG_CRYPTO_CRYPTD is not set
1223CONFIG_CRYPTO_DES=y
1224# CONFIG_CRYPTO_FCRYPT is not set
1225# CONFIG_CRYPTO_BLOWFISH is not set 1495# CONFIG_CRYPTO_BLOWFISH is not set
1226# CONFIG_CRYPTO_TWOFISH is not set 1496# CONFIG_CRYPTO_CAMELLIA is not set
1227# CONFIG_CRYPTO_SERPENT is not set
1228# CONFIG_CRYPTO_AES is not set
1229# CONFIG_CRYPTO_CAST5 is not set 1497# CONFIG_CRYPTO_CAST5 is not set
1230# CONFIG_CRYPTO_CAST6 is not set 1498# CONFIG_CRYPTO_CAST6 is not set
1231# CONFIG_CRYPTO_TEA is not set 1499CONFIG_CRYPTO_DES=y
1232CONFIG_CRYPTO_ARC4=m 1500# CONFIG_CRYPTO_FCRYPT is not set
1233# CONFIG_CRYPTO_KHAZAD is not set 1501# CONFIG_CRYPTO_KHAZAD is not set
1234# CONFIG_CRYPTO_ANUBIS is not set
1235# CONFIG_CRYPTO_SEED is not set
1236# CONFIG_CRYPTO_SALSA20 is not set 1502# CONFIG_CRYPTO_SALSA20 is not set
1503# CONFIG_CRYPTO_SEED is not set
1504# CONFIG_CRYPTO_SERPENT is not set
1505# CONFIG_CRYPTO_TEA is not set
1506# CONFIG_CRYPTO_TWOFISH is not set
1507
1508#
1509# Compression
1510#
1237CONFIG_CRYPTO_DEFLATE=y 1511CONFIG_CRYPTO_DEFLATE=y
1238# CONFIG_CRYPTO_MICHAEL_MIC is not set 1512# CONFIG_CRYPTO_ZLIB is not set
1239# CONFIG_CRYPTO_CRC32C is not set
1240# CONFIG_CRYPTO_CAMELLIA is not set
1241# CONFIG_CRYPTO_TEST is not set
1242CONFIG_CRYPTO_AUTHENC=y
1243CONFIG_CRYPTO_LZO=y 1513CONFIG_CRYPTO_LZO=y
1514
1515#
1516# Random Number Generation
1517#
1518CONFIG_CRYPTO_ANSI_CPRNG=m
1244CONFIG_CRYPTO_HW=y 1519CONFIG_CRYPTO_HW=y
1520CONFIG_BINARY_PRINTF=y
1245 1521
1246# 1522#
1247# Library routines 1523# Library routines
1248# 1524#
1249CONFIG_BITREVERSE=y 1525CONFIG_BITREVERSE=y
1526CONFIG_GENERIC_FIND_LAST_BIT=y
1250CONFIG_CRC_CCITT=m 1527CONFIG_CRC_CCITT=m
1251CONFIG_CRC16=y 1528CONFIG_CRC16=y
1529# CONFIG_CRC_T10DIF is not set
1252# CONFIG_CRC_ITU_T is not set 1530# CONFIG_CRC_ITU_T is not set
1253CONFIG_CRC32=y 1531CONFIG_CRC32=y
1254# CONFIG_CRC7 is not set 1532# CONFIG_CRC7 is not set
@@ -1257,8 +1535,9 @@ CONFIG_ZLIB_INFLATE=y
1257CONFIG_ZLIB_DEFLATE=y 1535CONFIG_ZLIB_DEFLATE=y
1258CONFIG_LZO_COMPRESS=y 1536CONFIG_LZO_COMPRESS=y
1259CONFIG_LZO_DECOMPRESS=y 1537CONFIG_LZO_DECOMPRESS=y
1538CONFIG_DECOMPRESS_GZIP=y
1260CONFIG_GENERIC_ALLOCATOR=y 1539CONFIG_GENERIC_ALLOCATOR=y
1261CONFIG_PLIST=y
1262CONFIG_HAS_IOMEM=y 1540CONFIG_HAS_IOMEM=y
1263CONFIG_HAS_IOPORT=y 1541CONFIG_HAS_IOPORT=y
1264CONFIG_HAS_DMA=y 1542CONFIG_HAS_DMA=y
1543CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index a96b68ea5e83..5ef67da343bc 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.25.6 3# Linux kernel version: 2.6.32-rc5
4# Wed Jun 18 16:09:32 2008 4# Thu Oct 29 09:37:19 2009
5# 5#
6CONFIG_AVR32=y 6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y 7CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y 21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y 22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
24 25
25# 26#
26# General setup 27# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
34CONFIG_SYSVIPC=y 35CONFIG_SYSVIPC=y
35CONFIG_SYSVIPC_SYSCTL=y 36CONFIG_SYSVIPC_SYSCTL=y
36CONFIG_POSIX_MQUEUE=y 37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
37CONFIG_BSD_PROCESS_ACCT=y 39CONFIG_BSD_PROCESS_ACCT=y
38CONFIG_BSD_PROCESS_ACCT_V3=y 40CONFIG_BSD_PROCESS_ACCT_V3=y
39# CONFIG_TASKSTATS is not set 41# CONFIG_TASKSTATS is not set
40# CONFIG_AUDIT is not set 42# CONFIG_AUDIT is not set
43
44#
45# RCU Subsystem
46#
47CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set
49# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set
52# CONFIG_TREE_RCU_TRACE is not set
41# CONFIG_IKCONFIG is not set 53# CONFIG_IKCONFIG is not set
42CONFIG_LOG_BUF_SHIFT=14 54CONFIG_LOG_BUF_SHIFT=14
43# CONFIG_CGROUPS is not set
44# CONFIG_GROUP_SCHED is not set 55# CONFIG_GROUP_SCHED is not set
56# CONFIG_CGROUPS is not set
45CONFIG_SYSFS_DEPRECATED=y 57CONFIG_SYSFS_DEPRECATED=y
46CONFIG_SYSFS_DEPRECATED_V2=y 58CONFIG_SYSFS_DEPRECATED_V2=y
47# CONFIG_RELAY is not set 59# CONFIG_RELAY is not set
48# CONFIG_NAMESPACES is not set 60# CONFIG_NAMESPACES is not set
49CONFIG_BLK_DEV_INITRD=y 61CONFIG_BLK_DEV_INITRD=y
50CONFIG_INITRAMFS_SOURCE="" 62CONFIG_INITRAMFS_SOURCE=""
63CONFIG_RD_GZIP=y
64# CONFIG_RD_BZIP2 is not set
65# CONFIG_RD_LZMA is not set
51CONFIG_CC_OPTIMIZE_FOR_SIZE=y 66CONFIG_CC_OPTIMIZE_FOR_SIZE=y
52CONFIG_SYSCTL=y 67CONFIG_SYSCTL=y
68CONFIG_ANON_INODES=y
53CONFIG_EMBEDDED=y 69CONFIG_EMBEDDED=y
54# CONFIG_SYSCTL_SYSCALL is not set 70# CONFIG_SYSCTL_SYSCALL is not set
55CONFIG_KALLSYMS=y 71CONFIG_KALLSYMS=y
@@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y
59CONFIG_PRINTK=y 75CONFIG_PRINTK=y
60CONFIG_BUG=y 76CONFIG_BUG=y
61CONFIG_ELF_CORE=y 77CONFIG_ELF_CORE=y
62# CONFIG_COMPAT_BRK is not set
63# CONFIG_BASE_FULL is not set 78# CONFIG_BASE_FULL is not set
64CONFIG_FUTEX=y 79CONFIG_FUTEX=y
65CONFIG_ANON_INODES=y
66CONFIG_EPOLL=y 80CONFIG_EPOLL=y
67CONFIG_SIGNALFD=y 81CONFIG_SIGNALFD=y
68CONFIG_TIMERFD=y 82CONFIG_TIMERFD=y
69CONFIG_EVENTFD=y 83CONFIG_EVENTFD=y
70CONFIG_SHMEM=y 84CONFIG_SHMEM=y
85CONFIG_AIO=y
86
87#
88# Kernel Performance Events And Counters
89#
71CONFIG_VM_EVENT_COUNTERS=y 90CONFIG_VM_EVENT_COUNTERS=y
72CONFIG_SLUB_DEBUG=y 91CONFIG_SLUB_DEBUG=y
92# CONFIG_COMPAT_BRK is not set
73# CONFIG_SLAB is not set 93# CONFIG_SLAB is not set
74CONFIG_SLUB=y 94CONFIG_SLUB=y
75# CONFIG_SLOB is not set 95# CONFIG_SLOB is not set
76CONFIG_PROFILING=y 96CONFIG_PROFILING=y
77# CONFIG_MARKERS is not set 97CONFIG_TRACEPOINTS=y
78CONFIG_OPROFILE=m 98CONFIG_OPROFILE=m
79CONFIG_HAVE_OPROFILE=y 99CONFIG_HAVE_OPROFILE=y
80CONFIG_KPROBES=y 100CONFIG_KPROBES=y
81CONFIG_HAVE_KPROBES=y 101CONFIG_HAVE_KPROBES=y
82# CONFIG_HAVE_KRETPROBES is not set 102CONFIG_HAVE_CLK=y
83CONFIG_PROC_PAGE_MONITOR=y 103
104#
105# GCOV-based kernel profiling
106#
107# CONFIG_GCOV_KERNEL is not set
108CONFIG_SLOW_WORK=y
109# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
84CONFIG_SLABINFO=y 110CONFIG_SLABINFO=y
85CONFIG_RT_MUTEXES=y 111CONFIG_RT_MUTEXES=y
86# CONFIG_TINY_SHMEM is not set
87CONFIG_BASE_SMALL=1 112CONFIG_BASE_SMALL=1
88CONFIG_MODULES=y 113CONFIG_MODULES=y
114# CONFIG_MODULE_FORCE_LOAD is not set
89CONFIG_MODULE_UNLOAD=y 115CONFIG_MODULE_UNLOAD=y
90CONFIG_MODULE_FORCE_UNLOAD=y 116CONFIG_MODULE_FORCE_UNLOAD=y
91# CONFIG_MODVERSIONS is not set 117# CONFIG_MODVERSIONS is not set
92# CONFIG_MODULE_SRCVERSION_ALL is not set 118# CONFIG_MODULE_SRCVERSION_ALL is not set
93CONFIG_KMOD=y
94CONFIG_BLOCK=y 119CONFIG_BLOCK=y
95# CONFIG_LBD is not set 120CONFIG_LBDAF=y
96# CONFIG_BLK_DEV_IO_TRACE is not set
97# CONFIG_LSF is not set
98# CONFIG_BLK_DEV_BSG is not set 121# CONFIG_BLK_DEV_BSG is not set
122# CONFIG_BLK_DEV_INTEGRITY is not set
99 123
100# 124#
101# IO Schedulers 125# IO Schedulers
@@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
109CONFIG_DEFAULT_CFQ=y 133CONFIG_DEFAULT_CFQ=y
110# CONFIG_DEFAULT_NOOP is not set 134# CONFIG_DEFAULT_NOOP is not set
111CONFIG_DEFAULT_IOSCHED="cfq" 135CONFIG_DEFAULT_IOSCHED="cfq"
112CONFIG_CLASSIC_RCU=y 136CONFIG_FREEZER=y
113 137
114# 138#
115# System Type and features 139# System Type and features
@@ -124,13 +148,20 @@ CONFIG_PERFORMANCE_COUNTERS=y
124CONFIG_PLATFORM_AT32AP=y 148CONFIG_PLATFORM_AT32AP=y
125CONFIG_CPU_AT32AP700X=y 149CONFIG_CPU_AT32AP700X=y
126CONFIG_CPU_AT32AP7000=y 150CONFIG_CPU_AT32AP7000=y
151CONFIG_BOARD_ATNGW100_COMMON=y
127# CONFIG_BOARD_ATSTK1000 is not set 152# CONFIG_BOARD_ATSTK1000 is not set
128CONFIG_BOARD_ATNGW100=y 153CONFIG_BOARD_ATNGW100_MKI=y
154# CONFIG_BOARD_ATNGW100_MKII is not set
155# CONFIG_BOARD_HAMMERHEAD is not set
156# CONFIG_BOARD_FAVR_32 is not set
157# CONFIG_BOARD_MERISC is not set
158# CONFIG_BOARD_MIMC200 is not set
159# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
129CONFIG_BOARD_ATNGW100_EVKLCD10X=y 160CONFIG_BOARD_ATNGW100_EVKLCD10X=y
161# CONFIG_BOARD_ATNGW100_MRMT is not set
130# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set 162# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set
131CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y 163CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y
132# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set 164# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
133CONFIG_BOARD_ATNGW100_I2C_GPIO=y
134CONFIG_LOADER_U_BOOT=y 165CONFIG_LOADER_U_BOOT=y
135 166
136# 167#
@@ -139,14 +170,14 @@ CONFIG_LOADER_U_BOOT=y
139# CONFIG_AP700X_32_BIT_SMC is not set 170# CONFIG_AP700X_32_BIT_SMC is not set
140CONFIG_AP700X_16_BIT_SMC=y 171CONFIG_AP700X_16_BIT_SMC=y
141# CONFIG_AP700X_8_BIT_SMC is not set 172# CONFIG_AP700X_8_BIT_SMC is not set
142CONFIG_GPIO_DEV=y
143CONFIG_LOAD_ADDRESS=0x10000000 173CONFIG_LOAD_ADDRESS=0x10000000
144CONFIG_ENTRY_ADDRESS=0x90000000 174CONFIG_ENTRY_ADDRESS=0x90000000
145CONFIG_PHYS_OFFSET=0x10000000 175CONFIG_PHYS_OFFSET=0x10000000
146CONFIG_PREEMPT_NONE=y 176CONFIG_PREEMPT_NONE=y
147# CONFIG_PREEMPT_VOLUNTARY is not set 177# CONFIG_PREEMPT_VOLUNTARY is not set
148# CONFIG_PREEMPT is not set 178# CONFIG_PREEMPT is not set
149# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set 179CONFIG_QUICKLIST=y
180# CONFIG_HAVE_ARCH_BOOTMEM is not set
150# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set 181# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
151# CONFIG_NEED_NODE_MEMMAP_SIZE is not set 182# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
152CONFIG_ARCH_FLATMEM_ENABLE=y 183CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -158,33 +189,36 @@ CONFIG_FLATMEM_MANUAL=y
158# CONFIG_SPARSEMEM_MANUAL is not set 189# CONFIG_SPARSEMEM_MANUAL is not set
159CONFIG_FLATMEM=y 190CONFIG_FLATMEM=y
160CONFIG_FLAT_NODE_MEM_MAP=y 191CONFIG_FLAT_NODE_MEM_MAP=y
161# CONFIG_SPARSEMEM_STATIC is not set 192CONFIG_PAGEFLAGS_EXTENDED=y
162# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
163CONFIG_SPLIT_PTLOCK_CPUS=4 193CONFIG_SPLIT_PTLOCK_CPUS=4
164# CONFIG_RESOURCES_64BIT is not set 194# CONFIG_PHYS_ADDR_T_64BIT is not set
165CONFIG_ZONE_DMA_FLAG=0 195CONFIG_ZONE_DMA_FLAG=0
196CONFIG_NR_QUICK=2
166CONFIG_VIRT_TO_BUS=y 197CONFIG_VIRT_TO_BUS=y
198CONFIG_HAVE_MLOCK=y
199CONFIG_HAVE_MLOCKED_PAGE_BIT=y
200# CONFIG_KSM is not set
201CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
167# CONFIG_OWNERSHIP_TRACE is not set 202# CONFIG_OWNERSHIP_TRACE is not set
168CONFIG_NMI_DEBUGGING=y 203CONFIG_NMI_DEBUGGING=y
169CONFIG_DW_DMAC=y
170# CONFIG_HZ_100 is not set 204# CONFIG_HZ_100 is not set
171CONFIG_HZ_250=y 205CONFIG_HZ_250=y
172# CONFIG_HZ_300 is not set 206# CONFIG_HZ_300 is not set
173# CONFIG_HZ_1000 is not set 207# CONFIG_HZ_1000 is not set
174CONFIG_HZ=250 208CONFIG_HZ=250
175# CONFIG_SCHED_HRTICK is not set 209CONFIG_SCHED_HRTICK=y
176CONFIG_CMDLINE="" 210CONFIG_CMDLINE=""
177 211
178# 212#
179# Power management options 213# Power management options
180# 214#
181CONFIG_ARCH_SUSPEND_POSSIBLE=y
182CONFIG_PM=y 215CONFIG_PM=y
183# CONFIG_PM_LEGACY is not set
184# CONFIG_PM_DEBUG is not set 216# CONFIG_PM_DEBUG is not set
185CONFIG_PM_SLEEP=y 217CONFIG_PM_SLEEP=y
186CONFIG_SUSPEND=y 218CONFIG_SUSPEND=y
187CONFIG_SUSPEND_FREEZER=y 219CONFIG_SUSPEND_FREEZER=y
220# CONFIG_PM_RUNTIME is not set
221CONFIG_ARCH_SUSPEND_POSSIBLE=y
188 222
189# 223#
190# CPU Frequency scaling 224# CPU Frequency scaling
@@ -194,6 +228,7 @@ CONFIG_CPU_FREQ_TABLE=y
194# CONFIG_CPU_FREQ_DEBUG is not set 228# CONFIG_CPU_FREQ_DEBUG is not set
195# CONFIG_CPU_FREQ_STAT is not set 229# CONFIG_CPU_FREQ_STAT is not set
196# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set 230# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
231# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
197# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set 232# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
198CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 233CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
199# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set 234# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
@@ -214,11 +249,9 @@ CONFIG_CPU_FREQ_AT32AP=y
214# Executable file formats 249# Executable file formats
215# 250#
216CONFIG_BINFMT_ELF=y 251CONFIG_BINFMT_ELF=y
252# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
253# CONFIG_HAVE_AOUT is not set
217# CONFIG_BINFMT_MISC is not set 254# CONFIG_BINFMT_MISC is not set
218
219#
220# Networking
221#
222CONFIG_NET=y 255CONFIG_NET=y
223 256
224# 257#
@@ -232,6 +265,7 @@ CONFIG_XFRM_USER=y
232# CONFIG_XFRM_SUB_POLICY is not set 265# CONFIG_XFRM_SUB_POLICY is not set
233# CONFIG_XFRM_MIGRATE is not set 266# CONFIG_XFRM_MIGRATE is not set
234# CONFIG_XFRM_STATISTICS is not set 267# CONFIG_XFRM_STATISTICS is not set
268CONFIG_XFRM_IPCOMP=y
235CONFIG_NET_KEY=y 269CONFIG_NET_KEY=y
236# CONFIG_NET_KEY_MIGRATE is not set 270# CONFIG_NET_KEY_MIGRATE is not set
237CONFIG_INET=y 271CONFIG_INET=y
@@ -269,7 +303,6 @@ CONFIG_INET_TCP_DIAG=y
269CONFIG_TCP_CONG_CUBIC=y 303CONFIG_TCP_CONG_CUBIC=y
270CONFIG_DEFAULT_TCP_CONG="cubic" 304CONFIG_DEFAULT_TCP_CONG="cubic"
271# CONFIG_TCP_MD5SIG is not set 305# CONFIG_TCP_MD5SIG is not set
272# CONFIG_IP_VS is not set
273CONFIG_IPV6=y 306CONFIG_IPV6=y
274# CONFIG_IPV6_PRIVACY is not set 307# CONFIG_IPV6_PRIVACY is not set
275# CONFIG_IPV6_ROUTER_PREF is not set 308# CONFIG_IPV6_ROUTER_PREF is not set
@@ -285,8 +318,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
285CONFIG_INET6_XFRM_MODE_BEET=y 318CONFIG_INET6_XFRM_MODE_BEET=y
286# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 319# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
287CONFIG_IPV6_SIT=y 320CONFIG_IPV6_SIT=y
321CONFIG_IPV6_NDISC_NODETYPE=y
288# CONFIG_IPV6_TUNNEL is not set 322# CONFIG_IPV6_TUNNEL is not set
289# CONFIG_IPV6_MULTIPLE_TABLES is not set 323# CONFIG_IPV6_MULTIPLE_TABLES is not set
324# CONFIG_IPV6_MROUTE is not set
290# CONFIG_NETWORK_SECMARK is not set 325# CONFIG_NETWORK_SECMARK is not set
291CONFIG_NETFILTER=y 326CONFIG_NETFILTER=y
292# CONFIG_NETFILTER_DEBUG is not set 327# CONFIG_NETFILTER_DEBUG is not set
@@ -310,10 +345,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
310CONFIG_NETFILTER_XT_MATCH_MARK=m 345CONFIG_NETFILTER_XT_MATCH_MARK=m
311CONFIG_NETFILTER_XT_MATCH_POLICY=m 346CONFIG_NETFILTER_XT_MATCH_POLICY=m
312CONFIG_NETFILTER_XT_MATCH_STATE=m 347CONFIG_NETFILTER_XT_MATCH_STATE=m
348# CONFIG_IP_VS is not set
313 349
314# 350#
315# IP: Netfilter Configuration 351# IP: Netfilter Configuration
316# 352#
353CONFIG_NF_DEFRAG_IPV4=m
317CONFIG_NF_CONNTRACK_IPV4=m 354CONFIG_NF_CONNTRACK_IPV4=m
318CONFIG_NF_CONNTRACK_PROC_COMPAT=y 355CONFIG_NF_CONNTRACK_PROC_COMPAT=y
319CONFIG_IP_NF_IPTABLES=m 356CONFIG_IP_NF_IPTABLES=m
@@ -339,16 +376,20 @@ CONFIG_IP_NF_MANGLE=m
339CONFIG_NF_CONNTRACK_IPV6=m 376CONFIG_NF_CONNTRACK_IPV6=m
340CONFIG_IP6_NF_IPTABLES=m 377CONFIG_IP6_NF_IPTABLES=m
341CONFIG_IP6_NF_MATCH_IPV6HEADER=m 378CONFIG_IP6_NF_MATCH_IPV6HEADER=m
342CONFIG_IP6_NF_FILTER=m
343CONFIG_IP6_NF_TARGET_LOG=m 379CONFIG_IP6_NF_TARGET_LOG=m
380CONFIG_IP6_NF_FILTER=m
344CONFIG_IP6_NF_TARGET_REJECT=m 381CONFIG_IP6_NF_TARGET_REJECT=m
345CONFIG_IP6_NF_MANGLE=m 382CONFIG_IP6_NF_MANGLE=m
346# CONFIG_IP_DCCP is not set 383# CONFIG_IP_DCCP is not set
347# CONFIG_IP_SCTP is not set 384# CONFIG_IP_SCTP is not set
385# CONFIG_RDS is not set
348# CONFIG_TIPC is not set 386# CONFIG_TIPC is not set
349# CONFIG_ATM is not set 387# CONFIG_ATM is not set
388CONFIG_STP=m
350CONFIG_BRIDGE=m 389CONFIG_BRIDGE=m
390# CONFIG_NET_DSA is not set
351CONFIG_VLAN_8021Q=m 391CONFIG_VLAN_8021Q=m
392# CONFIG_VLAN_8021Q_GVRP is not set
352# CONFIG_DECNET is not set 393# CONFIG_DECNET is not set
353CONFIG_LLC=m 394CONFIG_LLC=m
354# CONFIG_LLC2 is not set 395# CONFIG_LLC2 is not set
@@ -358,26 +399,33 @@ CONFIG_LLC=m
358# CONFIG_LAPB is not set 399# CONFIG_LAPB is not set
359# CONFIG_ECONET is not set 400# CONFIG_ECONET is not set
360# CONFIG_WAN_ROUTER is not set 401# CONFIG_WAN_ROUTER is not set
402# CONFIG_PHONET is not set
403# CONFIG_IEEE802154 is not set
361# CONFIG_NET_SCHED is not set 404# CONFIG_NET_SCHED is not set
405# CONFIG_DCB is not set
362 406
363# 407#
364# Network testing 408# Network testing
365# 409#
366# CONFIG_NET_PKTGEN is not set 410# CONFIG_NET_PKTGEN is not set
367# CONFIG_NET_TCPPROBE is not set 411# CONFIG_NET_TCPPROBE is not set
412# CONFIG_NET_DROP_MONITOR is not set
368# CONFIG_HAMRADIO is not set 413# CONFIG_HAMRADIO is not set
369# CONFIG_CAN is not set 414# CONFIG_CAN is not set
370# CONFIG_IRDA is not set 415# CONFIG_IRDA is not set
371# CONFIG_BT is not set 416# CONFIG_BT is not set
372# CONFIG_AF_RXRPC is not set 417# CONFIG_AF_RXRPC is not set
418CONFIG_WIRELESS=y
419# CONFIG_CFG80211 is not set
420CONFIG_CFG80211_DEFAULT_PS_VALUE=0
421# CONFIG_WIRELESS_OLD_REGULATORY is not set
422# CONFIG_WIRELESS_EXT is not set
423# CONFIG_LIB80211 is not set
373 424
374# 425#
375# Wireless 426# CFG80211 needs to be enabled for MAC80211
376# 427#
377# CONFIG_CFG80211 is not set 428# CONFIG_WIMAX is not set
378# CONFIG_WIRELESS_EXT is not set
379# CONFIG_MAC80211 is not set
380# CONFIG_IEEE80211 is not set
381# CONFIG_RFKILL is not set 429# CONFIG_RFKILL is not set
382# CONFIG_NET_9P is not set 430# CONFIG_NET_9P is not set
383 431
@@ -389,6 +437,7 @@ CONFIG_LLC=m
389# Generic Driver Options 437# Generic Driver Options
390# 438#
391CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 439CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
440# CONFIG_DEVTMPFS is not set
392CONFIG_STANDALONE=y 441CONFIG_STANDALONE=y
393# CONFIG_PREVENT_FIRMWARE_BUILD is not set 442# CONFIG_PREVENT_FIRMWARE_BUILD is not set
394# CONFIG_FW_LOADER is not set 443# CONFIG_FW_LOADER is not set
@@ -398,10 +447,12 @@ CONFIG_STANDALONE=y
398# CONFIG_CONNECTOR is not set 447# CONFIG_CONNECTOR is not set
399CONFIG_MTD=y 448CONFIG_MTD=y
400# CONFIG_MTD_DEBUG is not set 449# CONFIG_MTD_DEBUG is not set
450# CONFIG_MTD_TESTS is not set
401# CONFIG_MTD_CONCAT is not set 451# CONFIG_MTD_CONCAT is not set
402CONFIG_MTD_PARTITIONS=y 452CONFIG_MTD_PARTITIONS=y
403# CONFIG_MTD_REDBOOT_PARTS is not set 453# CONFIG_MTD_REDBOOT_PARTS is not set
404CONFIG_MTD_CMDLINE_PARTS=y 454CONFIG_MTD_CMDLINE_PARTS=y
455# CONFIG_MTD_AR7_PARTS is not set
405 456
406# 457#
407# User Modules And Translation Layers 458# User Modules And Translation Layers
@@ -446,16 +497,17 @@ CONFIG_MTD_CFI_UTIL=y
446# 497#
447# CONFIG_MTD_COMPLEX_MAPPINGS is not set 498# CONFIG_MTD_COMPLEX_MAPPINGS is not set
448CONFIG_MTD_PHYSMAP=y 499CONFIG_MTD_PHYSMAP=y
449CONFIG_MTD_PHYSMAP_START=0x80000000 500# CONFIG_MTD_PHYSMAP_COMPAT is not set
450CONFIG_MTD_PHYSMAP_LEN=0x0
451CONFIG_MTD_PHYSMAP_BANKWIDTH=2
452# CONFIG_MTD_PLATRAM is not set 501# CONFIG_MTD_PLATRAM is not set
453 502
454# 503#
455# Self-contained MTD device drivers 504# Self-contained MTD device drivers
456# 505#
457CONFIG_MTD_DATAFLASH=y 506CONFIG_MTD_DATAFLASH=y
507# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
508# CONFIG_MTD_DATAFLASH_OTP is not set
458# CONFIG_MTD_M25P80 is not set 509# CONFIG_MTD_M25P80 is not set
510# CONFIG_MTD_SST25L is not set
459# CONFIG_MTD_SLRAM is not set 511# CONFIG_MTD_SLRAM is not set
460# CONFIG_MTD_PHRAM is not set 512# CONFIG_MTD_PHRAM is not set
461# CONFIG_MTD_MTDRAM is not set 513# CONFIG_MTD_MTDRAM is not set
@@ -471,6 +523,11 @@ CONFIG_MTD_DATAFLASH=y
471# CONFIG_MTD_ONENAND is not set 523# CONFIG_MTD_ONENAND is not set
472 524
473# 525#
526# LPDDR flash memory drivers
527#
528# CONFIG_MTD_LPDDR is not set
529
530#
474# UBI - Unsorted block images 531# UBI - Unsorted block images
475# 532#
476CONFIG_MTD_UBI=y 533CONFIG_MTD_UBI=y
@@ -499,10 +556,20 @@ CONFIG_MISC_DEVICES=y
499CONFIG_ATMEL_TCLIB=y 556CONFIG_ATMEL_TCLIB=y
500CONFIG_ATMEL_TCB_CLKSRC=y 557CONFIG_ATMEL_TCB_CLKSRC=y
501CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 558CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
502# CONFIG_EEPROM_93CX6 is not set 559# CONFIG_ICS932S401 is not set
503# CONFIG_ATMEL_SSC is not set 560# CONFIG_ATMEL_SSC is not set
504# CONFIG_ENCLOSURE_SERVICES is not set 561# CONFIG_ENCLOSURE_SERVICES is not set
505# CONFIG_HAVE_IDE is not set 562# CONFIG_ISL29003 is not set
563# CONFIG_C2PORT is not set
564
565#
566# EEPROM support
567#
568# CONFIG_EEPROM_AT24 is not set
569# CONFIG_EEPROM_AT25 is not set
570# CONFIG_EEPROM_LEGACY is not set
571# CONFIG_EEPROM_MAX6875 is not set
572# CONFIG_EEPROM_93CX6 is not set
506 573
507# 574#
508# SCSI device support 575# SCSI device support
@@ -514,7 +581,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
514# CONFIG_ATA is not set 581# CONFIG_ATA is not set
515# CONFIG_MD is not set 582# CONFIG_MD is not set
516CONFIG_NETDEVICES=y 583CONFIG_NETDEVICES=y
517# CONFIG_NETDEVICES_MULTIQUEUE is not set
518# CONFIG_DUMMY is not set 584# CONFIG_DUMMY is not set
519# CONFIG_BONDING is not set 585# CONFIG_BONDING is not set
520# CONFIG_MACVLAN is not set 586# CONFIG_MACVLAN is not set
@@ -536,25 +602,37 @@ CONFIG_PHYLIB=y
536# CONFIG_BROADCOM_PHY is not set 602# CONFIG_BROADCOM_PHY is not set
537# CONFIG_ICPLUS_PHY is not set 603# CONFIG_ICPLUS_PHY is not set
538# CONFIG_REALTEK_PHY is not set 604# CONFIG_REALTEK_PHY is not set
605# CONFIG_NATIONAL_PHY is not set
606# CONFIG_STE10XP is not set
607# CONFIG_LSI_ET1011C_PHY is not set
539# CONFIG_FIXED_PHY is not set 608# CONFIG_FIXED_PHY is not set
540# CONFIG_MDIO_BITBANG is not set 609# CONFIG_MDIO_BITBANG is not set
541CONFIG_NET_ETHERNET=y 610CONFIG_NET_ETHERNET=y
542# CONFIG_MII is not set 611# CONFIG_MII is not set
543CONFIG_MACB=y 612CONFIG_MACB=y
544# CONFIG_ENC28J60 is not set 613# CONFIG_ENC28J60 is not set
614# CONFIG_ETHOC is not set
615# CONFIG_DNET is not set
545# CONFIG_IBM_NEW_EMAC_ZMII is not set 616# CONFIG_IBM_NEW_EMAC_ZMII is not set
546# CONFIG_IBM_NEW_EMAC_RGMII is not set 617# CONFIG_IBM_NEW_EMAC_RGMII is not set
547# CONFIG_IBM_NEW_EMAC_TAH is not set 618# CONFIG_IBM_NEW_EMAC_TAH is not set
548# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 619# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
620# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
621# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
622# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
549# CONFIG_B44 is not set 623# CONFIG_B44 is not set
624# CONFIG_KS8842 is not set
625# CONFIG_KS8851 is not set
626# CONFIG_KS8851_MLL is not set
550# CONFIG_NETDEV_1000 is not set 627# CONFIG_NETDEV_1000 is not set
551# CONFIG_NETDEV_10000 is not set 628# CONFIG_NETDEV_10000 is not set
629CONFIG_WLAN=y
630# CONFIG_WLAN_PRE80211 is not set
631# CONFIG_WLAN_80211 is not set
552 632
553# 633#
554# Wireless LAN 634# Enable WiMAX (Networking options) to see the WiMAX drivers
555# 635#
556# CONFIG_WLAN_PRE80211 is not set
557# CONFIG_WLAN_80211 is not set
558# CONFIG_WAN is not set 636# CONFIG_WAN is not set
559CONFIG_PPP=m 637CONFIG_PPP=m
560# CONFIG_PPP_MULTILINK is not set 638# CONFIG_PPP_MULTILINK is not set
@@ -598,15 +676,30 @@ CONFIG_INPUT_EVDEV=m
598# CONFIG_INPUT_TABLET is not set 676# CONFIG_INPUT_TABLET is not set
599CONFIG_INPUT_TOUCHSCREEN=y 677CONFIG_INPUT_TOUCHSCREEN=y
600# CONFIG_TOUCHSCREEN_ADS7846 is not set 678# CONFIG_TOUCHSCREEN_ADS7846 is not set
679# CONFIG_TOUCHSCREEN_AD7877 is not set
680# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
681# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
682# CONFIG_TOUCHSCREEN_AD7879 is not set
683# CONFIG_TOUCHSCREEN_EETI is not set
601# CONFIG_TOUCHSCREEN_FUJITSU is not set 684# CONFIG_TOUCHSCREEN_FUJITSU is not set
602# CONFIG_TOUCHSCREEN_GUNZE is not set 685# CONFIG_TOUCHSCREEN_GUNZE is not set
603# CONFIG_TOUCHSCREEN_ELO is not set 686# CONFIG_TOUCHSCREEN_ELO is not set
687# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
688# CONFIG_TOUCHSCREEN_MCS5000 is not set
604# CONFIG_TOUCHSCREEN_MTOUCH is not set 689# CONFIG_TOUCHSCREEN_MTOUCH is not set
690# CONFIG_TOUCHSCREEN_INEXIO is not set
605# CONFIG_TOUCHSCREEN_MK712 is not set 691# CONFIG_TOUCHSCREEN_MK712 is not set
606# CONFIG_TOUCHSCREEN_PENMOUNT is not set 692# CONFIG_TOUCHSCREEN_PENMOUNT is not set
607# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set 693# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
608# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 694# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
609# CONFIG_TOUCHSCREEN_UCB1400 is not set 695CONFIG_TOUCHSCREEN_WM97XX=m
696CONFIG_TOUCHSCREEN_WM9705=y
697CONFIG_TOUCHSCREEN_WM9712=y
698CONFIG_TOUCHSCREEN_WM9713=y
699# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
700# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
701# CONFIG_TOUCHSCREEN_TSC2007 is not set
702# CONFIG_TOUCHSCREEN_W90X900 is not set
610# CONFIG_INPUT_MISC is not set 703# CONFIG_INPUT_MISC is not set
611 704
612# 705#
@@ -619,9 +712,11 @@ CONFIG_INPUT_TOUCHSCREEN=y
619# Character devices 712# Character devices
620# 713#
621CONFIG_VT=y 714CONFIG_VT=y
715CONFIG_CONSOLE_TRANSLATIONS=y
622CONFIG_VT_CONSOLE=y 716CONFIG_VT_CONSOLE=y
623CONFIG_HW_CONSOLE=y 717CONFIG_HW_CONSOLE=y
624# CONFIG_VT_HW_CONSOLE_BINDING is not set 718# CONFIG_VT_HW_CONSOLE_BINDING is not set
719CONFIG_DEVKMEM=y
625# CONFIG_SERIAL_NONSTANDARD is not set 720# CONFIG_SERIAL_NONSTANDARD is not set
626 721
627# 722#
@@ -636,9 +731,11 @@ CONFIG_SERIAL_ATMEL=y
636CONFIG_SERIAL_ATMEL_CONSOLE=y 731CONFIG_SERIAL_ATMEL_CONSOLE=y
637CONFIG_SERIAL_ATMEL_PDC=y 732CONFIG_SERIAL_ATMEL_PDC=y
638# CONFIG_SERIAL_ATMEL_TTYAT is not set 733# CONFIG_SERIAL_ATMEL_TTYAT is not set
734# CONFIG_SERIAL_MAX3100 is not set
639CONFIG_SERIAL_CORE=y 735CONFIG_SERIAL_CORE=y
640CONFIG_SERIAL_CORE_CONSOLE=y 736CONFIG_SERIAL_CORE_CONSOLE=y
641CONFIG_UNIX98_PTYS=y 737CONFIG_UNIX98_PTYS=y
738# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
642# CONFIG_LEGACY_PTYS is not set 739# CONFIG_LEGACY_PTYS is not set
643# CONFIG_IPMI_HANDLER is not set 740# CONFIG_IPMI_HANDLER is not set
644# CONFIG_HW_RANDOM is not set 741# CONFIG_HW_RANDOM is not set
@@ -647,45 +744,44 @@ CONFIG_UNIX98_PTYS=y
647# CONFIG_TCG_TPM is not set 744# CONFIG_TCG_TPM is not set
648CONFIG_I2C=m 745CONFIG_I2C=m
649CONFIG_I2C_BOARDINFO=y 746CONFIG_I2C_BOARDINFO=y
747CONFIG_I2C_COMPAT=y
650CONFIG_I2C_CHARDEV=m 748CONFIG_I2C_CHARDEV=m
749CONFIG_I2C_HELPER_AUTO=y
750CONFIG_I2C_ALGOBIT=m
651 751
652# 752#
653# I2C Algorithms 753# I2C Hardware Bus support
654# 754#
655CONFIG_I2C_ALGOBIT=m
656# CONFIG_I2C_ALGOPCF is not set
657# CONFIG_I2C_ALGOPCA is not set
658 755
659# 756#
660# I2C Hardware Bus support 757# I2C system bus drivers (mostly embedded / system-on-chip)
661# 758#
662CONFIG_I2C_ATMELTWI=m 759# CONFIG_I2C_DESIGNWARE is not set
663CONFIG_I2C_GPIO=m 760CONFIG_I2C_GPIO=m
664# CONFIG_I2C_OCORES is not set 761# CONFIG_I2C_OCORES is not set
665# CONFIG_I2C_PARPORT_LIGHT is not set
666# CONFIG_I2C_SIMTEC is not set 762# CONFIG_I2C_SIMTEC is not set
763
764#
765# External I2C/SMBus adapter drivers
766#
767# CONFIG_I2C_PARPORT_LIGHT is not set
667# CONFIG_I2C_TAOS_EVM is not set 768# CONFIG_I2C_TAOS_EVM is not set
769
770#
771# Other I2C/SMBus bus drivers
772#
773# CONFIG_I2C_PCA_PLATFORM is not set
668# CONFIG_I2C_STUB is not set 774# CONFIG_I2C_STUB is not set
669 775
670# 776#
671# Miscellaneous I2C Chip support 777# Miscellaneous I2C Chip support
672# 778#
673# CONFIG_DS1682 is not set 779# CONFIG_DS1682 is not set
674# CONFIG_EEPROM_LEGACY is not set
675# CONFIG_SENSORS_PCF8574 is not set
676# CONFIG_PCF8575 is not set
677# CONFIG_SENSORS_PCF8591 is not set
678# CONFIG_TPS65010 is not set
679# CONFIG_SENSORS_MAX6875 is not set
680# CONFIG_SENSORS_TSL2550 is not set 780# CONFIG_SENSORS_TSL2550 is not set
681# CONFIG_I2C_DEBUG_CORE is not set 781# CONFIG_I2C_DEBUG_CORE is not set
682# CONFIG_I2C_DEBUG_ALGO is not set 782# CONFIG_I2C_DEBUG_ALGO is not set
683# CONFIG_I2C_DEBUG_BUS is not set 783# CONFIG_I2C_DEBUG_BUS is not set
684# CONFIG_I2C_DEBUG_CHIP is not set 784# CONFIG_I2C_DEBUG_CHIP is not set
685
686#
687# SPI support
688#
689CONFIG_SPI=y 785CONFIG_SPI=y
690# CONFIG_SPI_DEBUG is not set 786# CONFIG_SPI_DEBUG is not set
691CONFIG_SPI_MASTER=y 787CONFIG_SPI_MASTER=y
@@ -695,30 +791,48 @@ CONFIG_SPI_MASTER=y
695# 791#
696CONFIG_SPI_ATMEL=y 792CONFIG_SPI_ATMEL=y
697# CONFIG_SPI_BITBANG is not set 793# CONFIG_SPI_BITBANG is not set
794# CONFIG_SPI_GPIO is not set
698 795
699# 796#
700# SPI Protocol Masters 797# SPI Protocol Masters
701# 798#
702# CONFIG_EEPROM_AT25 is not set
703CONFIG_SPI_SPIDEV=m 799CONFIG_SPI_SPIDEV=m
704# CONFIG_SPI_TLE62X0 is not set 800# CONFIG_SPI_TLE62X0 is not set
705CONFIG_HAVE_GPIO_LIB=y
706 801
707# 802#
708# GPIO Support 803# PPS support
709# 804#
805# CONFIG_PPS is not set
806CONFIG_ARCH_REQUIRE_GPIOLIB=y
807CONFIG_GPIOLIB=y
710# CONFIG_DEBUG_GPIO is not set 808# CONFIG_DEBUG_GPIO is not set
809# CONFIG_GPIO_SYSFS is not set
810
811#
812# Memory mapped GPIO expanders:
813#
711 814
712# 815#
713# I2C GPIO expanders: 816# I2C GPIO expanders:
714# 817#
818# CONFIG_GPIO_MAX732X is not set
715# CONFIG_GPIO_PCA953X is not set 819# CONFIG_GPIO_PCA953X is not set
716# CONFIG_GPIO_PCF857X is not set 820# CONFIG_GPIO_PCF857X is not set
717 821
718# 822#
823# PCI GPIO expanders:
824#
825
826#
719# SPI GPIO expanders: 827# SPI GPIO expanders:
720# 828#
829# CONFIG_GPIO_MAX7301 is not set
721# CONFIG_GPIO_MCP23S08 is not set 830# CONFIG_GPIO_MCP23S08 is not set
831# CONFIG_GPIO_MC33880 is not set
832
833#
834# AC97 GPIO expanders:
835#
722# CONFIG_W1 is not set 836# CONFIG_W1 is not set
723# CONFIG_POWER_SUPPLY is not set 837# CONFIG_POWER_SUPPLY is not set
724# CONFIG_HWMON is not set 838# CONFIG_HWMON is not set
@@ -731,24 +845,31 @@ CONFIG_WATCHDOG=y
731# 845#
732# CONFIG_SOFT_WATCHDOG is not set 846# CONFIG_SOFT_WATCHDOG is not set
733CONFIG_AT32AP700X_WDT=y 847CONFIG_AT32AP700X_WDT=y
848CONFIG_SSB_POSSIBLE=y
734 849
735# 850#
736# Sonics Silicon Backplane 851# Sonics Silicon Backplane
737# 852#
738CONFIG_SSB_POSSIBLE=y
739# CONFIG_SSB is not set 853# CONFIG_SSB is not set
740 854
741# 855#
742# Multifunction device drivers 856# Multifunction device drivers
743# 857#
858# CONFIG_MFD_CORE is not set
744# CONFIG_MFD_SM501 is not set 859# CONFIG_MFD_SM501 is not set
745 860# CONFIG_HTC_PASIC3 is not set
746# 861# CONFIG_UCB1400_CORE is not set
747# Multimedia devices 862# CONFIG_TPS65010 is not set
748# 863# CONFIG_MFD_TMIO is not set
749# CONFIG_VIDEO_DEV is not set 864# CONFIG_MFD_WM8400 is not set
750# CONFIG_DVB_CORE is not set 865# CONFIG_MFD_WM831X is not set
751# CONFIG_DAB is not set 866# CONFIG_MFD_WM8350_I2C is not set
867# CONFIG_MFD_PCF50633 is not set
868# CONFIG_MFD_MC13783 is not set
869# CONFIG_AB3100_CORE is not set
870# CONFIG_EZX_PCAP is not set
871# CONFIG_REGULATOR is not set
872# CONFIG_MEDIA_SUPPORT is not set
752 873
753# 874#
754# Graphics support 875# Graphics support
@@ -758,6 +879,7 @@ CONFIG_SSB_POSSIBLE=y
758CONFIG_FB=y 879CONFIG_FB=y
759# CONFIG_FIRMWARE_EDID is not set 880# CONFIG_FIRMWARE_EDID is not set
760# CONFIG_FB_DDC is not set 881# CONFIG_FB_DDC is not set
882# CONFIG_FB_BOOT_VESA_SUPPORT is not set
761CONFIG_FB_CFB_FILLRECT=y 883CONFIG_FB_CFB_FILLRECT=y
762CONFIG_FB_CFB_COPYAREA=y 884CONFIG_FB_CFB_COPYAREA=y
763CONFIG_FB_CFB_IMAGEBLIT=y 885CONFIG_FB_CFB_IMAGEBLIT=y
@@ -765,8 +887,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y
765# CONFIG_FB_SYS_FILLRECT is not set 887# CONFIG_FB_SYS_FILLRECT is not set
766# CONFIG_FB_SYS_COPYAREA is not set 888# CONFIG_FB_SYS_COPYAREA is not set
767# CONFIG_FB_SYS_IMAGEBLIT is not set 889# CONFIG_FB_SYS_IMAGEBLIT is not set
890# CONFIG_FB_FOREIGN_ENDIAN is not set
768# CONFIG_FB_SYS_FOPS is not set 891# CONFIG_FB_SYS_FOPS is not set
769CONFIG_FB_DEFERRED_IO=y
770# CONFIG_FB_SVGALIB is not set 892# CONFIG_FB_SVGALIB is not set
771# CONFIG_FB_MACMODES is not set 893# CONFIG_FB_MACMODES is not set
772# CONFIG_FB_BACKLIGHT is not set 894# CONFIG_FB_BACKLIGHT is not set
@@ -779,6 +901,9 @@ CONFIG_FB_DEFERRED_IO=y
779# CONFIG_FB_S1D13XXX is not set 901# CONFIG_FB_S1D13XXX is not set
780CONFIG_FB_ATMEL=y 902CONFIG_FB_ATMEL=y
781# CONFIG_FB_VIRTUAL is not set 903# CONFIG_FB_VIRTUAL is not set
904# CONFIG_FB_METRONOME is not set
905# CONFIG_FB_MB862XX is not set
906# CONFIG_FB_BROADSHEET is not set
782# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 907# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
783 908
784# 909#
@@ -792,119 +917,124 @@ CONFIG_FB_ATMEL=y
792CONFIG_DUMMY_CONSOLE=y 917CONFIG_DUMMY_CONSOLE=y
793# CONFIG_FRAMEBUFFER_CONSOLE is not set 918# CONFIG_FRAMEBUFFER_CONSOLE is not set
794# CONFIG_LOGO is not set 919# CONFIG_LOGO is not set
795
796#
797# Sound
798#
799CONFIG_SOUND=y 920CONFIG_SOUND=y
800 921CONFIG_SOUND_OSS_CORE=y
801# 922CONFIG_SOUND_OSS_CORE_PRECLAIM=y
802# Advanced Linux Sound Architecture
803#
804CONFIG_SND=y 923CONFIG_SND=y
805CONFIG_SND_TIMER=m 924CONFIG_SND_TIMER=y
806CONFIG_SND_PCM=m 925CONFIG_SND_PCM=m
807# CONFIG_SND_SEQUENCER is not set 926# CONFIG_SND_SEQUENCER is not set
808CONFIG_SND_OSSEMUL=y 927CONFIG_SND_OSSEMUL=y
809CONFIG_SND_MIXER_OSS=m 928CONFIG_SND_MIXER_OSS=m
810CONFIG_SND_PCM_OSS=m 929CONFIG_SND_PCM_OSS=m
811CONFIG_SND_PCM_OSS_PLUGINS=y 930CONFIG_SND_PCM_OSS_PLUGINS=y
931CONFIG_SND_HRTIMER=y
812# CONFIG_SND_DYNAMIC_MINORS is not set 932# CONFIG_SND_DYNAMIC_MINORS is not set
813# CONFIG_SND_SUPPORT_OLD_API is not set 933# CONFIG_SND_SUPPORT_OLD_API is not set
814CONFIG_SND_VERBOSE_PROCFS=y 934CONFIG_SND_VERBOSE_PROCFS=y
815# CONFIG_SND_VERBOSE_PRINTK is not set 935# CONFIG_SND_VERBOSE_PRINTK is not set
816# CONFIG_SND_DEBUG is not set 936# CONFIG_SND_DEBUG is not set
817 937CONFIG_SND_VMASTER=y
818# 938# CONFIG_SND_RAWMIDI_SEQ is not set
819# Generic devices 939# CONFIG_SND_OPL3_LIB_SEQ is not set
820# 940# CONFIG_SND_OPL4_LIB_SEQ is not set
941# CONFIG_SND_SBAWE_SEQ is not set
942# CONFIG_SND_EMU10K1_SEQ is not set
821CONFIG_SND_AC97_CODEC=m 943CONFIG_SND_AC97_CODEC=m
822# CONFIG_SND_DUMMY is not set 944# CONFIG_SND_DRIVERS is not set
823# CONFIG_SND_MTPAV is not set
824# CONFIG_SND_SERIAL_U16550 is not set
825# CONFIG_SND_MPU401 is not set
826 945
827# 946#
828# AVR32 devices 947# Atmel devices (AVR32 and AT91)
829#
830CONFIG_SND_ATMEL_AC97=m
831
832#
833# SPI devices
834#
835
836#
837# System on Chip audio support
838# 948#
949# CONFIG_SND_ATMEL_ABDAC is not set
950CONFIG_SND_ATMEL_AC97C=m
951# CONFIG_SND_SPI is not set
839# CONFIG_SND_SOC is not set 952# CONFIG_SND_SOC is not set
840
841#
842# SoC Audio support for SuperH
843#
844
845#
846# ALSA SoC audio for Freescale SOCs
847#
848
849#
850# Open Sound System
851#
852# CONFIG_SOUND_PRIME is not set 953# CONFIG_SOUND_PRIME is not set
853CONFIG_AC97_BUS=m 954CONFIG_AC97_BUS=m
854CONFIG_HID_SUPPORT=y 955CONFIG_HID_SUPPORT=y
855CONFIG_HID=y 956CONFIG_HID=y
856# CONFIG_HID_DEBUG is not set
857# CONFIG_HIDRAW is not set 957# CONFIG_HIDRAW is not set
958# CONFIG_HID_PID is not set
959
960#
961# Special HID drivers
962#
858CONFIG_USB_SUPPORT=y 963CONFIG_USB_SUPPORT=y
859# CONFIG_USB_ARCH_HAS_HCD is not set 964# CONFIG_USB_ARCH_HAS_HCD is not set
860# CONFIG_USB_ARCH_HAS_OHCI is not set 965# CONFIG_USB_ARCH_HAS_OHCI is not set
861# CONFIG_USB_ARCH_HAS_EHCI is not set 966# CONFIG_USB_ARCH_HAS_EHCI is not set
967# CONFIG_USB_OTG_WHITELIST is not set
968# CONFIG_USB_OTG_BLACKLIST_HUB is not set
969# CONFIG_USB_GADGET_MUSB_HDRC is not set
862 970
863# 971#
864# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 972# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
865# 973#
866CONFIG_USB_GADGET=y 974CONFIG_USB_GADGET=y
867# CONFIG_USB_GADGET_DEBUG is not set 975# CONFIG_USB_GADGET_DEBUG is not set
868# CONFIG_USB_GADGET_DEBUG_FILES is not set 976# CONFIG_USB_GADGET_DEBUG_FILES is not set
977# CONFIG_USB_GADGET_DEBUG_FS is not set
978CONFIG_USB_GADGET_VBUS_DRAW=350
869CONFIG_USB_GADGET_SELECTED=y 979CONFIG_USB_GADGET_SELECTED=y
870# CONFIG_USB_GADGET_AMD5536UDC is not set 980# CONFIG_USB_GADGET_AT91 is not set
871CONFIG_USB_GADGET_ATMEL_USBA=y 981CONFIG_USB_GADGET_ATMEL_USBA=y
872CONFIG_USB_ATMEL_USBA=y 982CONFIG_USB_ATMEL_USBA=y
873# CONFIG_USB_GADGET_FSL_USB2 is not set 983# CONFIG_USB_GADGET_FSL_USB2 is not set
874# CONFIG_USB_GADGET_NET2280 is not set
875# CONFIG_USB_GADGET_PXA2XX is not set
876# CONFIG_USB_GADGET_M66592 is not set
877# CONFIG_USB_GADGET_GOKU is not set
878# CONFIG_USB_GADGET_LH7A40X is not set 984# CONFIG_USB_GADGET_LH7A40X is not set
879# CONFIG_USB_GADGET_OMAP is not set 985# CONFIG_USB_GADGET_OMAP is not set
986# CONFIG_USB_GADGET_PXA25X is not set
987# CONFIG_USB_GADGET_R8A66597 is not set
988# CONFIG_USB_GADGET_PXA27X is not set
989# CONFIG_USB_GADGET_S3C_HSOTG is not set
990# CONFIG_USB_GADGET_IMX is not set
880# CONFIG_USB_GADGET_S3C2410 is not set 991# CONFIG_USB_GADGET_S3C2410 is not set
881# CONFIG_USB_GADGET_AT91 is not set 992# CONFIG_USB_GADGET_M66592 is not set
993# CONFIG_USB_GADGET_AMD5536UDC is not set
994# CONFIG_USB_GADGET_FSL_QE is not set
995# CONFIG_USB_GADGET_CI13XXX is not set
996# CONFIG_USB_GADGET_NET2280 is not set
997# CONFIG_USB_GADGET_GOKU is not set
998# CONFIG_USB_GADGET_LANGWELL is not set
882# CONFIG_USB_GADGET_DUMMY_HCD is not set 999# CONFIG_USB_GADGET_DUMMY_HCD is not set
883CONFIG_USB_GADGET_DUALSPEED=y 1000CONFIG_USB_GADGET_DUALSPEED=y
884CONFIG_USB_ZERO=m 1001CONFIG_USB_ZERO=m
1002# CONFIG_USB_AUDIO is not set
885CONFIG_USB_ETH=m 1003CONFIG_USB_ETH=m
886CONFIG_USB_ETH_RNDIS=y 1004CONFIG_USB_ETH_RNDIS=y
1005# CONFIG_USB_ETH_EEM is not set
887CONFIG_USB_GADGETFS=m 1006CONFIG_USB_GADGETFS=m
888CONFIG_USB_FILE_STORAGE=m 1007CONFIG_USB_FILE_STORAGE=m
889# CONFIG_USB_FILE_STORAGE_TEST is not set 1008# CONFIG_USB_FILE_STORAGE_TEST is not set
890CONFIG_USB_G_SERIAL=m 1009CONFIG_USB_G_SERIAL=m
891# CONFIG_USB_MIDI_GADGET is not set 1010# CONFIG_USB_MIDI_GADGET is not set
892# CONFIG_USB_G_PRINTER is not set 1011# CONFIG_USB_G_PRINTER is not set
1012CONFIG_USB_CDC_COMPOSITE=m
1013
1014#
1015# OTG and related infrastructure
1016#
1017# CONFIG_USB_GPIO_VBUS is not set
1018# CONFIG_NOP_USB_XCEIV is not set
893CONFIG_MMC=y 1019CONFIG_MMC=y
894# CONFIG_MMC_DEBUG is not set 1020# CONFIG_MMC_DEBUG is not set
895# CONFIG_MMC_UNSAFE_RESUME is not set 1021# CONFIG_MMC_UNSAFE_RESUME is not set
896 1022
897# 1023#
898# MMC/SD Card Drivers 1024# MMC/SD/SDIO Card Drivers
899# 1025#
900CONFIG_MMC_BLOCK=y 1026CONFIG_MMC_BLOCK=y
901CONFIG_MMC_BLOCK_BOUNCE=y 1027CONFIG_MMC_BLOCK_BOUNCE=y
902# CONFIG_SDIO_UART is not set 1028# CONFIG_SDIO_UART is not set
1029# CONFIG_MMC_TEST is not set
903 1030
904# 1031#
905# MMC/SD Host Controller Drivers 1032# MMC/SD/SDIO Host Controller Drivers
906# 1033#
1034# CONFIG_MMC_SDHCI is not set
1035# CONFIG_MMC_AT91 is not set
907CONFIG_MMC_ATMELMCI=y 1036CONFIG_MMC_ATMELMCI=y
1037# CONFIG_MMC_ATMELMCI_DMA is not set
908# CONFIG_MMC_SPI is not set 1038# CONFIG_MMC_SPI is not set
909# CONFIG_MEMSTICK is not set 1039# CONFIG_MEMSTICK is not set
910CONFIG_NEW_LEDS=y 1040CONFIG_NEW_LEDS=y
@@ -913,7 +1043,13 @@ CONFIG_LEDS_CLASS=y
913# 1043#
914# LED drivers 1044# LED drivers
915# 1045#
1046# CONFIG_LEDS_PCA9532 is not set
916CONFIG_LEDS_GPIO=y 1047CONFIG_LEDS_GPIO=y
1048CONFIG_LEDS_GPIO_PLATFORM=y
1049# CONFIG_LEDS_LP3944 is not set
1050# CONFIG_LEDS_PCA955X is not set
1051# CONFIG_LEDS_DAC124S085 is not set
1052# CONFIG_LEDS_BD2802 is not set
917 1053
918# 1054#
919# LED Triggers 1055# LED Triggers
@@ -921,6 +1057,14 @@ CONFIG_LEDS_GPIO=y
921CONFIG_LEDS_TRIGGERS=y 1057CONFIG_LEDS_TRIGGERS=y
922CONFIG_LEDS_TRIGGER_TIMER=y 1058CONFIG_LEDS_TRIGGER_TIMER=y
923CONFIG_LEDS_TRIGGER_HEARTBEAT=y 1059CONFIG_LEDS_TRIGGER_HEARTBEAT=y
1060# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1061# CONFIG_LEDS_TRIGGER_GPIO is not set
1062# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1063
1064#
1065# iptables trigger is under Netfilter config (LED target)
1066#
1067# CONFIG_ACCESSIBILITY is not set
924CONFIG_RTC_LIB=y 1068CONFIG_RTC_LIB=y
925CONFIG_RTC_CLASS=y 1069CONFIG_RTC_CLASS=y
926CONFIG_RTC_HCTOSYS=y 1070CONFIG_RTC_HCTOSYS=y
@@ -950,51 +1094,84 @@ CONFIG_RTC_INTF_DEV=y
950# CONFIG_RTC_DRV_PCF8583 is not set 1094# CONFIG_RTC_DRV_PCF8583 is not set
951# CONFIG_RTC_DRV_M41T80 is not set 1095# CONFIG_RTC_DRV_M41T80 is not set
952# CONFIG_RTC_DRV_S35390A is not set 1096# CONFIG_RTC_DRV_S35390A is not set
1097# CONFIG_RTC_DRV_FM3130 is not set
1098# CONFIG_RTC_DRV_RX8581 is not set
1099# CONFIG_RTC_DRV_RX8025 is not set
953 1100
954# 1101#
955# SPI RTC drivers 1102# SPI RTC drivers
956# 1103#
1104# CONFIG_RTC_DRV_M41T94 is not set
1105# CONFIG_RTC_DRV_DS1305 is not set
1106# CONFIG_RTC_DRV_DS1390 is not set
957# CONFIG_RTC_DRV_MAX6902 is not set 1107# CONFIG_RTC_DRV_MAX6902 is not set
958# CONFIG_RTC_DRV_R9701 is not set 1108# CONFIG_RTC_DRV_R9701 is not set
959# CONFIG_RTC_DRV_RS5C348 is not set 1109# CONFIG_RTC_DRV_RS5C348 is not set
1110# CONFIG_RTC_DRV_DS3234 is not set
1111# CONFIG_RTC_DRV_PCF2123 is not set
960 1112
961# 1113#
962# Platform RTC drivers 1114# Platform RTC drivers
963# 1115#
1116# CONFIG_RTC_DRV_DS1286 is not set
964# CONFIG_RTC_DRV_DS1511 is not set 1117# CONFIG_RTC_DRV_DS1511 is not set
965# CONFIG_RTC_DRV_DS1553 is not set 1118# CONFIG_RTC_DRV_DS1553 is not set
966# CONFIG_RTC_DRV_DS1742 is not set 1119# CONFIG_RTC_DRV_DS1742 is not set
967# CONFIG_RTC_DRV_STK17TA8 is not set 1120# CONFIG_RTC_DRV_STK17TA8 is not set
968# CONFIG_RTC_DRV_M48T86 is not set 1121# CONFIG_RTC_DRV_M48T86 is not set
1122# CONFIG_RTC_DRV_M48T35 is not set
969# CONFIG_RTC_DRV_M48T59 is not set 1123# CONFIG_RTC_DRV_M48T59 is not set
1124# CONFIG_RTC_DRV_BQ4802 is not set
970# CONFIG_RTC_DRV_V3020 is not set 1125# CONFIG_RTC_DRV_V3020 is not set
971 1126
972# 1127#
973# on-CPU RTC drivers 1128# on-CPU RTC drivers
974# 1129#
975CONFIG_RTC_DRV_AT32AP700X=y 1130CONFIG_RTC_DRV_AT32AP700X=y
1131CONFIG_DMADEVICES=y
976 1132
977# 1133#
978# Userspace I/O 1134# DMA Devices
979# 1135#
1136CONFIG_DW_DMAC=y
1137CONFIG_DMA_ENGINE=y
1138
1139#
1140# DMA Clients
1141#
1142# CONFIG_NET_DMA is not set
1143# CONFIG_ASYNC_TX_DMA is not set
1144# CONFIG_DMATEST is not set
1145# CONFIG_AUXDISPLAY is not set
980# CONFIG_UIO is not set 1146# CONFIG_UIO is not set
981 1147
982# 1148#
1149# TI VLYNQ
1150#
1151# CONFIG_STAGING is not set
1152
1153#
983# File systems 1154# File systems
984# 1155#
985CONFIG_EXT2_FS=y 1156CONFIG_EXT2_FS=y
986# CONFIG_EXT2_FS_XATTR is not set 1157# CONFIG_EXT2_FS_XATTR is not set
987# CONFIG_EXT2_FS_XIP is not set 1158# CONFIG_EXT2_FS_XIP is not set
988CONFIG_EXT3_FS=y 1159CONFIG_EXT3_FS=y
1160# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
989# CONFIG_EXT3_FS_XATTR is not set 1161# CONFIG_EXT3_FS_XATTR is not set
990# CONFIG_EXT4DEV_FS is not set 1162# CONFIG_EXT4_FS is not set
991CONFIG_JBD=y 1163CONFIG_JBD=y
1164# CONFIG_JBD_DEBUG is not set
992# CONFIG_REISERFS_FS is not set 1165# CONFIG_REISERFS_FS is not set
993# CONFIG_JFS_FS is not set 1166# CONFIG_JFS_FS is not set
994# CONFIG_FS_POSIX_ACL is not set 1167# CONFIG_FS_POSIX_ACL is not set
995# CONFIG_XFS_FS is not set 1168# CONFIG_XFS_FS is not set
996# CONFIG_GFS2_FS is not set 1169# CONFIG_GFS2_FS is not set
997# CONFIG_OCFS2_FS is not set 1170# CONFIG_OCFS2_FS is not set
1171# CONFIG_BTRFS_FS is not set
1172# CONFIG_NILFS2_FS is not set
1173CONFIG_FILE_LOCKING=y
1174CONFIG_FSNOTIFY=y
998# CONFIG_DNOTIFY is not set 1175# CONFIG_DNOTIFY is not set
999CONFIG_INOTIFY=y 1176CONFIG_INOTIFY=y
1000CONFIG_INOTIFY_USER=y 1177CONFIG_INOTIFY_USER=y
@@ -1002,6 +1179,12 @@ CONFIG_INOTIFY_USER=y
1002# CONFIG_AUTOFS_FS is not set 1179# CONFIG_AUTOFS_FS is not set
1003# CONFIG_AUTOFS4_FS is not set 1180# CONFIG_AUTOFS4_FS is not set
1004CONFIG_FUSE_FS=m 1181CONFIG_FUSE_FS=m
1182# CONFIG_CUSE is not set
1183
1184#
1185# Caches
1186#
1187# CONFIG_FSCACHE is not set
1005 1188
1006# 1189#
1007# CD-ROM/DVD Filesystems 1190# CD-ROM/DVD Filesystems
@@ -1025,15 +1208,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1025CONFIG_PROC_FS=y 1208CONFIG_PROC_FS=y
1026# CONFIG_PROC_KCORE is not set 1209# CONFIG_PROC_KCORE is not set
1027CONFIG_PROC_SYSCTL=y 1210CONFIG_PROC_SYSCTL=y
1211CONFIG_PROC_PAGE_MONITOR=y
1028CONFIG_SYSFS=y 1212CONFIG_SYSFS=y
1029CONFIG_TMPFS=y 1213CONFIG_TMPFS=y
1030# CONFIG_TMPFS_POSIX_ACL is not set 1214# CONFIG_TMPFS_POSIX_ACL is not set
1031# CONFIG_HUGETLB_PAGE is not set 1215# CONFIG_HUGETLB_PAGE is not set
1032CONFIG_CONFIGFS_FS=y 1216CONFIG_CONFIGFS_FS=y
1033 1217CONFIG_MISC_FILESYSTEMS=y
1034#
1035# Miscellaneous filesystems
1036#
1037# CONFIG_ADFS_FS is not set 1218# CONFIG_ADFS_FS is not set
1038# CONFIG_AFFS_FS is not set 1219# CONFIG_AFFS_FS is not set
1039# CONFIG_HFS_FS is not set 1220# CONFIG_HFS_FS is not set
@@ -1059,8 +1240,10 @@ CONFIG_UBIFS_FS_LZO=y
1059CONFIG_UBIFS_FS_ZLIB=y 1240CONFIG_UBIFS_FS_ZLIB=y
1060# CONFIG_UBIFS_FS_DEBUG is not set 1241# CONFIG_UBIFS_FS_DEBUG is not set
1061# CONFIG_CRAMFS is not set 1242# CONFIG_CRAMFS is not set
1243# CONFIG_SQUASHFS is not set
1062# CONFIG_VXFS_FS is not set 1244# CONFIG_VXFS_FS is not set
1063# CONFIG_MINIX_FS is not set 1245# CONFIG_MINIX_FS is not set
1246# CONFIG_OMFS_FS is not set
1064# CONFIG_HPFS_FS is not set 1247# CONFIG_HPFS_FS is not set
1065# CONFIG_QNX4FS_FS is not set 1248# CONFIG_QNX4FS_FS is not set
1066# CONFIG_ROMFS_FS is not set 1249# CONFIG_ROMFS_FS is not set
@@ -1071,19 +1254,16 @@ CONFIG_NFS_FS=y
1071CONFIG_NFS_V3=y 1254CONFIG_NFS_V3=y
1072# CONFIG_NFS_V3_ACL is not set 1255# CONFIG_NFS_V3_ACL is not set
1073# CONFIG_NFS_V4 is not set 1256# CONFIG_NFS_V4 is not set
1074# CONFIG_NFS_DIRECTIO is not set 1257CONFIG_ROOT_NFS=y
1075CONFIG_NFSD=m 1258CONFIG_NFSD=m
1076CONFIG_NFSD_V3=y 1259CONFIG_NFSD_V3=y
1077# CONFIG_NFSD_V3_ACL is not set 1260# CONFIG_NFSD_V3_ACL is not set
1078# CONFIG_NFSD_V4 is not set 1261# CONFIG_NFSD_V4 is not set
1079CONFIG_NFSD_TCP=y
1080CONFIG_ROOT_NFS=y
1081CONFIG_LOCKD=y 1262CONFIG_LOCKD=y
1082CONFIG_LOCKD_V4=y 1263CONFIG_LOCKD_V4=y
1083CONFIG_EXPORTFS=m 1264CONFIG_EXPORTFS=m
1084CONFIG_NFS_COMMON=y 1265CONFIG_NFS_COMMON=y
1085CONFIG_SUNRPC=y 1266CONFIG_SUNRPC=y
1086# CONFIG_SUNRPC_BIND34 is not set
1087# CONFIG_RPCSEC_GSS_KRB5 is not set 1267# CONFIG_RPCSEC_GSS_KRB5 is not set
1088# CONFIG_RPCSEC_GSS_SPKM3 is not set 1268# CONFIG_RPCSEC_GSS_SPKM3 is not set
1089CONFIG_SMB_FS=m 1269CONFIG_SMB_FS=m
@@ -1151,16 +1331,24 @@ CONFIG_NLS_UTF8=m
1151# CONFIG_PRINTK_TIME is not set 1331# CONFIG_PRINTK_TIME is not set
1152CONFIG_ENABLE_WARN_DEPRECATED=y 1332CONFIG_ENABLE_WARN_DEPRECATED=y
1153CONFIG_ENABLE_MUST_CHECK=y 1333CONFIG_ENABLE_MUST_CHECK=y
1334CONFIG_FRAME_WARN=1024
1154CONFIG_MAGIC_SYSRQ=y 1335CONFIG_MAGIC_SYSRQ=y
1336# CONFIG_STRIP_ASM_SYMS is not set
1155# CONFIG_UNUSED_SYMBOLS is not set 1337# CONFIG_UNUSED_SYMBOLS is not set
1156# CONFIG_DEBUG_FS is not set 1338CONFIG_DEBUG_FS=y
1157# CONFIG_HEADERS_CHECK is not set 1339# CONFIG_HEADERS_CHECK is not set
1158CONFIG_DEBUG_KERNEL=y 1340CONFIG_DEBUG_KERNEL=y
1159# CONFIG_DEBUG_SHIRQ is not set 1341# CONFIG_DEBUG_SHIRQ is not set
1160CONFIG_DETECT_SOFTLOCKUP=y 1342CONFIG_DETECT_SOFTLOCKUP=y
1343# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1344CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1345CONFIG_DETECT_HUNG_TASK=y
1346# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1347CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1161CONFIG_SCHED_DEBUG=y 1348CONFIG_SCHED_DEBUG=y
1162# CONFIG_SCHEDSTATS is not set 1349# CONFIG_SCHEDSTATS is not set
1163# CONFIG_TIMER_STATS is not set 1350# CONFIG_TIMER_STATS is not set
1351# CONFIG_DEBUG_OBJECTS is not set
1164# CONFIG_SLUB_DEBUG_ON is not set 1352# CONFIG_SLUB_DEBUG_ON is not set
1165# CONFIG_SLUB_STATS is not set 1353# CONFIG_SLUB_STATS is not set
1166# CONFIG_DEBUG_RT_MUTEXES is not set 1354# CONFIG_DEBUG_RT_MUTEXES is not set
@@ -1172,19 +1360,48 @@ CONFIG_SCHED_DEBUG=y
1172# CONFIG_LOCK_STAT is not set 1360# CONFIG_LOCK_STAT is not set
1173# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1361# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1174# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1362# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1363CONFIG_STACKTRACE=y
1175# CONFIG_DEBUG_KOBJECT is not set 1364# CONFIG_DEBUG_KOBJECT is not set
1176CONFIG_DEBUG_BUGVERBOSE=y 1365CONFIG_DEBUG_BUGVERBOSE=y
1177# CONFIG_DEBUG_INFO is not set 1366# CONFIG_DEBUG_INFO is not set
1178# CONFIG_DEBUG_VM is not set 1367# CONFIG_DEBUG_VM is not set
1368# CONFIG_DEBUG_WRITECOUNT is not set
1369# CONFIG_DEBUG_MEMORY_INIT is not set
1179# CONFIG_DEBUG_LIST is not set 1370# CONFIG_DEBUG_LIST is not set
1180# CONFIG_DEBUG_SG is not set 1371# CONFIG_DEBUG_SG is not set
1372# CONFIG_DEBUG_NOTIFIERS is not set
1373# CONFIG_DEBUG_CREDENTIALS is not set
1181CONFIG_FRAME_POINTER=y 1374CONFIG_FRAME_POINTER=y
1182# CONFIG_BOOT_PRINTK_DELAY is not set 1375# CONFIG_BOOT_PRINTK_DELAY is not set
1183# CONFIG_RCU_TORTURE_TEST is not set 1376# CONFIG_RCU_TORTURE_TEST is not set
1377# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1184# CONFIG_KPROBES_SANITY_TEST is not set 1378# CONFIG_KPROBES_SANITY_TEST is not set
1185# CONFIG_BACKTRACE_SELF_TEST is not set 1379# CONFIG_BACKTRACE_SELF_TEST is not set
1380# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1381# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1186# CONFIG_LKDTM is not set 1382# CONFIG_LKDTM is not set
1187# CONFIG_FAULT_INJECTION is not set 1383# CONFIG_FAULT_INJECTION is not set
1384# CONFIG_PAGE_POISONING is not set
1385CONFIG_NOP_TRACER=y
1386CONFIG_RING_BUFFER=y
1387CONFIG_EVENT_TRACING=y
1388CONFIG_CONTEXT_SWITCH_TRACER=y
1389CONFIG_RING_BUFFER_ALLOW_SWAP=y
1390CONFIG_TRACING=y
1391CONFIG_TRACING_SUPPORT=y
1392CONFIG_FTRACE=y
1393# CONFIG_IRQSOFF_TRACER is not set
1394# CONFIG_SCHED_TRACER is not set
1395# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1396# CONFIG_BOOT_TRACER is not set
1397CONFIG_BRANCH_PROFILE_NONE=y
1398# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1399# CONFIG_PROFILE_ALL_BRANCHES is not set
1400# CONFIG_KMEMTRACE is not set
1401# CONFIG_WORKQUEUE_TRACER is not set
1402# CONFIG_BLK_DEV_IO_TRACE is not set
1403# CONFIG_RING_BUFFER_BENCHMARK is not set
1404# CONFIG_DYNAMIC_DEBUG is not set
1188# CONFIG_SAMPLES is not set 1405# CONFIG_SAMPLES is not set
1189 1406
1190# 1407#
@@ -1192,63 +1409,118 @@ CONFIG_FRAME_POINTER=y
1192# 1409#
1193# CONFIG_KEYS is not set 1410# CONFIG_KEYS is not set
1194# CONFIG_SECURITY is not set 1411# CONFIG_SECURITY is not set
1412# CONFIG_SECURITYFS is not set
1195# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1413# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1196CONFIG_CRYPTO=y 1414CONFIG_CRYPTO=y
1415
1416#
1417# Crypto core or helper
1418#
1419# CONFIG_CRYPTO_FIPS is not set
1197CONFIG_CRYPTO_ALGAPI=y 1420CONFIG_CRYPTO_ALGAPI=y
1421CONFIG_CRYPTO_ALGAPI2=y
1198CONFIG_CRYPTO_AEAD=y 1422CONFIG_CRYPTO_AEAD=y
1423CONFIG_CRYPTO_AEAD2=y
1199CONFIG_CRYPTO_BLKCIPHER=y 1424CONFIG_CRYPTO_BLKCIPHER=y
1200# CONFIG_CRYPTO_SEQIV is not set 1425CONFIG_CRYPTO_BLKCIPHER2=y
1201CONFIG_CRYPTO_HASH=y 1426CONFIG_CRYPTO_HASH=y
1427CONFIG_CRYPTO_HASH2=y
1428CONFIG_CRYPTO_RNG=m
1429CONFIG_CRYPTO_RNG2=y
1430CONFIG_CRYPTO_PCOMP=y
1202CONFIG_CRYPTO_MANAGER=y 1431CONFIG_CRYPTO_MANAGER=y
1432CONFIG_CRYPTO_MANAGER2=y
1433# CONFIG_CRYPTO_GF128MUL is not set
1434# CONFIG_CRYPTO_NULL is not set
1435CONFIG_CRYPTO_WORKQUEUE=y
1436# CONFIG_CRYPTO_CRYPTD is not set
1437CONFIG_CRYPTO_AUTHENC=y
1438# CONFIG_CRYPTO_TEST is not set
1439
1440#
1441# Authenticated Encryption with Associated Data
1442#
1443# CONFIG_CRYPTO_CCM is not set
1444# CONFIG_CRYPTO_GCM is not set
1445# CONFIG_CRYPTO_SEQIV is not set
1446
1447#
1448# Block modes
1449#
1450CONFIG_CRYPTO_CBC=y
1451# CONFIG_CRYPTO_CTR is not set
1452# CONFIG_CRYPTO_CTS is not set
1453CONFIG_CRYPTO_ECB=m
1454# CONFIG_CRYPTO_LRW is not set
1455# CONFIG_CRYPTO_PCBC is not set
1456# CONFIG_CRYPTO_XTS is not set
1457
1458#
1459# Hash modes
1460#
1203CONFIG_CRYPTO_HMAC=y 1461CONFIG_CRYPTO_HMAC=y
1204# CONFIG_CRYPTO_XCBC is not set 1462# CONFIG_CRYPTO_XCBC is not set
1205# CONFIG_CRYPTO_NULL is not set 1463# CONFIG_CRYPTO_VMAC is not set
1464
1465#
1466# Digest
1467#
1468# CONFIG_CRYPTO_CRC32C is not set
1469# CONFIG_CRYPTO_GHASH is not set
1206# CONFIG_CRYPTO_MD4 is not set 1470# CONFIG_CRYPTO_MD4 is not set
1207CONFIG_CRYPTO_MD5=y 1471CONFIG_CRYPTO_MD5=y
1472# CONFIG_CRYPTO_MICHAEL_MIC is not set
1473# CONFIG_CRYPTO_RMD128 is not set
1474# CONFIG_CRYPTO_RMD160 is not set
1475# CONFIG_CRYPTO_RMD256 is not set
1476# CONFIG_CRYPTO_RMD320 is not set
1208CONFIG_CRYPTO_SHA1=y 1477CONFIG_CRYPTO_SHA1=y
1209# CONFIG_CRYPTO_SHA256 is not set 1478# CONFIG_CRYPTO_SHA256 is not set
1210# CONFIG_CRYPTO_SHA512 is not set 1479# CONFIG_CRYPTO_SHA512 is not set
1211# CONFIG_CRYPTO_WP512 is not set
1212# CONFIG_CRYPTO_TGR192 is not set 1480# CONFIG_CRYPTO_TGR192 is not set
1213# CONFIG_CRYPTO_GF128MUL is not set 1481# CONFIG_CRYPTO_WP512 is not set
1214CONFIG_CRYPTO_ECB=m 1482
1215CONFIG_CRYPTO_CBC=y 1483#
1216# CONFIG_CRYPTO_PCBC is not set 1484# Ciphers
1217# CONFIG_CRYPTO_LRW is not set 1485#
1218# CONFIG_CRYPTO_XTS is not set 1486CONFIG_CRYPTO_AES=m
1219# CONFIG_CRYPTO_CTR is not set 1487# CONFIG_CRYPTO_ANUBIS is not set
1220# CONFIG_CRYPTO_GCM is not set 1488CONFIG_CRYPTO_ARC4=m
1221# CONFIG_CRYPTO_CCM is not set
1222# CONFIG_CRYPTO_CRYPTD is not set
1223CONFIG_CRYPTO_DES=y
1224# CONFIG_CRYPTO_FCRYPT is not set
1225# CONFIG_CRYPTO_BLOWFISH is not set 1489# CONFIG_CRYPTO_BLOWFISH is not set
1226# CONFIG_CRYPTO_TWOFISH is not set 1490# CONFIG_CRYPTO_CAMELLIA is not set
1227# CONFIG_CRYPTO_SERPENT is not set
1228# CONFIG_CRYPTO_AES is not set
1229# CONFIG_CRYPTO_CAST5 is not set 1491# CONFIG_CRYPTO_CAST5 is not set
1230# CONFIG_CRYPTO_CAST6 is not set 1492# CONFIG_CRYPTO_CAST6 is not set
1231# CONFIG_CRYPTO_TEA is not set 1493CONFIG_CRYPTO_DES=y
1232CONFIG_CRYPTO_ARC4=m 1494# CONFIG_CRYPTO_FCRYPT is not set
1233# CONFIG_CRYPTO_KHAZAD is not set 1495# CONFIG_CRYPTO_KHAZAD is not set
1234# CONFIG_CRYPTO_ANUBIS is not set
1235# CONFIG_CRYPTO_SEED is not set
1236# CONFIG_CRYPTO_SALSA20 is not set 1496# CONFIG_CRYPTO_SALSA20 is not set
1497# CONFIG_CRYPTO_SEED is not set
1498# CONFIG_CRYPTO_SERPENT is not set
1499# CONFIG_CRYPTO_TEA is not set
1500# CONFIG_CRYPTO_TWOFISH is not set
1501
1502#
1503# Compression
1504#
1237CONFIG_CRYPTO_DEFLATE=y 1505CONFIG_CRYPTO_DEFLATE=y
1238# CONFIG_CRYPTO_MICHAEL_MIC is not set 1506# CONFIG_CRYPTO_ZLIB is not set
1239# CONFIG_CRYPTO_CRC32C is not set
1240# CONFIG_CRYPTO_CAMELLIA is not set
1241# CONFIG_CRYPTO_TEST is not set
1242CONFIG_CRYPTO_AUTHENC=y
1243CONFIG_CRYPTO_LZO=y 1507CONFIG_CRYPTO_LZO=y
1508
1509#
1510# Random Number Generation
1511#
1512CONFIG_CRYPTO_ANSI_CPRNG=m
1244CONFIG_CRYPTO_HW=y 1513CONFIG_CRYPTO_HW=y
1514CONFIG_BINARY_PRINTF=y
1245 1515
1246# 1516#
1247# Library routines 1517# Library routines
1248# 1518#
1249CONFIG_BITREVERSE=y 1519CONFIG_BITREVERSE=y
1520CONFIG_GENERIC_FIND_LAST_BIT=y
1250CONFIG_CRC_CCITT=m 1521CONFIG_CRC_CCITT=m
1251CONFIG_CRC16=y 1522CONFIG_CRC16=y
1523# CONFIG_CRC_T10DIF is not set
1252# CONFIG_CRC_ITU_T is not set 1524# CONFIG_CRC_ITU_T is not set
1253CONFIG_CRC32=y 1525CONFIG_CRC32=y
1254# CONFIG_CRC7 is not set 1526# CONFIG_CRC7 is not set
@@ -1257,8 +1529,9 @@ CONFIG_ZLIB_INFLATE=y
1257CONFIG_ZLIB_DEFLATE=y 1529CONFIG_ZLIB_DEFLATE=y
1258CONFIG_LZO_COMPRESS=y 1530CONFIG_LZO_COMPRESS=y
1259CONFIG_LZO_DECOMPRESS=y 1531CONFIG_LZO_DECOMPRESS=y
1532CONFIG_DECOMPRESS_GZIP=y
1260CONFIG_GENERIC_ALLOCATOR=y 1533CONFIG_GENERIC_ALLOCATOR=y
1261CONFIG_PLIST=y
1262CONFIG_HAS_IOMEM=y 1534CONFIG_HAS_IOMEM=y
1263CONFIG_HAS_IOPORT=y 1535CONFIG_HAS_IOPORT=y
1264CONFIG_HAS_DMA=y 1536CONFIG_HAS_DMA=y
1537CONFIG_NLATTR=y
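
The defconfig files added below are generated board configurations for the ATNGW100 mkII network gateway variants. As a minimal usage sketch (assuming a cross-toolchain installed with the avr32-linux- prefix; CROSS_COMPILE should be adjusted to match whatever toolchain is actually present), a file from arch/avr32/configs/ is selected and built with the standard kbuild targets from the top of the source tree:

    # copy arch/avr32/configs/atngw100mkii_defconfig into .config
    make ARCH=avr32 CROSS_COMPILE=avr32-linux- atngw100mkii_defconfig
    # build the kernel with the resulting configuration
    make ARCH=avr32 CROSS_COMPILE=avr32-linux-

As the "Automatically generated make config: don't edit" header in each file indicates, configuration changes are normally made through make ARCH=avr32 menuconfig rather than by editing these files by hand.
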
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
new file mode 100644
index 000000000000..9b8b5b3b9c71
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -0,0 +1,1414 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.32-rc5
4# Thu Nov 5 15:32:26 2009
5#
6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y
8CONFIG_GENERIC_HARDIRQS=y
9CONFIG_STACKTRACE_SUPPORT=y
10CONFIG_LOCKDEP_SUPPORT=y
11CONFIG_TRACE_IRQFLAGS_SUPPORT=y
12CONFIG_HARDIRQS_SW_RESEND=y
13CONFIG_GENERIC_IRQ_PROBE=y
14CONFIG_RWSEM_GENERIC_SPINLOCK=y
15CONFIG_GENERIC_TIME=y
16CONFIG_GENERIC_CLOCKEVENTS=y
17# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
18# CONFIG_ARCH_HAS_ILOG2_U32 is not set
19# CONFIG_ARCH_HAS_ILOG2_U64 is not set
20CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
25
26#
27# General setup
28#
29CONFIG_EXPERIMENTAL=y
30CONFIG_BROKEN_ON_SMP=y
31CONFIG_INIT_ENV_ARG_LIMIT=32
32CONFIG_LOCALVERSION=""
33# CONFIG_LOCALVERSION_AUTO is not set
34CONFIG_SWAP=y
35CONFIG_SYSVIPC=y
36CONFIG_SYSVIPC_SYSCTL=y
37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
39CONFIG_BSD_PROCESS_ACCT=y
40CONFIG_BSD_PROCESS_ACCT_V3=y
41# CONFIG_TASKSTATS is not set
42# CONFIG_AUDIT is not set
43
44#
45# RCU Subsystem
46#
47CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set
49# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set
52# CONFIG_TREE_RCU_TRACE is not set
53# CONFIG_IKCONFIG is not set
54CONFIG_LOG_BUF_SHIFT=14
55# CONFIG_GROUP_SCHED is not set
56# CONFIG_CGROUPS is not set
57CONFIG_SYSFS_DEPRECATED=y
58CONFIG_SYSFS_DEPRECATED_V2=y
59# CONFIG_RELAY is not set
60# CONFIG_NAMESPACES is not set
61CONFIG_BLK_DEV_INITRD=y
62CONFIG_INITRAMFS_SOURCE=""
63CONFIG_RD_GZIP=y
64# CONFIG_RD_BZIP2 is not set
65# CONFIG_RD_LZMA is not set
66CONFIG_CC_OPTIMIZE_FOR_SIZE=y
67CONFIG_SYSCTL=y
68CONFIG_ANON_INODES=y
69CONFIG_EMBEDDED=y
70# CONFIG_SYSCTL_SYSCALL is not set
71CONFIG_KALLSYMS=y
72# CONFIG_KALLSYMS_ALL is not set
73# CONFIG_KALLSYMS_EXTRA_PASS is not set
74CONFIG_HOTPLUG=y
75CONFIG_PRINTK=y
76CONFIG_BUG=y
77CONFIG_ELF_CORE=y
78# CONFIG_BASE_FULL is not set
79CONFIG_FUTEX=y
80CONFIG_EPOLL=y
81CONFIG_SIGNALFD=y
82CONFIG_TIMERFD=y
83CONFIG_EVENTFD=y
84CONFIG_SHMEM=y
85CONFIG_AIO=y
86
87#
88# Kernel Performance Events And Counters
89#
90CONFIG_VM_EVENT_COUNTERS=y
91CONFIG_SLUB_DEBUG=y
92# CONFIG_COMPAT_BRK is not set
93# CONFIG_SLAB is not set
94CONFIG_SLUB=y
95# CONFIG_SLOB is not set
96CONFIG_PROFILING=y
97CONFIG_TRACEPOINTS=y
98CONFIG_OPROFILE=m
99CONFIG_HAVE_OPROFILE=y
100CONFIG_KPROBES=y
101CONFIG_HAVE_KPROBES=y
102CONFIG_HAVE_CLK=y
103
104#
105# GCOV-based kernel profiling
106#
107# CONFIG_GCOV_KERNEL is not set
108CONFIG_SLOW_WORK=y
109# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
110CONFIG_SLABINFO=y
111CONFIG_RT_MUTEXES=y
112CONFIG_BASE_SMALL=1
113CONFIG_MODULES=y
114# CONFIG_MODULE_FORCE_LOAD is not set
115CONFIG_MODULE_UNLOAD=y
116CONFIG_MODULE_FORCE_UNLOAD=y
117# CONFIG_MODVERSIONS is not set
118# CONFIG_MODULE_SRCVERSION_ALL is not set
119CONFIG_BLOCK=y
120CONFIG_LBDAF=y
121# CONFIG_BLK_DEV_BSG is not set
122# CONFIG_BLK_DEV_INTEGRITY is not set
123
124#
125# IO Schedulers
126#
127CONFIG_IOSCHED_NOOP=y
128# CONFIG_IOSCHED_AS is not set
129# CONFIG_IOSCHED_DEADLINE is not set
130CONFIG_IOSCHED_CFQ=y
131# CONFIG_DEFAULT_AS is not set
132# CONFIG_DEFAULT_DEADLINE is not set
133CONFIG_DEFAULT_CFQ=y
134# CONFIG_DEFAULT_NOOP is not set
135CONFIG_DEFAULT_IOSCHED="cfq"
136CONFIG_FREEZER=y
137
138#
139# System Type and features
140#
141CONFIG_TICK_ONESHOT=y
142CONFIG_NO_HZ=y
143CONFIG_HIGH_RES_TIMERS=y
144CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
145CONFIG_SUBARCH_AVR32B=y
146CONFIG_MMU=y
147CONFIG_PERFORMANCE_COUNTERS=y
148CONFIG_PLATFORM_AT32AP=y
149CONFIG_CPU_AT32AP700X=y
150CONFIG_CPU_AT32AP7000=y
151CONFIG_BOARD_ATNGW100_COMMON=y
152# CONFIG_BOARD_ATSTK1000 is not set
153# CONFIG_BOARD_ATNGW100_MKI is not set
154CONFIG_BOARD_ATNGW100_MKII=y
155# CONFIG_BOARD_HAMMERHEAD is not set
156# CONFIG_BOARD_FAVR_32 is not set
157# CONFIG_BOARD_MERISC is not set
158# CONFIG_BOARD_MIMC200 is not set
159# CONFIG_BOARD_ATNGW100_MKII_LCD is not set
160CONFIG_BOARD_ATNGW100_ADDON_NONE=y
161# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set
162# CONFIG_BOARD_ATNGW100_MRMT is not set
163CONFIG_LOADER_U_BOOT=y
164
165#
166# Atmel AVR32 AP options
167#
168# CONFIG_AP700X_32_BIT_SMC is not set
169CONFIG_AP700X_16_BIT_SMC=y
170# CONFIG_AP700X_8_BIT_SMC is not set
171CONFIG_LOAD_ADDRESS=0x10000000
172CONFIG_ENTRY_ADDRESS=0x90000000
173CONFIG_PHYS_OFFSET=0x10000000
174CONFIG_PREEMPT_NONE=y
175# CONFIG_PREEMPT_VOLUNTARY is not set
176# CONFIG_PREEMPT is not set
177CONFIG_QUICKLIST=y
178# CONFIG_HAVE_ARCH_BOOTMEM is not set
179# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
180# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
181CONFIG_ARCH_FLATMEM_ENABLE=y
182# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
183# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
184CONFIG_SELECT_MEMORY_MODEL=y
185CONFIG_FLATMEM_MANUAL=y
186# CONFIG_DISCONTIGMEM_MANUAL is not set
187# CONFIG_SPARSEMEM_MANUAL is not set
188CONFIG_FLATMEM=y
189CONFIG_FLAT_NODE_MEM_MAP=y
190CONFIG_PAGEFLAGS_EXTENDED=y
191CONFIG_SPLIT_PTLOCK_CPUS=4
192# CONFIG_PHYS_ADDR_T_64BIT is not set
193CONFIG_ZONE_DMA_FLAG=0
194CONFIG_NR_QUICK=2
195CONFIG_VIRT_TO_BUS=y
196CONFIG_HAVE_MLOCK=y
197CONFIG_HAVE_MLOCKED_PAGE_BIT=y
198# CONFIG_KSM is not set
199CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
200# CONFIG_OWNERSHIP_TRACE is not set
201CONFIG_NMI_DEBUGGING=y
202# CONFIG_HZ_100 is not set
203CONFIG_HZ_250=y
204# CONFIG_HZ_300 is not set
205# CONFIG_HZ_1000 is not set
206CONFIG_HZ=250
207CONFIG_SCHED_HRTICK=y
208CONFIG_CMDLINE=""
209
210#
211# Power management options
212#
213CONFIG_PM=y
214# CONFIG_PM_DEBUG is not set
215CONFIG_PM_SLEEP=y
216CONFIG_SUSPEND=y
217CONFIG_SUSPEND_FREEZER=y
218# CONFIG_PM_RUNTIME is not set
219CONFIG_ARCH_SUSPEND_POSSIBLE=y
220
221#
222# CPU Frequency scaling
223#
224CONFIG_CPU_FREQ=y
225CONFIG_CPU_FREQ_TABLE=y
226# CONFIG_CPU_FREQ_DEBUG is not set
227# CONFIG_CPU_FREQ_STAT is not set
228# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
229# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
230# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
231CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
232# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
233CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
234# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
235CONFIG_CPU_FREQ_GOV_USERSPACE=y
236CONFIG_CPU_FREQ_GOV_ONDEMAND=y
237# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
238CONFIG_CPU_FREQ_AT32AP=y
239
240#
241# Bus options
242#
243# CONFIG_ARCH_SUPPORTS_MSI is not set
244# CONFIG_PCCARD is not set
245
246#
247# Executable file formats
248#
249CONFIG_BINFMT_ELF=y
250# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
251# CONFIG_HAVE_AOUT is not set
252# CONFIG_BINFMT_MISC is not set
253CONFIG_NET=y
254
255#
256# Networking options
257#
258CONFIG_PACKET=y
259CONFIG_PACKET_MMAP=y
260CONFIG_UNIX=y
261CONFIG_XFRM=y
262CONFIG_XFRM_USER=y
263# CONFIG_XFRM_SUB_POLICY is not set
264# CONFIG_XFRM_MIGRATE is not set
265# CONFIG_XFRM_STATISTICS is not set
266CONFIG_XFRM_IPCOMP=y
267CONFIG_NET_KEY=y
268# CONFIG_NET_KEY_MIGRATE is not set
269CONFIG_INET=y
270CONFIG_IP_MULTICAST=y
271CONFIG_IP_ADVANCED_ROUTER=y
272CONFIG_ASK_IP_FIB_HASH=y
273# CONFIG_IP_FIB_TRIE is not set
274CONFIG_IP_FIB_HASH=y
275# CONFIG_IP_MULTIPLE_TABLES is not set
276# CONFIG_IP_ROUTE_MULTIPATH is not set
277# CONFIG_IP_ROUTE_VERBOSE is not set
278CONFIG_IP_PNP=y
279CONFIG_IP_PNP_DHCP=y
280# CONFIG_IP_PNP_BOOTP is not set
281# CONFIG_IP_PNP_RARP is not set
282# CONFIG_NET_IPIP is not set
283# CONFIG_NET_IPGRE is not set
284CONFIG_IP_MROUTE=y
285CONFIG_IP_PIMSM_V1=y
286# CONFIG_IP_PIMSM_V2 is not set
287# CONFIG_ARPD is not set
288CONFIG_SYN_COOKIES=y
289CONFIG_INET_AH=y
290CONFIG_INET_ESP=y
291CONFIG_INET_IPCOMP=y
292CONFIG_INET_XFRM_TUNNEL=y
293CONFIG_INET_TUNNEL=y
294CONFIG_INET_XFRM_MODE_TRANSPORT=y
295CONFIG_INET_XFRM_MODE_TUNNEL=y
296CONFIG_INET_XFRM_MODE_BEET=y
297# CONFIG_INET_LRO is not set
298CONFIG_INET_DIAG=y
299CONFIG_INET_TCP_DIAG=y
300# CONFIG_TCP_CONG_ADVANCED is not set
301CONFIG_TCP_CONG_CUBIC=y
302CONFIG_DEFAULT_TCP_CONG="cubic"
303# CONFIG_TCP_MD5SIG is not set
304CONFIG_IPV6=y
305# CONFIG_IPV6_PRIVACY is not set
306# CONFIG_IPV6_ROUTER_PREF is not set
307# CONFIG_IPV6_OPTIMISTIC_DAD is not set
308CONFIG_INET6_AH=y
309CONFIG_INET6_ESP=y
310CONFIG_INET6_IPCOMP=y
311# CONFIG_IPV6_MIP6 is not set
312CONFIG_INET6_XFRM_TUNNEL=y
313CONFIG_INET6_TUNNEL=y
314CONFIG_INET6_XFRM_MODE_TRANSPORT=y
315CONFIG_INET6_XFRM_MODE_TUNNEL=y
316CONFIG_INET6_XFRM_MODE_BEET=y
317# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
318CONFIG_IPV6_SIT=y
319CONFIG_IPV6_NDISC_NODETYPE=y
320# CONFIG_IPV6_TUNNEL is not set
321# CONFIG_IPV6_MULTIPLE_TABLES is not set
322# CONFIG_IPV6_MROUTE is not set
323# CONFIG_NETWORK_SECMARK is not set
324CONFIG_NETFILTER=y
325# CONFIG_NETFILTER_DEBUG is not set
326# CONFIG_NETFILTER_ADVANCED is not set
327
328#
329# Core Netfilter Configuration
330#
331CONFIG_NETFILTER_NETLINK=m
332CONFIG_NETFILTER_NETLINK_LOG=m
333CONFIG_NF_CONNTRACK=m
334CONFIG_NF_CONNTRACK_FTP=m
335CONFIG_NF_CONNTRACK_IRC=m
336CONFIG_NF_CONNTRACK_SIP=m
337CONFIG_NF_CT_NETLINK=m
338CONFIG_NETFILTER_XTABLES=y
339CONFIG_NETFILTER_XT_TARGET_MARK=m
340CONFIG_NETFILTER_XT_TARGET_NFLOG=m
341CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
342CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
343CONFIG_NETFILTER_XT_MATCH_MARK=m
344CONFIG_NETFILTER_XT_MATCH_POLICY=m
345CONFIG_NETFILTER_XT_MATCH_STATE=m
346# CONFIG_IP_VS is not set
347
348#
349# IP: Netfilter Configuration
350#
351CONFIG_NF_DEFRAG_IPV4=m
352CONFIG_NF_CONNTRACK_IPV4=m
353CONFIG_NF_CONNTRACK_PROC_COMPAT=y
354CONFIG_IP_NF_IPTABLES=m
355CONFIG_IP_NF_FILTER=m
356CONFIG_IP_NF_TARGET_REJECT=m
357CONFIG_IP_NF_TARGET_LOG=m
358# CONFIG_IP_NF_TARGET_ULOG is not set
359CONFIG_NF_NAT=m
360CONFIG_NF_NAT_NEEDED=y
361CONFIG_IP_NF_TARGET_MASQUERADE=m
362CONFIG_NF_NAT_FTP=m
363CONFIG_NF_NAT_IRC=m
364# CONFIG_NF_NAT_TFTP is not set
365# CONFIG_NF_NAT_AMANDA is not set
366# CONFIG_NF_NAT_PPTP is not set
367# CONFIG_NF_NAT_H323 is not set
368CONFIG_NF_NAT_SIP=m
369CONFIG_IP_NF_MANGLE=m
370
371#
372# IPv6: Netfilter Configuration
373#
374CONFIG_NF_CONNTRACK_IPV6=m
375CONFIG_IP6_NF_IPTABLES=m
376CONFIG_IP6_NF_MATCH_IPV6HEADER=m
377CONFIG_IP6_NF_TARGET_LOG=m
378CONFIG_IP6_NF_FILTER=m
379CONFIG_IP6_NF_TARGET_REJECT=m
380CONFIG_IP6_NF_MANGLE=m
381# CONFIG_IP_DCCP is not set
382# CONFIG_IP_SCTP is not set
383# CONFIG_RDS is not set
384# CONFIG_TIPC is not set
385# CONFIG_ATM is not set
386CONFIG_STP=m
387CONFIG_BRIDGE=m
388# CONFIG_NET_DSA is not set
389CONFIG_VLAN_8021Q=m
390# CONFIG_VLAN_8021Q_GVRP is not set
391# CONFIG_DECNET is not set
392CONFIG_LLC=m
393# CONFIG_LLC2 is not set
394# CONFIG_IPX is not set
395# CONFIG_ATALK is not set
396# CONFIG_X25 is not set
397# CONFIG_LAPB is not set
398# CONFIG_ECONET is not set
399# CONFIG_WAN_ROUTER is not set
400# CONFIG_PHONET is not set
401# CONFIG_IEEE802154 is not set
402# CONFIG_NET_SCHED is not set
403# CONFIG_DCB is not set
404
405#
406# Network testing
407#
408# CONFIG_NET_PKTGEN is not set
409# CONFIG_NET_TCPPROBE is not set
410# CONFIG_NET_DROP_MONITOR is not set
411# CONFIG_HAMRADIO is not set
412# CONFIG_CAN is not set
413# CONFIG_IRDA is not set
414# CONFIG_BT is not set
415# CONFIG_AF_RXRPC is not set
416CONFIG_WIRELESS=y
417# CONFIG_CFG80211 is not set
418CONFIG_CFG80211_DEFAULT_PS_VALUE=0
419# CONFIG_WIRELESS_OLD_REGULATORY is not set
420# CONFIG_WIRELESS_EXT is not set
421# CONFIG_LIB80211 is not set
422
423#
424# CFG80211 needs to be enabled for MAC80211
425#
426# CONFIG_WIMAX is not set
427# CONFIG_RFKILL is not set
428# CONFIG_NET_9P is not set
429
430#
431# Device Drivers
432#
433
434#
435# Generic Driver Options
436#
437CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
438# CONFIG_DEVTMPFS is not set
439CONFIG_STANDALONE=y
440# CONFIG_PREVENT_FIRMWARE_BUILD is not set
441# CONFIG_FW_LOADER is not set
442# CONFIG_DEBUG_DRIVER is not set
443# CONFIG_DEBUG_DEVRES is not set
444# CONFIG_SYS_HYPERVISOR is not set
445# CONFIG_CONNECTOR is not set
446CONFIG_MTD=y
447# CONFIG_MTD_DEBUG is not set
448# CONFIG_MTD_TESTS is not set
449# CONFIG_MTD_CONCAT is not set
450CONFIG_MTD_PARTITIONS=y
451# CONFIG_MTD_REDBOOT_PARTS is not set
452CONFIG_MTD_CMDLINE_PARTS=y
453# CONFIG_MTD_AR7_PARTS is not set
454
455#
456# User Modules And Translation Layers
457#
458CONFIG_MTD_CHAR=y
459CONFIG_MTD_BLKDEVS=y
460CONFIG_MTD_BLOCK=y
461# CONFIG_FTL is not set
462# CONFIG_NFTL is not set
463# CONFIG_INFTL is not set
464# CONFIG_RFD_FTL is not set
465# CONFIG_SSFDC is not set
466# CONFIG_MTD_OOPS is not set
467
468#
469# RAM/ROM/Flash chip drivers
470#
471CONFIG_MTD_CFI=y
472# CONFIG_MTD_JEDECPROBE is not set
473CONFIG_MTD_GEN_PROBE=y
474# CONFIG_MTD_CFI_ADV_OPTIONS is not set
475CONFIG_MTD_MAP_BANK_WIDTH_1=y
476CONFIG_MTD_MAP_BANK_WIDTH_2=y
477CONFIG_MTD_MAP_BANK_WIDTH_4=y
478# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
479# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
480# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
481CONFIG_MTD_CFI_I1=y
482CONFIG_MTD_CFI_I2=y
483# CONFIG_MTD_CFI_I4 is not set
484# CONFIG_MTD_CFI_I8 is not set
485CONFIG_MTD_CFI_INTELEXT=y
486# CONFIG_MTD_CFI_AMDSTD is not set
487# CONFIG_MTD_CFI_STAA is not set
488CONFIG_MTD_CFI_UTIL=y
489# CONFIG_MTD_RAM is not set
490# CONFIG_MTD_ROM is not set
491# CONFIG_MTD_ABSENT is not set
492
493#
494# Mapping drivers for chip access
495#
496# CONFIG_MTD_COMPLEX_MAPPINGS is not set
497CONFIG_MTD_PHYSMAP=y
498# CONFIG_MTD_PHYSMAP_COMPAT is not set
499# CONFIG_MTD_PLATRAM is not set
500
501#
502# Self-contained MTD device drivers
503#
504CONFIG_MTD_DATAFLASH=y
505# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
506# CONFIG_MTD_DATAFLASH_OTP is not set
507# CONFIG_MTD_M25P80 is not set
508# CONFIG_MTD_SST25L is not set
509# CONFIG_MTD_SLRAM is not set
510# CONFIG_MTD_PHRAM is not set
511# CONFIG_MTD_MTDRAM is not set
512# CONFIG_MTD_BLOCK2MTD is not set
513
514#
515# Disk-On-Chip Device Drivers
516#
517# CONFIG_MTD_DOC2000 is not set
518# CONFIG_MTD_DOC2001 is not set
519# CONFIG_MTD_DOC2001PLUS is not set
520CONFIG_MTD_NAND=y
521# CONFIG_MTD_NAND_VERIFY_WRITE is not set
522# CONFIG_MTD_NAND_ECC_SMC is not set
523# CONFIG_MTD_NAND_MUSEUM_IDS is not set
524CONFIG_MTD_NAND_IDS=y
525# CONFIG_MTD_NAND_DISKONCHIP is not set
526CONFIG_MTD_NAND_ATMEL=y
527CONFIG_MTD_NAND_ATMEL_ECC_HW=y
528# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
529# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
530# CONFIG_MTD_NAND_NANDSIM is not set
531# CONFIG_MTD_NAND_PLATFORM is not set
532# CONFIG_MTD_ONENAND is not set
533
534#
535# LPDDR flash memory drivers
536#
537# CONFIG_MTD_LPDDR is not set
538
539#
540# UBI - Unsorted block images
541#
542CONFIG_MTD_UBI=y
543CONFIG_MTD_UBI_WL_THRESHOLD=4096
544CONFIG_MTD_UBI_BEB_RESERVE=1
545# CONFIG_MTD_UBI_GLUEBI is not set
546
547#
548# UBI debugging options
549#
550# CONFIG_MTD_UBI_DEBUG is not set
551# CONFIG_PARPORT is not set
552CONFIG_BLK_DEV=y
553# CONFIG_BLK_DEV_COW_COMMON is not set
554CONFIG_BLK_DEV_LOOP=m
555# CONFIG_BLK_DEV_CRYPTOLOOP is not set
556CONFIG_BLK_DEV_NBD=m
557CONFIG_BLK_DEV_RAM=m
558CONFIG_BLK_DEV_RAM_COUNT=16
559CONFIG_BLK_DEV_RAM_SIZE=4096
560# CONFIG_BLK_DEV_XIP is not set
561# CONFIG_CDROM_PKTCDVD is not set
562# CONFIG_ATA_OVER_ETH is not set
563CONFIG_MISC_DEVICES=y
564# CONFIG_ATMEL_PWM is not set
565CONFIG_ATMEL_TCLIB=y
566CONFIG_ATMEL_TCB_CLKSRC=y
567CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
568# CONFIG_ICS932S401 is not set
569# CONFIG_ATMEL_SSC is not set
570# CONFIG_ENCLOSURE_SERVICES is not set
571# CONFIG_ISL29003 is not set
572# CONFIG_C2PORT is not set
573
574#
575# EEPROM support
576#
577# CONFIG_EEPROM_AT24 is not set
578# CONFIG_EEPROM_AT25 is not set
579# CONFIG_EEPROM_LEGACY is not set
580# CONFIG_EEPROM_MAX6875 is not set
581# CONFIG_EEPROM_93CX6 is not set
582
583#
584# SCSI device support
585#
586# CONFIG_RAID_ATTRS is not set
587# CONFIG_SCSI is not set
588# CONFIG_SCSI_DMA is not set
589# CONFIG_SCSI_NETLINK is not set
590# CONFIG_ATA is not set
591# CONFIG_MD is not set
592CONFIG_NETDEVICES=y
593# CONFIG_DUMMY is not set
594# CONFIG_BONDING is not set
595# CONFIG_MACVLAN is not set
596# CONFIG_EQUALIZER is not set
597CONFIG_TUN=m
598# CONFIG_VETH is not set
599CONFIG_PHYLIB=y
600
601#
602# MII PHY device drivers
603#
604# CONFIG_MARVELL_PHY is not set
605# CONFIG_DAVICOM_PHY is not set
606# CONFIG_QSEMI_PHY is not set
607# CONFIG_LXT_PHY is not set
608# CONFIG_CICADA_PHY is not set
609# CONFIG_VITESSE_PHY is not set
610# CONFIG_SMSC_PHY is not set
611# CONFIG_BROADCOM_PHY is not set
612# CONFIG_ICPLUS_PHY is not set
613# CONFIG_REALTEK_PHY is not set
614# CONFIG_NATIONAL_PHY is not set
615# CONFIG_STE10XP is not set
616# CONFIG_LSI_ET1011C_PHY is not set
617# CONFIG_FIXED_PHY is not set
618# CONFIG_MDIO_BITBANG is not set
619CONFIG_NET_ETHERNET=y
620# CONFIG_MII is not set
621CONFIG_MACB=y
622# CONFIG_ENC28J60 is not set
623# CONFIG_ETHOC is not set
624# CONFIG_DNET is not set
625# CONFIG_IBM_NEW_EMAC_ZMII is not set
626# CONFIG_IBM_NEW_EMAC_RGMII is not set
627# CONFIG_IBM_NEW_EMAC_TAH is not set
628# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
629# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
630# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
631# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
632# CONFIG_B44 is not set
633# CONFIG_KS8842 is not set
634# CONFIG_KS8851 is not set
635# CONFIG_KS8851_MLL is not set
636# CONFIG_NETDEV_1000 is not set
637# CONFIG_NETDEV_10000 is not set
638CONFIG_WLAN=y
639# CONFIG_WLAN_PRE80211 is not set
640# CONFIG_WLAN_80211 is not set
641
642#
643# Enable WiMAX (Networking options) to see the WiMAX drivers
644#
645# CONFIG_WAN is not set
646CONFIG_PPP=m
647# CONFIG_PPP_MULTILINK is not set
648CONFIG_PPP_FILTER=y
649CONFIG_PPP_ASYNC=m
650# CONFIG_PPP_SYNC_TTY is not set
651CONFIG_PPP_DEFLATE=m
652CONFIG_PPP_BSDCOMP=m
653CONFIG_PPP_MPPE=m
654CONFIG_PPPOE=m
655# CONFIG_PPPOL2TP is not set
656# CONFIG_SLIP is not set
657CONFIG_SLHC=m
658# CONFIG_NETCONSOLE is not set
659# CONFIG_NETPOLL is not set
660# CONFIG_NET_POLL_CONTROLLER is not set
661# CONFIG_ISDN is not set
662# CONFIG_PHONE is not set
663
664#
665# Input device support
666#
667# CONFIG_INPUT is not set
668
669#
670# Hardware I/O ports
671#
672# CONFIG_SERIO is not set
673# CONFIG_GAMEPORT is not set
674
675#
676# Character devices
677#
678# CONFIG_VT is not set
679# CONFIG_DEVKMEM is not set
680# CONFIG_SERIAL_NONSTANDARD is not set
681
682#
683# Serial drivers
684#
685# CONFIG_SERIAL_8250 is not set
686
687#
688# Non-8250 serial port support
689#
690CONFIG_SERIAL_ATMEL=y
691CONFIG_SERIAL_ATMEL_CONSOLE=y
692CONFIG_SERIAL_ATMEL_PDC=y
693# CONFIG_SERIAL_ATMEL_TTYAT is not set
694# CONFIG_SERIAL_MAX3100 is not set
695CONFIG_SERIAL_CORE=y
696CONFIG_SERIAL_CORE_CONSOLE=y
697CONFIG_UNIX98_PTYS=y
698# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
699# CONFIG_LEGACY_PTYS is not set
700# CONFIG_IPMI_HANDLER is not set
701# CONFIG_HW_RANDOM is not set
702# CONFIG_R3964 is not set
703# CONFIG_RAW_DRIVER is not set
704# CONFIG_TCG_TPM is not set
705CONFIG_I2C=m
706CONFIG_I2C_BOARDINFO=y
707CONFIG_I2C_COMPAT=y
708CONFIG_I2C_CHARDEV=m
709CONFIG_I2C_HELPER_AUTO=y
710CONFIG_I2C_ALGOBIT=m
711
712#
713# I2C Hardware Bus support
714#
715
716#
717# I2C system bus drivers (mostly embedded / system-on-chip)
718#
719# CONFIG_I2C_DESIGNWARE is not set
720CONFIG_I2C_GPIO=m
721# CONFIG_I2C_OCORES is not set
722# CONFIG_I2C_SIMTEC is not set
723
724#
725# External I2C/SMBus adapter drivers
726#
727# CONFIG_I2C_PARPORT_LIGHT is not set
728# CONFIG_I2C_TAOS_EVM is not set
729
730#
731# Other I2C/SMBus bus drivers
732#
733# CONFIG_I2C_PCA_PLATFORM is not set
734# CONFIG_I2C_STUB is not set
735
736#
737# Miscellaneous I2C Chip support
738#
739# CONFIG_DS1682 is not set
740# CONFIG_SENSORS_TSL2550 is not set
741# CONFIG_I2C_DEBUG_CORE is not set
742# CONFIG_I2C_DEBUG_ALGO is not set
743# CONFIG_I2C_DEBUG_BUS is not set
744# CONFIG_I2C_DEBUG_CHIP is not set
745CONFIG_SPI=y
746# CONFIG_SPI_DEBUG is not set
747CONFIG_SPI_MASTER=y
748
749#
750# SPI Master Controller Drivers
751#
752CONFIG_SPI_ATMEL=y
753# CONFIG_SPI_BITBANG is not set
754# CONFIG_SPI_GPIO is not set
755
756#
757# SPI Protocol Masters
758#
759CONFIG_SPI_SPIDEV=m
760# CONFIG_SPI_TLE62X0 is not set
761
762#
763# PPS support
764#
765# CONFIG_PPS is not set
766CONFIG_ARCH_REQUIRE_GPIOLIB=y
767CONFIG_GPIOLIB=y
768# CONFIG_DEBUG_GPIO is not set
769CONFIG_GPIO_SYSFS=y
770
771#
772# Memory mapped GPIO expanders:
773#
774
775#
776# I2C GPIO expanders:
777#
778# CONFIG_GPIO_MAX732X is not set
779# CONFIG_GPIO_PCA953X is not set
780# CONFIG_GPIO_PCF857X is not set
781
782#
783# PCI GPIO expanders:
784#
785
786#
787# SPI GPIO expanders:
788#
789# CONFIG_GPIO_MAX7301 is not set
790# CONFIG_GPIO_MCP23S08 is not set
791# CONFIG_GPIO_MC33880 is not set
792
793#
794# AC97 GPIO expanders:
795#
796# CONFIG_W1 is not set
797# CONFIG_POWER_SUPPLY is not set
798# CONFIG_HWMON is not set
799# CONFIG_THERMAL is not set
800CONFIG_WATCHDOG=y
801# CONFIG_WATCHDOG_NOWAYOUT is not set
802
803#
804# Watchdog Device Drivers
805#
806# CONFIG_SOFT_WATCHDOG is not set
807CONFIG_AT32AP700X_WDT=y
808CONFIG_SSB_POSSIBLE=y
809
810#
811# Sonics Silicon Backplane
812#
813# CONFIG_SSB is not set
814
815#
816# Multifunction device drivers
817#
818# CONFIG_MFD_CORE is not set
819# CONFIG_MFD_SM501 is not set
820# CONFIG_HTC_PASIC3 is not set
821# CONFIG_TPS65010 is not set
822# CONFIG_MFD_TMIO is not set
823# CONFIG_MFD_WM8400 is not set
824# CONFIG_MFD_WM831X is not set
825# CONFIG_MFD_WM8350_I2C is not set
826# CONFIG_MFD_PCF50633 is not set
827# CONFIG_MFD_MC13783 is not set
828# CONFIG_AB3100_CORE is not set
829# CONFIG_EZX_PCAP is not set
830# CONFIG_REGULATOR is not set
831# CONFIG_MEDIA_SUPPORT is not set
832
833#
834# Graphics support
835#
836# CONFIG_VGASTATE is not set
837# CONFIG_VIDEO_OUTPUT_CONTROL is not set
838# CONFIG_FB is not set
839# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
840
841#
842# Display device support
843#
844# CONFIG_DISPLAY_SUPPORT is not set
845# CONFIG_SOUND is not set
846CONFIG_USB_SUPPORT=y
847# CONFIG_USB_ARCH_HAS_HCD is not set
848# CONFIG_USB_ARCH_HAS_OHCI is not set
849# CONFIG_USB_ARCH_HAS_EHCI is not set
850# CONFIG_USB_OTG_WHITELIST is not set
851# CONFIG_USB_OTG_BLACKLIST_HUB is not set
852# CONFIG_USB_GADGET_MUSB_HDRC is not set
853
854#
855# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
856#
857CONFIG_USB_GADGET=y
858# CONFIG_USB_GADGET_DEBUG is not set
859# CONFIG_USB_GADGET_DEBUG_FILES is not set
860# CONFIG_USB_GADGET_DEBUG_FS is not set
861CONFIG_USB_GADGET_VBUS_DRAW=2
862CONFIG_USB_GADGET_SELECTED=y
863# CONFIG_USB_GADGET_AT91 is not set
864CONFIG_USB_GADGET_ATMEL_USBA=y
865CONFIG_USB_ATMEL_USBA=y
866# CONFIG_USB_GADGET_FSL_USB2 is not set
867# CONFIG_USB_GADGET_LH7A40X is not set
868# CONFIG_USB_GADGET_OMAP is not set
869# CONFIG_USB_GADGET_PXA25X is not set
870# CONFIG_USB_GADGET_R8A66597 is not set
871# CONFIG_USB_GADGET_PXA27X is not set
872# CONFIG_USB_GADGET_S3C_HSOTG is not set
873# CONFIG_USB_GADGET_IMX is not set
874# CONFIG_USB_GADGET_S3C2410 is not set
875# CONFIG_USB_GADGET_M66592 is not set
876# CONFIG_USB_GADGET_AMD5536UDC is not set
877# CONFIG_USB_GADGET_FSL_QE is not set
878# CONFIG_USB_GADGET_CI13XXX is not set
879# CONFIG_USB_GADGET_NET2280 is not set
880# CONFIG_USB_GADGET_GOKU is not set
881# CONFIG_USB_GADGET_LANGWELL is not set
882# CONFIG_USB_GADGET_DUMMY_HCD is not set
883CONFIG_USB_GADGET_DUALSPEED=y
884CONFIG_USB_ZERO=m
885# CONFIG_USB_AUDIO is not set
886CONFIG_USB_ETH=m
887CONFIG_USB_ETH_RNDIS=y
888# CONFIG_USB_ETH_EEM is not set
889CONFIG_USB_GADGETFS=m
890CONFIG_USB_FILE_STORAGE=m
891# CONFIG_USB_FILE_STORAGE_TEST is not set
892CONFIG_USB_G_SERIAL=m
893# CONFIG_USB_MIDI_GADGET is not set
894# CONFIG_USB_G_PRINTER is not set
895CONFIG_USB_CDC_COMPOSITE=m
896
897#
898# OTG and related infrastructure
899#
900# CONFIG_USB_GPIO_VBUS is not set
901# CONFIG_NOP_USB_XCEIV is not set
902CONFIG_MMC=y
903# CONFIG_MMC_DEBUG is not set
904# CONFIG_MMC_UNSAFE_RESUME is not set
905
906#
907# MMC/SD/SDIO Card Drivers
908#
909CONFIG_MMC_BLOCK=y
910CONFIG_MMC_BLOCK_BOUNCE=y
911# CONFIG_SDIO_UART is not set
912CONFIG_MMC_TEST=m
913
914#
915# MMC/SD/SDIO Host Controller Drivers
916#
917# CONFIG_MMC_SDHCI is not set
918# CONFIG_MMC_AT91 is not set
919CONFIG_MMC_ATMELMCI=y
920# CONFIG_MMC_ATMELMCI_DMA is not set
921CONFIG_MMC_SPI=m
922# CONFIG_MEMSTICK is not set
923CONFIG_NEW_LEDS=y
924CONFIG_LEDS_CLASS=y
925
926#
927# LED drivers
928#
929CONFIG_LEDS_GPIO=y
930CONFIG_LEDS_GPIO_PLATFORM=y
931# CONFIG_LEDS_LP3944 is not set
932# CONFIG_LEDS_PCA955X is not set
933# CONFIG_LEDS_DAC124S085 is not set
934# CONFIG_LEDS_BD2802 is not set
935
936#
937# LED Triggers
938#
939CONFIG_LEDS_TRIGGERS=y
940CONFIG_LEDS_TRIGGER_TIMER=y
941CONFIG_LEDS_TRIGGER_HEARTBEAT=y
942# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
943# CONFIG_LEDS_TRIGGER_GPIO is not set
944CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
945
946#
947# iptables trigger is under Netfilter config (LED target)
948#
949# CONFIG_ACCESSIBILITY is not set
950CONFIG_RTC_LIB=y
951CONFIG_RTC_CLASS=y
952CONFIG_RTC_HCTOSYS=y
953CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
954# CONFIG_RTC_DEBUG is not set
955
956#
957# RTC interfaces
958#
959CONFIG_RTC_INTF_SYSFS=y
960CONFIG_RTC_INTF_PROC=y
961CONFIG_RTC_INTF_DEV=y
962# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
963# CONFIG_RTC_DRV_TEST is not set
964
965#
966# I2C RTC drivers
967#
968# CONFIG_RTC_DRV_DS1307 is not set
969# CONFIG_RTC_DRV_DS1374 is not set
970# CONFIG_RTC_DRV_DS1672 is not set
971# CONFIG_RTC_DRV_MAX6900 is not set
972# CONFIG_RTC_DRV_RS5C372 is not set
973# CONFIG_RTC_DRV_ISL1208 is not set
974# CONFIG_RTC_DRV_X1205 is not set
975# CONFIG_RTC_DRV_PCF8563 is not set
976# CONFIG_RTC_DRV_PCF8583 is not set
977# CONFIG_RTC_DRV_M41T80 is not set
978# CONFIG_RTC_DRV_S35390A is not set
979# CONFIG_RTC_DRV_FM3130 is not set
980# CONFIG_RTC_DRV_RX8581 is not set
981# CONFIG_RTC_DRV_RX8025 is not set
982
983#
984# SPI RTC drivers
985#
986# CONFIG_RTC_DRV_M41T94 is not set
987# CONFIG_RTC_DRV_DS1305 is not set
988# CONFIG_RTC_DRV_DS1390 is not set
989# CONFIG_RTC_DRV_MAX6902 is not set
990# CONFIG_RTC_DRV_R9701 is not set
991# CONFIG_RTC_DRV_RS5C348 is not set
992# CONFIG_RTC_DRV_DS3234 is not set
993# CONFIG_RTC_DRV_PCF2123 is not set
994
995#
996# Platform RTC drivers
997#
998# CONFIG_RTC_DRV_DS1286 is not set
999# CONFIG_RTC_DRV_DS1511 is not set
1000# CONFIG_RTC_DRV_DS1553 is not set
1001# CONFIG_RTC_DRV_DS1742 is not set
1002# CONFIG_RTC_DRV_STK17TA8 is not set
1003# CONFIG_RTC_DRV_M48T86 is not set
1004# CONFIG_RTC_DRV_M48T35 is not set
1005# CONFIG_RTC_DRV_M48T59 is not set
1006# CONFIG_RTC_DRV_BQ4802 is not set
1007# CONFIG_RTC_DRV_V3020 is not set
1008
1009#
1010# on-CPU RTC drivers
1011#
1012CONFIG_RTC_DRV_AT32AP700X=y
1013CONFIG_DMADEVICES=y
1014
1015#
1016# DMA Devices
1017#
1018CONFIG_DW_DMAC=y
1019CONFIG_DMA_ENGINE=y
1020
1021#
1022# DMA Clients
1023#
1024# CONFIG_NET_DMA is not set
1025# CONFIG_ASYNC_TX_DMA is not set
1026# CONFIG_DMATEST is not set
1027# CONFIG_AUXDISPLAY is not set
1028# CONFIG_UIO is not set
1029
1030#
1031# TI VLYNQ
1032#
1033# CONFIG_STAGING is not set
1034
1035#
1036# File systems
1037#
1038CONFIG_EXT2_FS=y
1039# CONFIG_EXT2_FS_XATTR is not set
1040# CONFIG_EXT2_FS_XIP is not set
1041CONFIG_EXT3_FS=y
1042# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
1043# CONFIG_EXT3_FS_XATTR is not set
1044# CONFIG_EXT4_FS is not set
1045CONFIG_JBD=y
1046# CONFIG_JBD_DEBUG is not set
1047# CONFIG_REISERFS_FS is not set
1048# CONFIG_JFS_FS is not set
1049# CONFIG_FS_POSIX_ACL is not set
1050# CONFIG_XFS_FS is not set
1051# CONFIG_GFS2_FS is not set
1052# CONFIG_OCFS2_FS is not set
1053# CONFIG_BTRFS_FS is not set
1054# CONFIG_NILFS2_FS is not set
1055CONFIG_FILE_LOCKING=y
1056CONFIG_FSNOTIFY=y
1057# CONFIG_DNOTIFY is not set
1058CONFIG_INOTIFY=y
1059CONFIG_INOTIFY_USER=y
1060# CONFIG_QUOTA is not set
1061# CONFIG_AUTOFS_FS is not set
1062# CONFIG_AUTOFS4_FS is not set
1063CONFIG_FUSE_FS=m
1064# CONFIG_CUSE is not set
1065
1066#
1067# Caches
1068#
1069# CONFIG_FSCACHE is not set
1070
1071#
1072# CD-ROM/DVD Filesystems
1073#
1074# CONFIG_ISO9660_FS is not set
1075# CONFIG_UDF_FS is not set
1076
1077#
1078# DOS/FAT/NT Filesystems
1079#
1080CONFIG_FAT_FS=m
1081CONFIG_MSDOS_FS=m
1082CONFIG_VFAT_FS=m
1083CONFIG_FAT_DEFAULT_CODEPAGE=850
1084CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1085# CONFIG_NTFS_FS is not set
1086
1087#
1088# Pseudo filesystems
1089#
1090CONFIG_PROC_FS=y
1091# CONFIG_PROC_KCORE is not set
1092CONFIG_PROC_SYSCTL=y
1093CONFIG_PROC_PAGE_MONITOR=y
1094CONFIG_SYSFS=y
1095CONFIG_TMPFS=y
1096# CONFIG_TMPFS_POSIX_ACL is not set
1097# CONFIG_HUGETLB_PAGE is not set
1098CONFIG_CONFIGFS_FS=m
1099CONFIG_MISC_FILESYSTEMS=y
1100# CONFIG_ADFS_FS is not set
1101# CONFIG_AFFS_FS is not set
1102# CONFIG_HFS_FS is not set
1103# CONFIG_HFSPLUS_FS is not set
1104# CONFIG_BEFS_FS is not set
1105# CONFIG_BFS_FS is not set
1106# CONFIG_EFS_FS is not set
1107CONFIG_JFFS2_FS=y
1108CONFIG_JFFS2_FS_DEBUG=0
1109CONFIG_JFFS2_FS_WRITEBUFFER=y
1110# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1111# CONFIG_JFFS2_SUMMARY is not set
1112# CONFIG_JFFS2_FS_XATTR is not set
1113# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1114CONFIG_JFFS2_ZLIB=y
1115# CONFIG_JFFS2_LZO is not set
1116CONFIG_JFFS2_RTIME=y
1117# CONFIG_JFFS2_RUBIN is not set
1118# CONFIG_UBIFS_FS is not set
1119# CONFIG_CRAMFS is not set
1120# CONFIG_SQUASHFS is not set
1121# CONFIG_VXFS_FS is not set
1122# CONFIG_MINIX_FS is not set
1123# CONFIG_OMFS_FS is not set
1124# CONFIG_HPFS_FS is not set
1125# CONFIG_QNX4FS_FS is not set
1126# CONFIG_ROMFS_FS is not set
1127# CONFIG_SYSV_FS is not set
1128CONFIG_UFS_FS=y
1129# CONFIG_UFS_FS_WRITE is not set
1130# CONFIG_UFS_DEBUG is not set
1131CONFIG_NETWORK_FILESYSTEMS=y
1132CONFIG_NFS_FS=y
1133CONFIG_NFS_V3=y
1134# CONFIG_NFS_V3_ACL is not set
1135# CONFIG_NFS_V4 is not set
1136CONFIG_ROOT_NFS=y
1137CONFIG_NFSD=m
1138CONFIG_NFSD_V3=y
1139# CONFIG_NFSD_V3_ACL is not set
1140# CONFIG_NFSD_V4 is not set
1141CONFIG_LOCKD=y
1142CONFIG_LOCKD_V4=y
1143CONFIG_EXPORTFS=m
1144CONFIG_NFS_COMMON=y
1145CONFIG_SUNRPC=y
1146# CONFIG_RPCSEC_GSS_KRB5 is not set
1147# CONFIG_RPCSEC_GSS_SPKM3 is not set
1148CONFIG_SMB_FS=m
1149# CONFIG_SMB_NLS_DEFAULT is not set
1150CONFIG_CIFS=m
1151# CONFIG_CIFS_STATS is not set
1152# CONFIG_CIFS_WEAK_PW_HASH is not set
1153# CONFIG_CIFS_XATTR is not set
1154# CONFIG_CIFS_DEBUG2 is not set
1155# CONFIG_CIFS_EXPERIMENTAL is not set
1156# CONFIG_NCP_FS is not set
1157# CONFIG_CODA_FS is not set
1158# CONFIG_AFS_FS is not set
1159
1160#
1161# Partition Types
1162#
1163# CONFIG_PARTITION_ADVANCED is not set
1164CONFIG_MSDOS_PARTITION=y
1165CONFIG_NLS=m
1166CONFIG_NLS_DEFAULT="iso8859-1"
1167CONFIG_NLS_CODEPAGE_437=m
1168# CONFIG_NLS_CODEPAGE_737 is not set
1169# CONFIG_NLS_CODEPAGE_775 is not set
1170CONFIG_NLS_CODEPAGE_850=m
1171# CONFIG_NLS_CODEPAGE_852 is not set
1172# CONFIG_NLS_CODEPAGE_855 is not set
1173# CONFIG_NLS_CODEPAGE_857 is not set
1174# CONFIG_NLS_CODEPAGE_860 is not set
1175# CONFIG_NLS_CODEPAGE_861 is not set
1176# CONFIG_NLS_CODEPAGE_862 is not set
1177# CONFIG_NLS_CODEPAGE_863 is not set
1178# CONFIG_NLS_CODEPAGE_864 is not set
1179# CONFIG_NLS_CODEPAGE_865 is not set
1180# CONFIG_NLS_CODEPAGE_866 is not set
1181# CONFIG_NLS_CODEPAGE_869 is not set
1182# CONFIG_NLS_CODEPAGE_936 is not set
1183# CONFIG_NLS_CODEPAGE_950 is not set
1184# CONFIG_NLS_CODEPAGE_932 is not set
1185# CONFIG_NLS_CODEPAGE_949 is not set
1186# CONFIG_NLS_CODEPAGE_874 is not set
1187# CONFIG_NLS_ISO8859_8 is not set
1188# CONFIG_NLS_CODEPAGE_1250 is not set
1189# CONFIG_NLS_CODEPAGE_1251 is not set
1190# CONFIG_NLS_ASCII is not set
1191CONFIG_NLS_ISO8859_1=m
1192# CONFIG_NLS_ISO8859_2 is not set
1193# CONFIG_NLS_ISO8859_3 is not set
1194# CONFIG_NLS_ISO8859_4 is not set
1195# CONFIG_NLS_ISO8859_5 is not set
1196# CONFIG_NLS_ISO8859_6 is not set
1197# CONFIG_NLS_ISO8859_7 is not set
1198# CONFIG_NLS_ISO8859_9 is not set
1199# CONFIG_NLS_ISO8859_13 is not set
1200# CONFIG_NLS_ISO8859_14 is not set
1201# CONFIG_NLS_ISO8859_15 is not set
1202# CONFIG_NLS_KOI8_R is not set
1203# CONFIG_NLS_KOI8_U is not set
1204CONFIG_NLS_UTF8=m
1205# CONFIG_DLM is not set
1206
1207#
1208# Kernel hacking
1209#
1210# CONFIG_PRINTK_TIME is not set
1211CONFIG_ENABLE_WARN_DEPRECATED=y
1212CONFIG_ENABLE_MUST_CHECK=y
1213CONFIG_FRAME_WARN=1024
1214CONFIG_MAGIC_SYSRQ=y
1215# CONFIG_STRIP_ASM_SYMS is not set
1216# CONFIG_UNUSED_SYMBOLS is not set
1217CONFIG_DEBUG_FS=y
1218# CONFIG_HEADERS_CHECK is not set
1219CONFIG_DEBUG_KERNEL=y
1220# CONFIG_DEBUG_SHIRQ is not set
1221CONFIG_DETECT_SOFTLOCKUP=y
1222# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1223CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1224CONFIG_DETECT_HUNG_TASK=y
1225# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1226CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1227CONFIG_SCHED_DEBUG=y
1228# CONFIG_SCHEDSTATS is not set
1229# CONFIG_TIMER_STATS is not set
1230# CONFIG_DEBUG_OBJECTS is not set
1231# CONFIG_SLUB_DEBUG_ON is not set
1232# CONFIG_SLUB_STATS is not set
1233# CONFIG_DEBUG_RT_MUTEXES is not set
1234# CONFIG_RT_MUTEX_TESTER is not set
1235# CONFIG_DEBUG_SPINLOCK is not set
1236# CONFIG_DEBUG_MUTEXES is not set
1237# CONFIG_DEBUG_LOCK_ALLOC is not set
1238# CONFIG_PROVE_LOCKING is not set
1239# CONFIG_LOCK_STAT is not set
1240# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1241# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1242CONFIG_STACKTRACE=y
1243# CONFIG_DEBUG_KOBJECT is not set
1244CONFIG_DEBUG_BUGVERBOSE=y
1245# CONFIG_DEBUG_INFO is not set
1246# CONFIG_DEBUG_VM is not set
1247# CONFIG_DEBUG_WRITECOUNT is not set
1248# CONFIG_DEBUG_MEMORY_INIT is not set
1249# CONFIG_DEBUG_LIST is not set
1250# CONFIG_DEBUG_SG is not set
1251# CONFIG_DEBUG_NOTIFIERS is not set
1252# CONFIG_DEBUG_CREDENTIALS is not set
1253CONFIG_FRAME_POINTER=y
1254# CONFIG_BOOT_PRINTK_DELAY is not set
1255# CONFIG_RCU_TORTURE_TEST is not set
1256# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1257# CONFIG_KPROBES_SANITY_TEST is not set
1258# CONFIG_BACKTRACE_SELF_TEST is not set
1259# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1260# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1261# CONFIG_LKDTM is not set
1262# CONFIG_FAULT_INJECTION is not set
1263# CONFIG_PAGE_POISONING is not set
1264CONFIG_NOP_TRACER=y
1265CONFIG_RING_BUFFER=y
1266CONFIG_EVENT_TRACING=y
1267CONFIG_CONTEXT_SWITCH_TRACER=y
1268CONFIG_RING_BUFFER_ALLOW_SWAP=y
1269CONFIG_TRACING=y
1270CONFIG_TRACING_SUPPORT=y
1271CONFIG_FTRACE=y
1272# CONFIG_IRQSOFF_TRACER is not set
1273# CONFIG_SCHED_TRACER is not set
1274# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1275# CONFIG_BOOT_TRACER is not set
1276CONFIG_BRANCH_PROFILE_NONE=y
1277# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1278# CONFIG_PROFILE_ALL_BRANCHES is not set
1279# CONFIG_KMEMTRACE is not set
1280# CONFIG_WORKQUEUE_TRACER is not set
1281# CONFIG_BLK_DEV_IO_TRACE is not set
1282# CONFIG_RING_BUFFER_BENCHMARK is not set
1283# CONFIG_DYNAMIC_DEBUG is not set
1284# CONFIG_SAMPLES is not set
1285
1286#
1287# Security options
1288#
1289# CONFIG_KEYS is not set
1290# CONFIG_SECURITY is not set
1291# CONFIG_SECURITYFS is not set
1292# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1293CONFIG_CRYPTO=y
1294
1295#
1296# Crypto core or helper
1297#
1298# CONFIG_CRYPTO_FIPS is not set
1299CONFIG_CRYPTO_ALGAPI=y
1300CONFIG_CRYPTO_ALGAPI2=y
1301CONFIG_CRYPTO_AEAD=y
1302CONFIG_CRYPTO_AEAD2=y
1303CONFIG_CRYPTO_BLKCIPHER=y
1304CONFIG_CRYPTO_BLKCIPHER2=y
1305CONFIG_CRYPTO_HASH=y
1306CONFIG_CRYPTO_HASH2=y
1307CONFIG_CRYPTO_RNG=m
1308CONFIG_CRYPTO_RNG2=y
1309CONFIG_CRYPTO_PCOMP=y
1310CONFIG_CRYPTO_MANAGER=y
1311CONFIG_CRYPTO_MANAGER2=y
1312# CONFIG_CRYPTO_GF128MUL is not set
1313# CONFIG_CRYPTO_NULL is not set
1314CONFIG_CRYPTO_WORKQUEUE=y
1315# CONFIG_CRYPTO_CRYPTD is not set
1316CONFIG_CRYPTO_AUTHENC=y
1317# CONFIG_CRYPTO_TEST is not set
1318
1319#
1320# Authenticated Encryption with Associated Data
1321#
1322# CONFIG_CRYPTO_CCM is not set
1323# CONFIG_CRYPTO_GCM is not set
1324# CONFIG_CRYPTO_SEQIV is not set
1325
1326#
1327# Block modes
1328#
1329CONFIG_CRYPTO_CBC=y
1330# CONFIG_CRYPTO_CTR is not set
1331# CONFIG_CRYPTO_CTS is not set
1332CONFIG_CRYPTO_ECB=m
1333# CONFIG_CRYPTO_LRW is not set
1334CONFIG_CRYPTO_PCBC=m
1335# CONFIG_CRYPTO_XTS is not set
1336
1337#
1338# Hash modes
1339#
1340CONFIG_CRYPTO_HMAC=y
1341# CONFIG_CRYPTO_XCBC is not set
1342# CONFIG_CRYPTO_VMAC is not set
1343
1344#
1345# Digest
1346#
1347# CONFIG_CRYPTO_CRC32C is not set
1348# CONFIG_CRYPTO_GHASH is not set
1349# CONFIG_CRYPTO_MD4 is not set
1350CONFIG_CRYPTO_MD5=y
1351# CONFIG_CRYPTO_MICHAEL_MIC is not set
1352# CONFIG_CRYPTO_RMD128 is not set
1353# CONFIG_CRYPTO_RMD160 is not set
1354# CONFIG_CRYPTO_RMD256 is not set
1355# CONFIG_CRYPTO_RMD320 is not set
1356CONFIG_CRYPTO_SHA1=y
1357# CONFIG_CRYPTO_SHA256 is not set
1358# CONFIG_CRYPTO_SHA512 is not set
1359# CONFIG_CRYPTO_TGR192 is not set
1360# CONFIG_CRYPTO_WP512 is not set
1361
1362#
1363# Ciphers
1364#
1365CONFIG_CRYPTO_AES=m
1366# CONFIG_CRYPTO_ANUBIS is not set
1367CONFIG_CRYPTO_ARC4=m
1368# CONFIG_CRYPTO_BLOWFISH is not set
1369# CONFIG_CRYPTO_CAMELLIA is not set
1370# CONFIG_CRYPTO_CAST5 is not set
1371# CONFIG_CRYPTO_CAST6 is not set
1372CONFIG_CRYPTO_DES=y
1373# CONFIG_CRYPTO_FCRYPT is not set
1374# CONFIG_CRYPTO_KHAZAD is not set
1375# CONFIG_CRYPTO_SALSA20 is not set
1376# CONFIG_CRYPTO_SEED is not set
1377# CONFIG_CRYPTO_SERPENT is not set
1378# CONFIG_CRYPTO_TEA is not set
1379# CONFIG_CRYPTO_TWOFISH is not set
1380
1381#
1382# Compression
1383#
1384CONFIG_CRYPTO_DEFLATE=y
1385# CONFIG_CRYPTO_ZLIB is not set
1386# CONFIG_CRYPTO_LZO is not set
1387
1388#
1389# Random Number Generation
1390#
1391CONFIG_CRYPTO_ANSI_CPRNG=m
1392CONFIG_CRYPTO_HW=y
1393CONFIG_BINARY_PRINTF=y
1394
1395#
1396# Library routines
1397#
1398CONFIG_BITREVERSE=y
1399CONFIG_GENERIC_FIND_LAST_BIT=y
1400CONFIG_CRC_CCITT=m
1401# CONFIG_CRC16 is not set
1402# CONFIG_CRC_T10DIF is not set
1403CONFIG_CRC_ITU_T=m
1404CONFIG_CRC32=y
1405CONFIG_CRC7=m
1406# CONFIG_LIBCRC32C is not set
1407CONFIG_ZLIB_INFLATE=y
1408CONFIG_ZLIB_DEFLATE=y
1409CONFIG_DECOMPRESS_GZIP=y
1410CONFIG_GENERIC_ALLOCATOR=y
1411CONFIG_HAS_IOMEM=y
1412CONFIG_HAS_IOPORT=y
1413CONFIG_HAS_DMA=y
1414CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
new file mode 100644
index 000000000000..01e913d66be4
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -0,0 +1,1549 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.32-rc5
4# Thu Nov 5 15:33:09 2009
5#
6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y
8CONFIG_GENERIC_HARDIRQS=y
9CONFIG_STACKTRACE_SUPPORT=y
10CONFIG_LOCKDEP_SUPPORT=y
11CONFIG_TRACE_IRQFLAGS_SUPPORT=y
12CONFIG_HARDIRQS_SW_RESEND=y
13CONFIG_GENERIC_IRQ_PROBE=y
14CONFIG_RWSEM_GENERIC_SPINLOCK=y
15CONFIG_GENERIC_TIME=y
16CONFIG_GENERIC_CLOCKEVENTS=y
17# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
18# CONFIG_ARCH_HAS_ILOG2_U32 is not set
19# CONFIG_ARCH_HAS_ILOG2_U64 is not set
20CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
25
26#
27# General setup
28#
29CONFIG_EXPERIMENTAL=y
30CONFIG_BROKEN_ON_SMP=y
31CONFIG_INIT_ENV_ARG_LIMIT=32
32CONFIG_LOCALVERSION=""
33# CONFIG_LOCALVERSION_AUTO is not set
34CONFIG_SWAP=y
35CONFIG_SYSVIPC=y
36CONFIG_SYSVIPC_SYSCTL=y
37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
39CONFIG_BSD_PROCESS_ACCT=y
40CONFIG_BSD_PROCESS_ACCT_V3=y
41# CONFIG_TASKSTATS is not set
42# CONFIG_AUDIT is not set
43
44#
45# RCU Subsystem
46#
47CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set
49# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set
52# CONFIG_TREE_RCU_TRACE is not set
53# CONFIG_IKCONFIG is not set
54CONFIG_LOG_BUF_SHIFT=14
55# CONFIG_GROUP_SCHED is not set
56# CONFIG_CGROUPS is not set
57CONFIG_SYSFS_DEPRECATED=y
58CONFIG_SYSFS_DEPRECATED_V2=y
59# CONFIG_RELAY is not set
60# CONFIG_NAMESPACES is not set
61CONFIG_BLK_DEV_INITRD=y
62CONFIG_INITRAMFS_SOURCE=""
63CONFIG_RD_GZIP=y
64# CONFIG_RD_BZIP2 is not set
65# CONFIG_RD_LZMA is not set
66CONFIG_CC_OPTIMIZE_FOR_SIZE=y
67CONFIG_SYSCTL=y
68CONFIG_ANON_INODES=y
69CONFIG_EMBEDDED=y
70# CONFIG_SYSCTL_SYSCALL is not set
71CONFIG_KALLSYMS=y
72# CONFIG_KALLSYMS_ALL is not set
73# CONFIG_KALLSYMS_EXTRA_PASS is not set
74CONFIG_HOTPLUG=y
75CONFIG_PRINTK=y
76CONFIG_BUG=y
77CONFIG_ELF_CORE=y
78# CONFIG_BASE_FULL is not set
79CONFIG_FUTEX=y
80CONFIG_EPOLL=y
81CONFIG_SIGNALFD=y
82CONFIG_TIMERFD=y
83CONFIG_EVENTFD=y
84CONFIG_SHMEM=y
85CONFIG_AIO=y
86
87#
88# Kernel Performance Events And Counters
89#
90CONFIG_VM_EVENT_COUNTERS=y
91CONFIG_SLUB_DEBUG=y
92# CONFIG_COMPAT_BRK is not set
93# CONFIG_SLAB is not set
94CONFIG_SLUB=y
95# CONFIG_SLOB is not set
96CONFIG_PROFILING=y
97CONFIG_TRACEPOINTS=y
98CONFIG_OPROFILE=m
99CONFIG_HAVE_OPROFILE=y
100CONFIG_KPROBES=y
101CONFIG_HAVE_KPROBES=y
102CONFIG_HAVE_CLK=y
103
104#
105# GCOV-based kernel profiling
106#
107# CONFIG_GCOV_KERNEL is not set
108CONFIG_SLOW_WORK=y
109# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
110CONFIG_SLABINFO=y
111CONFIG_RT_MUTEXES=y
112CONFIG_BASE_SMALL=1
113CONFIG_MODULES=y
114# CONFIG_MODULE_FORCE_LOAD is not set
115CONFIG_MODULE_UNLOAD=y
116CONFIG_MODULE_FORCE_UNLOAD=y
117# CONFIG_MODVERSIONS is not set
118# CONFIG_MODULE_SRCVERSION_ALL is not set
119CONFIG_BLOCK=y
120CONFIG_LBDAF=y
121# CONFIG_BLK_DEV_BSG is not set
122# CONFIG_BLK_DEV_INTEGRITY is not set
123
124#
125# IO Schedulers
126#
127CONFIG_IOSCHED_NOOP=y
128# CONFIG_IOSCHED_AS is not set
129# CONFIG_IOSCHED_DEADLINE is not set
130CONFIG_IOSCHED_CFQ=y
131# CONFIG_DEFAULT_AS is not set
132# CONFIG_DEFAULT_DEADLINE is not set
133CONFIG_DEFAULT_CFQ=y
134# CONFIG_DEFAULT_NOOP is not set
135CONFIG_DEFAULT_IOSCHED="cfq"
136CONFIG_FREEZER=y
137
138#
139# System Type and features
140#
141CONFIG_TICK_ONESHOT=y
142CONFIG_NO_HZ=y
143CONFIG_HIGH_RES_TIMERS=y
144CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
145CONFIG_SUBARCH_AVR32B=y
146CONFIG_MMU=y
147CONFIG_PERFORMANCE_COUNTERS=y
148CONFIG_PLATFORM_AT32AP=y
149CONFIG_CPU_AT32AP700X=y
150CONFIG_CPU_AT32AP7000=y
151CONFIG_BOARD_ATNGW100_COMMON=y
152# CONFIG_BOARD_ATSTK1000 is not set
153# CONFIG_BOARD_ATNGW100_MKI is not set
154CONFIG_BOARD_ATNGW100_MKII=y
155# CONFIG_BOARD_HAMMERHEAD is not set
156# CONFIG_BOARD_FAVR_32 is not set
157# CONFIG_BOARD_MERISC is not set
158# CONFIG_BOARD_MIMC200 is not set
159CONFIG_BOARD_ATNGW100_MKII_LCD=y
160# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
161CONFIG_BOARD_ATNGW100_EVKLCD10X=y
162# CONFIG_BOARD_ATNGW100_MRMT is not set
163CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
164# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set
165# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
166CONFIG_LOADER_U_BOOT=y
167
168#
169# Atmel AVR32 AP options
170#
171# CONFIG_AP700X_32_BIT_SMC is not set
172CONFIG_AP700X_16_BIT_SMC=y
173# CONFIG_AP700X_8_BIT_SMC is not set
174CONFIG_LOAD_ADDRESS=0x10000000
175CONFIG_ENTRY_ADDRESS=0x90000000
176CONFIG_PHYS_OFFSET=0x10000000
177CONFIG_PREEMPT_NONE=y
178# CONFIG_PREEMPT_VOLUNTARY is not set
179# CONFIG_PREEMPT is not set
180CONFIG_QUICKLIST=y
181# CONFIG_HAVE_ARCH_BOOTMEM is not set
182# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
183# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
184CONFIG_ARCH_FLATMEM_ENABLE=y
185# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
186# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
187CONFIG_SELECT_MEMORY_MODEL=y
188CONFIG_FLATMEM_MANUAL=y
189# CONFIG_DISCONTIGMEM_MANUAL is not set
190# CONFIG_SPARSEMEM_MANUAL is not set
191CONFIG_FLATMEM=y
192CONFIG_FLAT_NODE_MEM_MAP=y
193CONFIG_PAGEFLAGS_EXTENDED=y
194CONFIG_SPLIT_PTLOCK_CPUS=4
195# CONFIG_PHYS_ADDR_T_64BIT is not set
196CONFIG_ZONE_DMA_FLAG=0
197CONFIG_NR_QUICK=2
198CONFIG_VIRT_TO_BUS=y
199CONFIG_HAVE_MLOCK=y
200CONFIG_HAVE_MLOCKED_PAGE_BIT=y
201# CONFIG_KSM is not set
202CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
203# CONFIG_OWNERSHIP_TRACE is not set
204CONFIG_NMI_DEBUGGING=y
205# CONFIG_HZ_100 is not set
206CONFIG_HZ_250=y
207# CONFIG_HZ_300 is not set
208# CONFIG_HZ_1000 is not set
209CONFIG_HZ=250
210CONFIG_SCHED_HRTICK=y
211CONFIG_CMDLINE=""
212
213#
214# Power management options
215#
216CONFIG_PM=y
217# CONFIG_PM_DEBUG is not set
218CONFIG_PM_SLEEP=y
219CONFIG_SUSPEND=y
220CONFIG_SUSPEND_FREEZER=y
221# CONFIG_PM_RUNTIME is not set
222CONFIG_ARCH_SUSPEND_POSSIBLE=y
223
224#
225# CPU Frequency scaling
226#
227CONFIG_CPU_FREQ=y
228CONFIG_CPU_FREQ_TABLE=y
229# CONFIG_CPU_FREQ_DEBUG is not set
230# CONFIG_CPU_FREQ_STAT is not set
231# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
232# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
233# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
234CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
235# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
236CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
237# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
238CONFIG_CPU_FREQ_GOV_USERSPACE=y
239CONFIG_CPU_FREQ_GOV_ONDEMAND=y
240# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
241CONFIG_CPU_FREQ_AT32AP=y
242
243#
244# Bus options
245#
246# CONFIG_ARCH_SUPPORTS_MSI is not set
247# CONFIG_PCCARD is not set
248
249#
250# Executable file formats
251#
252CONFIG_BINFMT_ELF=y
253# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
254# CONFIG_HAVE_AOUT is not set
255# CONFIG_BINFMT_MISC is not set
256CONFIG_NET=y
257
258#
259# Networking options
260#
261CONFIG_PACKET=y
262CONFIG_PACKET_MMAP=y
263CONFIG_UNIX=y
264CONFIG_XFRM=y
265CONFIG_XFRM_USER=y
266# CONFIG_XFRM_SUB_POLICY is not set
267# CONFIG_XFRM_MIGRATE is not set
268# CONFIG_XFRM_STATISTICS is not set
269CONFIG_XFRM_IPCOMP=y
270CONFIG_NET_KEY=y
271# CONFIG_NET_KEY_MIGRATE is not set
272CONFIG_INET=y
273CONFIG_IP_MULTICAST=y
274CONFIG_IP_ADVANCED_ROUTER=y
275CONFIG_ASK_IP_FIB_HASH=y
276# CONFIG_IP_FIB_TRIE is not set
277CONFIG_IP_FIB_HASH=y
278# CONFIG_IP_MULTIPLE_TABLES is not set
279# CONFIG_IP_ROUTE_MULTIPATH is not set
280# CONFIG_IP_ROUTE_VERBOSE is not set
281CONFIG_IP_PNP=y
282CONFIG_IP_PNP_DHCP=y
283# CONFIG_IP_PNP_BOOTP is not set
284# CONFIG_IP_PNP_RARP is not set
285# CONFIG_NET_IPIP is not set
286# CONFIG_NET_IPGRE is not set
287CONFIG_IP_MROUTE=y
288CONFIG_IP_PIMSM_V1=y
289# CONFIG_IP_PIMSM_V2 is not set
290# CONFIG_ARPD is not set
291CONFIG_SYN_COOKIES=y
292CONFIG_INET_AH=y
293CONFIG_INET_ESP=y
294CONFIG_INET_IPCOMP=y
295CONFIG_INET_XFRM_TUNNEL=y
296CONFIG_INET_TUNNEL=y
297CONFIG_INET_XFRM_MODE_TRANSPORT=y
298CONFIG_INET_XFRM_MODE_TUNNEL=y
299CONFIG_INET_XFRM_MODE_BEET=y
300# CONFIG_INET_LRO is not set
301CONFIG_INET_DIAG=y
302CONFIG_INET_TCP_DIAG=y
303# CONFIG_TCP_CONG_ADVANCED is not set
304CONFIG_TCP_CONG_CUBIC=y
305CONFIG_DEFAULT_TCP_CONG="cubic"
306# CONFIG_TCP_MD5SIG is not set
307CONFIG_IPV6=y
308# CONFIG_IPV6_PRIVACY is not set
309# CONFIG_IPV6_ROUTER_PREF is not set
310# CONFIG_IPV6_OPTIMISTIC_DAD is not set
311CONFIG_INET6_AH=y
312CONFIG_INET6_ESP=y
313CONFIG_INET6_IPCOMP=y
314# CONFIG_IPV6_MIP6 is not set
315CONFIG_INET6_XFRM_TUNNEL=y
316CONFIG_INET6_TUNNEL=y
317CONFIG_INET6_XFRM_MODE_TRANSPORT=y
318CONFIG_INET6_XFRM_MODE_TUNNEL=y
319CONFIG_INET6_XFRM_MODE_BEET=y
320# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
321CONFIG_IPV6_SIT=y
322CONFIG_IPV6_NDISC_NODETYPE=y
323# CONFIG_IPV6_TUNNEL is not set
324# CONFIG_IPV6_MULTIPLE_TABLES is not set
325# CONFIG_IPV6_MROUTE is not set
326# CONFIG_NETWORK_SECMARK is not set
327CONFIG_NETFILTER=y
328# CONFIG_NETFILTER_DEBUG is not set
329# CONFIG_NETFILTER_ADVANCED is not set
330
331#
332# Core Netfilter Configuration
333#
334CONFIG_NETFILTER_NETLINK=m
335CONFIG_NETFILTER_NETLINK_LOG=m
336CONFIG_NF_CONNTRACK=m
337CONFIG_NF_CONNTRACK_FTP=m
338CONFIG_NF_CONNTRACK_IRC=m
339CONFIG_NF_CONNTRACK_SIP=m
340CONFIG_NF_CT_NETLINK=m
341CONFIG_NETFILTER_XTABLES=y
342CONFIG_NETFILTER_XT_TARGET_MARK=m
343CONFIG_NETFILTER_XT_TARGET_NFLOG=m
344CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
345CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
346CONFIG_NETFILTER_XT_MATCH_MARK=m
347CONFIG_NETFILTER_XT_MATCH_POLICY=m
348CONFIG_NETFILTER_XT_MATCH_STATE=m
349# CONFIG_IP_VS is not set
350
351#
352# IP: Netfilter Configuration
353#
354CONFIG_NF_DEFRAG_IPV4=m
355CONFIG_NF_CONNTRACK_IPV4=m
356CONFIG_NF_CONNTRACK_PROC_COMPAT=y
357CONFIG_IP_NF_IPTABLES=m
358CONFIG_IP_NF_FILTER=m
359CONFIG_IP_NF_TARGET_REJECT=m
360CONFIG_IP_NF_TARGET_LOG=m
361# CONFIG_IP_NF_TARGET_ULOG is not set
362CONFIG_NF_NAT=m
363CONFIG_NF_NAT_NEEDED=y
364CONFIG_IP_NF_TARGET_MASQUERADE=m
365CONFIG_NF_NAT_FTP=m
366CONFIG_NF_NAT_IRC=m
367# CONFIG_NF_NAT_TFTP is not set
368# CONFIG_NF_NAT_AMANDA is not set
369# CONFIG_NF_NAT_PPTP is not set
370# CONFIG_NF_NAT_H323 is not set
371CONFIG_NF_NAT_SIP=m
372CONFIG_IP_NF_MANGLE=m
373
374#
375# IPv6: Netfilter Configuration
376#
377CONFIG_NF_CONNTRACK_IPV6=m
378CONFIG_IP6_NF_IPTABLES=m
379CONFIG_IP6_NF_MATCH_IPV6HEADER=m
380CONFIG_IP6_NF_TARGET_LOG=m
381CONFIG_IP6_NF_FILTER=m
382CONFIG_IP6_NF_TARGET_REJECT=m
383CONFIG_IP6_NF_MANGLE=m
384# CONFIG_IP_DCCP is not set
385# CONFIG_IP_SCTP is not set
386# CONFIG_RDS is not set
387# CONFIG_TIPC is not set
388# CONFIG_ATM is not set
389CONFIG_STP=m
390CONFIG_BRIDGE=m
391# CONFIG_NET_DSA is not set
392CONFIG_VLAN_8021Q=m
393# CONFIG_VLAN_8021Q_GVRP is not set
394# CONFIG_DECNET is not set
395CONFIG_LLC=m
396# CONFIG_LLC2 is not set
397# CONFIG_IPX is not set
398# CONFIG_ATALK is not set
399# CONFIG_X25 is not set
400# CONFIG_LAPB is not set
401# CONFIG_ECONET is not set
402# CONFIG_WAN_ROUTER is not set
403# CONFIG_PHONET is not set
404# CONFIG_IEEE802154 is not set
405# CONFIG_NET_SCHED is not set
406# CONFIG_DCB is not set
407
408#
409# Network testing
410#
411# CONFIG_NET_PKTGEN is not set
412# CONFIG_NET_TCPPROBE is not set
413# CONFIG_NET_DROP_MONITOR is not set
414# CONFIG_HAMRADIO is not set
415# CONFIG_CAN is not set
416# CONFIG_IRDA is not set
417# CONFIG_BT is not set
418# CONFIG_AF_RXRPC is not set
419CONFIG_WIRELESS=y
420# CONFIG_CFG80211 is not set
421CONFIG_CFG80211_DEFAULT_PS_VALUE=0
422# CONFIG_WIRELESS_OLD_REGULATORY is not set
423# CONFIG_WIRELESS_EXT is not set
424# CONFIG_LIB80211 is not set
425
426#
427# CFG80211 needs to be enabled for MAC80211
428#
429# CONFIG_WIMAX is not set
430# CONFIG_RFKILL is not set
431# CONFIG_NET_9P is not set
432
433#
434# Device Drivers
435#
436
437#
438# Generic Driver Options
439#
440CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
441# CONFIG_DEVTMPFS is not set
442CONFIG_STANDALONE=y
443# CONFIG_PREVENT_FIRMWARE_BUILD is not set
444# CONFIG_FW_LOADER is not set
445# CONFIG_DEBUG_DRIVER is not set
446# CONFIG_DEBUG_DEVRES is not set
447# CONFIG_SYS_HYPERVISOR is not set
448# CONFIG_CONNECTOR is not set
449CONFIG_MTD=y
450# CONFIG_MTD_DEBUG is not set
451# CONFIG_MTD_TESTS is not set
452# CONFIG_MTD_CONCAT is not set
453CONFIG_MTD_PARTITIONS=y
454# CONFIG_MTD_REDBOOT_PARTS is not set
455CONFIG_MTD_CMDLINE_PARTS=y
456# CONFIG_MTD_AR7_PARTS is not set
457
458#
459# User Modules And Translation Layers
460#
461CONFIG_MTD_CHAR=y
462CONFIG_MTD_BLKDEVS=y
463CONFIG_MTD_BLOCK=y
464# CONFIG_FTL is not set
465# CONFIG_NFTL is not set
466# CONFIG_INFTL is not set
467# CONFIG_RFD_FTL is not set
468# CONFIG_SSFDC is not set
469# CONFIG_MTD_OOPS is not set
470
471#
472# RAM/ROM/Flash chip drivers
473#
474CONFIG_MTD_CFI=y
475# CONFIG_MTD_JEDECPROBE is not set
476CONFIG_MTD_GEN_PROBE=y
477# CONFIG_MTD_CFI_ADV_OPTIONS is not set
478CONFIG_MTD_MAP_BANK_WIDTH_1=y
479CONFIG_MTD_MAP_BANK_WIDTH_2=y
480CONFIG_MTD_MAP_BANK_WIDTH_4=y
481# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
482# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
483# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
484CONFIG_MTD_CFI_I1=y
485CONFIG_MTD_CFI_I2=y
486# CONFIG_MTD_CFI_I4 is not set
487# CONFIG_MTD_CFI_I8 is not set
488CONFIG_MTD_CFI_INTELEXT=y
489# CONFIG_MTD_CFI_AMDSTD is not set
490# CONFIG_MTD_CFI_STAA is not set
491CONFIG_MTD_CFI_UTIL=y
492# CONFIG_MTD_RAM is not set
493# CONFIG_MTD_ROM is not set
494# CONFIG_MTD_ABSENT is not set
495
496#
497# Mapping drivers for chip access
498#
499# CONFIG_MTD_COMPLEX_MAPPINGS is not set
500CONFIG_MTD_PHYSMAP=y
501# CONFIG_MTD_PHYSMAP_COMPAT is not set
502# CONFIG_MTD_PLATRAM is not set
503
504#
505# Self-contained MTD device drivers
506#
507CONFIG_MTD_DATAFLASH=y
508# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
509# CONFIG_MTD_DATAFLASH_OTP is not set
510# CONFIG_MTD_M25P80 is not set
511# CONFIG_MTD_SST25L is not set
512# CONFIG_MTD_SLRAM is not set
513# CONFIG_MTD_PHRAM is not set
514# CONFIG_MTD_MTDRAM is not set
515# CONFIG_MTD_BLOCK2MTD is not set
516
517#
518# Disk-On-Chip Device Drivers
519#
520# CONFIG_MTD_DOC2000 is not set
521# CONFIG_MTD_DOC2001 is not set
522# CONFIG_MTD_DOC2001PLUS is not set
523CONFIG_MTD_NAND=y
524# CONFIG_MTD_NAND_VERIFY_WRITE is not set
525# CONFIG_MTD_NAND_ECC_SMC is not set
526# CONFIG_MTD_NAND_MUSEUM_IDS is not set
527CONFIG_MTD_NAND_IDS=y
528# CONFIG_MTD_NAND_DISKONCHIP is not set
529CONFIG_MTD_NAND_ATMEL=y
530CONFIG_MTD_NAND_ATMEL_ECC_HW=y
531# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
532# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
533# CONFIG_MTD_NAND_NANDSIM is not set
534# CONFIG_MTD_NAND_PLATFORM is not set
535# CONFIG_MTD_ONENAND is not set
536
537#
538# LPDDR flash memory drivers
539#
540# CONFIG_MTD_LPDDR is not set
541
542#
543# UBI - Unsorted block images
544#
545CONFIG_MTD_UBI=y
546CONFIG_MTD_UBI_WL_THRESHOLD=4096
547CONFIG_MTD_UBI_BEB_RESERVE=1
548# CONFIG_MTD_UBI_GLUEBI is not set
549
550#
551# UBI debugging options
552#
553# CONFIG_MTD_UBI_DEBUG is not set
554# CONFIG_PARPORT is not set
555CONFIG_BLK_DEV=y
556# CONFIG_BLK_DEV_COW_COMMON is not set
557CONFIG_BLK_DEV_LOOP=m
558# CONFIG_BLK_DEV_CRYPTOLOOP is not set
559CONFIG_BLK_DEV_NBD=m
560CONFIG_BLK_DEV_RAM=m
561CONFIG_BLK_DEV_RAM_COUNT=16
562CONFIG_BLK_DEV_RAM_SIZE=4096
563# CONFIG_BLK_DEV_XIP is not set
564# CONFIG_CDROM_PKTCDVD is not set
565# CONFIG_ATA_OVER_ETH is not set
566CONFIG_MISC_DEVICES=y
567# CONFIG_ATMEL_PWM is not set
568CONFIG_ATMEL_TCLIB=y
569CONFIG_ATMEL_TCB_CLKSRC=y
570CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
571# CONFIG_ICS932S401 is not set
572# CONFIG_ATMEL_SSC is not set
573# CONFIG_ENCLOSURE_SERVICES is not set
574# CONFIG_ISL29003 is not set
575# CONFIG_C2PORT is not set
576
577#
578# EEPROM support
579#
580# CONFIG_EEPROM_AT24 is not set
581# CONFIG_EEPROM_AT25 is not set
582# CONFIG_EEPROM_LEGACY is not set
583# CONFIG_EEPROM_MAX6875 is not set
584# CONFIG_EEPROM_93CX6 is not set
585
586#
587# SCSI device support
588#
589# CONFIG_RAID_ATTRS is not set
590# CONFIG_SCSI is not set
591# CONFIG_SCSI_DMA is not set
592# CONFIG_SCSI_NETLINK is not set
593# CONFIG_ATA is not set
594# CONFIG_MD is not set
595CONFIG_NETDEVICES=y
596# CONFIG_DUMMY is not set
597# CONFIG_BONDING is not set
598# CONFIG_MACVLAN is not set
599# CONFIG_EQUALIZER is not set
600# CONFIG_TUN is not set
601# CONFIG_VETH is not set
602CONFIG_PHYLIB=y
603
604#
605# MII PHY device drivers
606#
607# CONFIG_MARVELL_PHY is not set
608# CONFIG_DAVICOM_PHY is not set
609# CONFIG_QSEMI_PHY is not set
610# CONFIG_LXT_PHY is not set
611# CONFIG_CICADA_PHY is not set
612# CONFIG_VITESSE_PHY is not set
613# CONFIG_SMSC_PHY is not set
614# CONFIG_BROADCOM_PHY is not set
615# CONFIG_ICPLUS_PHY is not set
616# CONFIG_REALTEK_PHY is not set
617# CONFIG_NATIONAL_PHY is not set
618# CONFIG_STE10XP is not set
619# CONFIG_LSI_ET1011C_PHY is not set
620# CONFIG_FIXED_PHY is not set
621# CONFIG_MDIO_BITBANG is not set
622CONFIG_NET_ETHERNET=y
623# CONFIG_MII is not set
624CONFIG_MACB=y
625# CONFIG_ENC28J60 is not set
626# CONFIG_ETHOC is not set
627# CONFIG_DNET is not set
628# CONFIG_IBM_NEW_EMAC_ZMII is not set
629# CONFIG_IBM_NEW_EMAC_RGMII is not set
630# CONFIG_IBM_NEW_EMAC_TAH is not set
631# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
632# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
633# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
634# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
635# CONFIG_B44 is not set
636# CONFIG_KS8842 is not set
637# CONFIG_KS8851 is not set
638# CONFIG_KS8851_MLL is not set
639# CONFIG_NETDEV_1000 is not set
640# CONFIG_NETDEV_10000 is not set
641CONFIG_WLAN=y
642# CONFIG_WLAN_PRE80211 is not set
643# CONFIG_WLAN_80211 is not set
644
645#
646# Enable WiMAX (Networking options) to see the WiMAX drivers
647#
648# CONFIG_WAN is not set
649CONFIG_PPP=m
650# CONFIG_PPP_MULTILINK is not set
651CONFIG_PPP_FILTER=y
652CONFIG_PPP_ASYNC=m
653# CONFIG_PPP_SYNC_TTY is not set
654CONFIG_PPP_DEFLATE=m
655CONFIG_PPP_BSDCOMP=m
656CONFIG_PPP_MPPE=m
657CONFIG_PPPOE=m
658# CONFIG_PPPOL2TP is not set
659# CONFIG_SLIP is not set
660CONFIG_SLHC=m
661# CONFIG_NETCONSOLE is not set
662# CONFIG_NETPOLL is not set
663# CONFIG_NET_POLL_CONTROLLER is not set
664# CONFIG_ISDN is not set
665# CONFIG_PHONE is not set
666
667#
668# Input device support
669#
670CONFIG_INPUT=y
671# CONFIG_INPUT_FF_MEMLESS is not set
672# CONFIG_INPUT_POLLDEV is not set
673
674#
675# Userland interfaces
676#
677# CONFIG_INPUT_MOUSEDEV is not set
678# CONFIG_INPUT_JOYDEV is not set
679CONFIG_INPUT_EVDEV=m
680# CONFIG_INPUT_EVBUG is not set
681
682#
683# Input Device Drivers
684#
685# CONFIG_INPUT_KEYBOARD is not set
686# CONFIG_INPUT_MOUSE is not set
687# CONFIG_INPUT_JOYSTICK is not set
688# CONFIG_INPUT_TABLET is not set
689CONFIG_INPUT_TOUCHSCREEN=y
690# CONFIG_TOUCHSCREEN_ADS7846 is not set
691# CONFIG_TOUCHSCREEN_AD7877 is not set
692# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
693# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
694# CONFIG_TOUCHSCREEN_AD7879 is not set
695# CONFIG_TOUCHSCREEN_EETI is not set
696# CONFIG_TOUCHSCREEN_FUJITSU is not set
697# CONFIG_TOUCHSCREEN_GUNZE is not set
698# CONFIG_TOUCHSCREEN_ELO is not set
699# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
700# CONFIG_TOUCHSCREEN_MCS5000 is not set
701# CONFIG_TOUCHSCREEN_MTOUCH is not set
702# CONFIG_TOUCHSCREEN_INEXIO is not set
703# CONFIG_TOUCHSCREEN_MK712 is not set
704# CONFIG_TOUCHSCREEN_PENMOUNT is not set
705# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
706# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
707CONFIG_TOUCHSCREEN_WM97XX=m
708CONFIG_TOUCHSCREEN_WM9705=y
709CONFIG_TOUCHSCREEN_WM9712=y
710CONFIG_TOUCHSCREEN_WM9713=y
711# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
712# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
713# CONFIG_TOUCHSCREEN_TSC2007 is not set
714# CONFIG_TOUCHSCREEN_W90X900 is not set
715# CONFIG_INPUT_MISC is not set
716
717#
718# Hardware I/O ports
719#
720# CONFIG_SERIO is not set
721# CONFIG_GAMEPORT is not set
722
723#
724# Character devices
725#
726CONFIG_VT=y
727CONFIG_CONSOLE_TRANSLATIONS=y
728CONFIG_VT_CONSOLE=y
729CONFIG_HW_CONSOLE=y
730# CONFIG_VT_HW_CONSOLE_BINDING is not set
731CONFIG_DEVKMEM=y
732# CONFIG_SERIAL_NONSTANDARD is not set
733
734#
735# Serial drivers
736#
737# CONFIG_SERIAL_8250 is not set
738
739#
740# Non-8250 serial port support
741#
742CONFIG_SERIAL_ATMEL=y
743CONFIG_SERIAL_ATMEL_CONSOLE=y
744CONFIG_SERIAL_ATMEL_PDC=y
745# CONFIG_SERIAL_ATMEL_TTYAT is not set
746# CONFIG_SERIAL_MAX3100 is not set
747CONFIG_SERIAL_CORE=y
748CONFIG_SERIAL_CORE_CONSOLE=y
749CONFIG_UNIX98_PTYS=y
750# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
751# CONFIG_LEGACY_PTYS is not set
752# CONFIG_IPMI_HANDLER is not set
753# CONFIG_HW_RANDOM is not set
754# CONFIG_R3964 is not set
755# CONFIG_RAW_DRIVER is not set
756# CONFIG_TCG_TPM is not set
757CONFIG_I2C=m
758CONFIG_I2C_BOARDINFO=y
759CONFIG_I2C_COMPAT=y
760CONFIG_I2C_CHARDEV=m
761CONFIG_I2C_HELPER_AUTO=y
762CONFIG_I2C_ALGOBIT=m
763
764#
765# I2C Hardware Bus support
766#
767
768#
769# I2C system bus drivers (mostly embedded / system-on-chip)
770#
771# CONFIG_I2C_DESIGNWARE is not set
772CONFIG_I2C_GPIO=m
773# CONFIG_I2C_OCORES is not set
774# CONFIG_I2C_SIMTEC is not set
775
776#
777# External I2C/SMBus adapter drivers
778#
779# CONFIG_I2C_PARPORT_LIGHT is not set
780# CONFIG_I2C_TAOS_EVM is not set
781
782#
783# Other I2C/SMBus bus drivers
784#
785# CONFIG_I2C_PCA_PLATFORM is not set
786# CONFIG_I2C_STUB is not set
787
788#
789# Miscellaneous I2C Chip support
790#
791# CONFIG_DS1682 is not set
792# CONFIG_SENSORS_TSL2550 is not set
793# CONFIG_I2C_DEBUG_CORE is not set
794# CONFIG_I2C_DEBUG_ALGO is not set
795# CONFIG_I2C_DEBUG_BUS is not set
796# CONFIG_I2C_DEBUG_CHIP is not set
797CONFIG_SPI=y
798# CONFIG_SPI_DEBUG is not set
799CONFIG_SPI_MASTER=y
800
801#
802# SPI Master Controller Drivers
803#
804CONFIG_SPI_ATMEL=y
805# CONFIG_SPI_BITBANG is not set
806# CONFIG_SPI_GPIO is not set
807
808#
809# SPI Protocol Masters
810#
811CONFIG_SPI_SPIDEV=m
812# CONFIG_SPI_TLE62X0 is not set
813
814#
815# PPS support
816#
817# CONFIG_PPS is not set
818CONFIG_ARCH_REQUIRE_GPIOLIB=y
819CONFIG_GPIOLIB=y
820# CONFIG_DEBUG_GPIO is not set
821# CONFIG_GPIO_SYSFS is not set
822
823#
824# Memory mapped GPIO expanders:
825#
826
827#
828# I2C GPIO expanders:
829#
830# CONFIG_GPIO_MAX732X is not set
831# CONFIG_GPIO_PCA953X is not set
832# CONFIG_GPIO_PCF857X is not set
833
834#
835# PCI GPIO expanders:
836#
837
838#
839# SPI GPIO expanders:
840#
841# CONFIG_GPIO_MAX7301 is not set
842# CONFIG_GPIO_MCP23S08 is not set
843# CONFIG_GPIO_MC33880 is not set
844
845#
846# AC97 GPIO expanders:
847#
848# CONFIG_W1 is not set
849# CONFIG_POWER_SUPPLY is not set
850# CONFIG_HWMON is not set
851# CONFIG_THERMAL is not set
852CONFIG_WATCHDOG=y
853# CONFIG_WATCHDOG_NOWAYOUT is not set
854
855#
856# Watchdog Device Drivers
857#
858# CONFIG_SOFT_WATCHDOG is not set
859CONFIG_AT32AP700X_WDT=y
860CONFIG_SSB_POSSIBLE=y
861
862#
863# Sonics Silicon Backplane
864#
865# CONFIG_SSB is not set
866
867#
868# Multifunction device drivers
869#
870# CONFIG_MFD_CORE is not set
871# CONFIG_MFD_SM501 is not set
872# CONFIG_HTC_PASIC3 is not set
873# CONFIG_UCB1400_CORE is not set
874# CONFIG_TPS65010 is not set
875# CONFIG_MFD_TMIO is not set
876# CONFIG_MFD_WM8400 is not set
877# CONFIG_MFD_WM831X is not set
878# CONFIG_MFD_WM8350_I2C is not set
879# CONFIG_MFD_PCF50633 is not set
880# CONFIG_MFD_MC13783 is not set
881# CONFIG_AB3100_CORE is not set
882# CONFIG_EZX_PCAP is not set
883# CONFIG_REGULATOR is not set
884# CONFIG_MEDIA_SUPPORT is not set
885
886#
887# Graphics support
888#
889# CONFIG_VGASTATE is not set
890# CONFIG_VIDEO_OUTPUT_CONTROL is not set
891CONFIG_FB=y
892# CONFIG_FIRMWARE_EDID is not set
893# CONFIG_FB_DDC is not set
894# CONFIG_FB_BOOT_VESA_SUPPORT is not set
895CONFIG_FB_CFB_FILLRECT=y
896CONFIG_FB_CFB_COPYAREA=y
897CONFIG_FB_CFB_IMAGEBLIT=y
898# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
899# CONFIG_FB_SYS_FILLRECT is not set
900# CONFIG_FB_SYS_COPYAREA is not set
901# CONFIG_FB_SYS_IMAGEBLIT is not set
902# CONFIG_FB_FOREIGN_ENDIAN is not set
903# CONFIG_FB_SYS_FOPS is not set
904# CONFIG_FB_SVGALIB is not set
905# CONFIG_FB_MACMODES is not set
906# CONFIG_FB_BACKLIGHT is not set
907# CONFIG_FB_MODE_HELPERS is not set
908# CONFIG_FB_TILEBLITTING is not set
909
910#
911# Frame buffer hardware drivers
912#
913# CONFIG_FB_S1D13XXX is not set
914CONFIG_FB_ATMEL=y
915# CONFIG_FB_VIRTUAL is not set
916# CONFIG_FB_METRONOME is not set
917# CONFIG_FB_MB862XX is not set
918# CONFIG_FB_BROADSHEET is not set
919# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
920
921#
922# Display device support
923#
924# CONFIG_DISPLAY_SUPPORT is not set
925
926#
927# Console display driver support
928#
929CONFIG_DUMMY_CONSOLE=y
930# CONFIG_FRAMEBUFFER_CONSOLE is not set
931# CONFIG_LOGO is not set
932CONFIG_SOUND=y
933CONFIG_SOUND_OSS_CORE=y
934CONFIG_SOUND_OSS_CORE_PRECLAIM=y
935CONFIG_SND=y
936CONFIG_SND_TIMER=y
937CONFIG_SND_PCM=m
938# CONFIG_SND_SEQUENCER is not set
939CONFIG_SND_OSSEMUL=y
940CONFIG_SND_MIXER_OSS=m
941CONFIG_SND_PCM_OSS=m
942CONFIG_SND_PCM_OSS_PLUGINS=y
943CONFIG_SND_HRTIMER=y
944# CONFIG_SND_DYNAMIC_MINORS is not set
945# CONFIG_SND_SUPPORT_OLD_API is not set
946CONFIG_SND_VERBOSE_PROCFS=y
947# CONFIG_SND_VERBOSE_PRINTK is not set
948# CONFIG_SND_DEBUG is not set
949CONFIG_SND_VMASTER=y
950# CONFIG_SND_RAWMIDI_SEQ is not set
951# CONFIG_SND_OPL3_LIB_SEQ is not set
952# CONFIG_SND_OPL4_LIB_SEQ is not set
953# CONFIG_SND_SBAWE_SEQ is not set
954# CONFIG_SND_EMU10K1_SEQ is not set
955CONFIG_SND_AC97_CODEC=m
956# CONFIG_SND_DRIVERS is not set
957
958#
959# Atmel devices (AVR32 and AT91)
960#
961# CONFIG_SND_ATMEL_ABDAC is not set
962CONFIG_SND_ATMEL_AC97C=m
963# CONFIG_SND_SPI is not set
964# CONFIG_SND_SOC is not set
965# CONFIG_SOUND_PRIME is not set
966CONFIG_AC97_BUS=m
967CONFIG_HID_SUPPORT=y
968CONFIG_HID=y
969# CONFIG_HIDRAW is not set
970# CONFIG_HID_PID is not set
971
972#
973# Special HID drivers
974#
975CONFIG_USB_SUPPORT=y
976# CONFIG_USB_ARCH_HAS_HCD is not set
977# CONFIG_USB_ARCH_HAS_OHCI is not set
978# CONFIG_USB_ARCH_HAS_EHCI is not set
979# CONFIG_USB_OTG_WHITELIST is not set
980# CONFIG_USB_OTG_BLACKLIST_HUB is not set
981# CONFIG_USB_GADGET_MUSB_HDRC is not set
982
983#
984# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
985#
986CONFIG_USB_GADGET=y
987# CONFIG_USB_GADGET_DEBUG is not set
988# CONFIG_USB_GADGET_DEBUG_FILES is not set
989# CONFIG_USB_GADGET_DEBUG_FS is not set
990CONFIG_USB_GADGET_VBUS_DRAW=350
991CONFIG_USB_GADGET_SELECTED=y
992# CONFIG_USB_GADGET_AT91 is not set
993CONFIG_USB_GADGET_ATMEL_USBA=y
994CONFIG_USB_ATMEL_USBA=y
995# CONFIG_USB_GADGET_FSL_USB2 is not set
996# CONFIG_USB_GADGET_LH7A40X is not set
997# CONFIG_USB_GADGET_OMAP is not set
998# CONFIG_USB_GADGET_PXA25X is not set
999# CONFIG_USB_GADGET_R8A66597 is not set
1000# CONFIG_USB_GADGET_PXA27X is not set
1001# CONFIG_USB_GADGET_S3C_HSOTG is not set
1002# CONFIG_USB_GADGET_IMX is not set
1003# CONFIG_USB_GADGET_S3C2410 is not set
1004# CONFIG_USB_GADGET_M66592 is not set
1005# CONFIG_USB_GADGET_AMD5536UDC is not set
1006# CONFIG_USB_GADGET_FSL_QE is not set
1007# CONFIG_USB_GADGET_CI13XXX is not set
1008# CONFIG_USB_GADGET_NET2280 is not set
1009# CONFIG_USB_GADGET_GOKU is not set
1010# CONFIG_USB_GADGET_LANGWELL is not set
1011# CONFIG_USB_GADGET_DUMMY_HCD is not set
1012CONFIG_USB_GADGET_DUALSPEED=y
1013CONFIG_USB_ZERO=m
1014# CONFIG_USB_AUDIO is not set
1015CONFIG_USB_ETH=m
1016CONFIG_USB_ETH_RNDIS=y
1017# CONFIG_USB_ETH_EEM is not set
1018CONFIG_USB_GADGETFS=m
1019CONFIG_USB_FILE_STORAGE=m
1020# CONFIG_USB_FILE_STORAGE_TEST is not set
1021CONFIG_USB_G_SERIAL=m
1022# CONFIG_USB_MIDI_GADGET is not set
1023# CONFIG_USB_G_PRINTER is not set
1024CONFIG_USB_CDC_COMPOSITE=m
1025
1026#
1027# OTG and related infrastructure
1028#
1029# CONFIG_USB_GPIO_VBUS is not set
1030# CONFIG_NOP_USB_XCEIV is not set
1031CONFIG_MMC=y
1032# CONFIG_MMC_DEBUG is not set
1033# CONFIG_MMC_UNSAFE_RESUME is not set
1034
1035#
1036# MMC/SD/SDIO Card Drivers
1037#
1038CONFIG_MMC_BLOCK=y
1039CONFIG_MMC_BLOCK_BOUNCE=y
1040# CONFIG_SDIO_UART is not set
1041# CONFIG_MMC_TEST is not set
1042
1043#
1044# MMC/SD/SDIO Host Controller Drivers
1045#
1046# CONFIG_MMC_SDHCI is not set
1047# CONFIG_MMC_AT91 is not set
1048CONFIG_MMC_ATMELMCI=y
1049# CONFIG_MMC_ATMELMCI_DMA is not set
1050# CONFIG_MMC_SPI is not set
1051# CONFIG_MEMSTICK is not set
1052CONFIG_NEW_LEDS=y
1053CONFIG_LEDS_CLASS=y
1054
1055#
1056# LED drivers
1057#
1058# CONFIG_LEDS_PCA9532 is not set
1059CONFIG_LEDS_GPIO=y
1060CONFIG_LEDS_GPIO_PLATFORM=y
1061# CONFIG_LEDS_LP3944 is not set
1062# CONFIG_LEDS_PCA955X is not set
1063# CONFIG_LEDS_DAC124S085 is not set
1064# CONFIG_LEDS_BD2802 is not set
1065
1066#
1067# LED Triggers
1068#
1069CONFIG_LEDS_TRIGGERS=y
1070CONFIG_LEDS_TRIGGER_TIMER=y
1071CONFIG_LEDS_TRIGGER_HEARTBEAT=y
1072# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1073# CONFIG_LEDS_TRIGGER_GPIO is not set
1074# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1075
1076#
1077# iptables trigger is under Netfilter config (LED target)
1078#
1079# CONFIG_ACCESSIBILITY is not set
1080CONFIG_RTC_LIB=y
1081CONFIG_RTC_CLASS=y
1082CONFIG_RTC_HCTOSYS=y
1083CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1084# CONFIG_RTC_DEBUG is not set
1085
1086#
1087# RTC interfaces
1088#
1089CONFIG_RTC_INTF_SYSFS=y
1090CONFIG_RTC_INTF_PROC=y
1091CONFIG_RTC_INTF_DEV=y
1092# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1093# CONFIG_RTC_DRV_TEST is not set
1094
1095#
1096# I2C RTC drivers
1097#
1098# CONFIG_RTC_DRV_DS1307 is not set
1099# CONFIG_RTC_DRV_DS1374 is not set
1100# CONFIG_RTC_DRV_DS1672 is not set
1101# CONFIG_RTC_DRV_MAX6900 is not set
1102# CONFIG_RTC_DRV_RS5C372 is not set
1103# CONFIG_RTC_DRV_ISL1208 is not set
1104# CONFIG_RTC_DRV_X1205 is not set
1105# CONFIG_RTC_DRV_PCF8563 is not set
1106# CONFIG_RTC_DRV_PCF8583 is not set
1107# CONFIG_RTC_DRV_M41T80 is not set
1108# CONFIG_RTC_DRV_S35390A is not set
1109# CONFIG_RTC_DRV_FM3130 is not set
1110# CONFIG_RTC_DRV_RX8581 is not set
1111# CONFIG_RTC_DRV_RX8025 is not set
1112
1113#
1114# SPI RTC drivers
1115#
1116# CONFIG_RTC_DRV_M41T94 is not set
1117# CONFIG_RTC_DRV_DS1305 is not set
1118# CONFIG_RTC_DRV_DS1390 is not set
1119# CONFIG_RTC_DRV_MAX6902 is not set
1120# CONFIG_RTC_DRV_R9701 is not set
1121# CONFIG_RTC_DRV_RS5C348 is not set
1122# CONFIG_RTC_DRV_DS3234 is not set
1123# CONFIG_RTC_DRV_PCF2123 is not set
1124
1125#
1126# Platform RTC drivers
1127#
1128# CONFIG_RTC_DRV_DS1286 is not set
1129# CONFIG_RTC_DRV_DS1511 is not set
1130# CONFIG_RTC_DRV_DS1553 is not set
1131# CONFIG_RTC_DRV_DS1742 is not set
1132# CONFIG_RTC_DRV_STK17TA8 is not set
1133# CONFIG_RTC_DRV_M48T86 is not set
1134# CONFIG_RTC_DRV_M48T35 is not set
1135# CONFIG_RTC_DRV_M48T59 is not set
1136# CONFIG_RTC_DRV_BQ4802 is not set
1137# CONFIG_RTC_DRV_V3020 is not set
1138
1139#
1140# on-CPU RTC drivers
1141#
1142CONFIG_RTC_DRV_AT32AP700X=y
1143CONFIG_DMADEVICES=y
1144
1145#
1146# DMA Devices
1147#
1148CONFIG_DW_DMAC=y
1149CONFIG_DMA_ENGINE=y
1150
1151#
1152# DMA Clients
1153#
1154# CONFIG_NET_DMA is not set
1155# CONFIG_ASYNC_TX_DMA is not set
1156# CONFIG_DMATEST is not set
1157# CONFIG_AUXDISPLAY is not set
1158# CONFIG_UIO is not set
1159
1160#
1161# TI VLYNQ
1162#
1163# CONFIG_STAGING is not set
1164
1165#
1166# File systems
1167#
1168CONFIG_EXT2_FS=y
1169# CONFIG_EXT2_FS_XATTR is not set
1170# CONFIG_EXT2_FS_XIP is not set
1171CONFIG_EXT3_FS=y
1172# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
1173# CONFIG_EXT3_FS_XATTR is not set
1174# CONFIG_EXT4_FS is not set
1175CONFIG_JBD=y
1176# CONFIG_JBD_DEBUG is not set
1177# CONFIG_REISERFS_FS is not set
1178# CONFIG_JFS_FS is not set
1179# CONFIG_FS_POSIX_ACL is not set
1180# CONFIG_XFS_FS is not set
1181# CONFIG_GFS2_FS is not set
1182# CONFIG_OCFS2_FS is not set
1183# CONFIG_BTRFS_FS is not set
1184# CONFIG_NILFS2_FS is not set
1185CONFIG_FILE_LOCKING=y
1186CONFIG_FSNOTIFY=y
1187# CONFIG_DNOTIFY is not set
1188CONFIG_INOTIFY=y
1189CONFIG_INOTIFY_USER=y
1190# CONFIG_QUOTA is not set
1191# CONFIG_AUTOFS_FS is not set
1192# CONFIG_AUTOFS4_FS is not set
1193CONFIG_FUSE_FS=m
1194# CONFIG_CUSE is not set
1195
1196#
1197# Caches
1198#
1199# CONFIG_FSCACHE is not set
1200
1201#
1202# CD-ROM/DVD Filesystems
1203#
1204# CONFIG_ISO9660_FS is not set
1205# CONFIG_UDF_FS is not set
1206
1207#
1208# DOS/FAT/NT Filesystems
1209#
1210CONFIG_FAT_FS=m
1211CONFIG_MSDOS_FS=m
1212CONFIG_VFAT_FS=m
1213CONFIG_FAT_DEFAULT_CODEPAGE=850
1214CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1215# CONFIG_NTFS_FS is not set
1216
1217#
1218# Pseudo filesystems
1219#
1220CONFIG_PROC_FS=y
1221# CONFIG_PROC_KCORE is not set
1222CONFIG_PROC_SYSCTL=y
1223CONFIG_PROC_PAGE_MONITOR=y
1224CONFIG_SYSFS=y
1225CONFIG_TMPFS=y
1226# CONFIG_TMPFS_POSIX_ACL is not set
1227# CONFIG_HUGETLB_PAGE is not set
1228CONFIG_CONFIGFS_FS=y
1229CONFIG_MISC_FILESYSTEMS=y
1230# CONFIG_ADFS_FS is not set
1231# CONFIG_AFFS_FS is not set
1232# CONFIG_HFS_FS is not set
1233# CONFIG_HFSPLUS_FS is not set
1234# CONFIG_BEFS_FS is not set
1235# CONFIG_BFS_FS is not set
1236# CONFIG_EFS_FS is not set
1237CONFIG_JFFS2_FS=y
1238CONFIG_JFFS2_FS_DEBUG=0
1239CONFIG_JFFS2_FS_WRITEBUFFER=y
1240# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1241# CONFIG_JFFS2_SUMMARY is not set
1242# CONFIG_JFFS2_FS_XATTR is not set
1243# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1244CONFIG_JFFS2_ZLIB=y
1245# CONFIG_JFFS2_LZO is not set
1246CONFIG_JFFS2_RTIME=y
1247# CONFIG_JFFS2_RUBIN is not set
1248CONFIG_UBIFS_FS=y
1249# CONFIG_UBIFS_FS_XATTR is not set
1250# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
1251CONFIG_UBIFS_FS_LZO=y
1252CONFIG_UBIFS_FS_ZLIB=y
1253# CONFIG_UBIFS_FS_DEBUG is not set
1254# CONFIG_CRAMFS is not set
1255# CONFIG_SQUASHFS is not set
1256# CONFIG_VXFS_FS is not set
1257# CONFIG_MINIX_FS is not set
1258# CONFIG_OMFS_FS is not set
1259# CONFIG_HPFS_FS is not set
1260# CONFIG_QNX4FS_FS is not set
1261# CONFIG_ROMFS_FS is not set
1262# CONFIG_SYSV_FS is not set
1263# CONFIG_UFS_FS is not set
1264CONFIG_NETWORK_FILESYSTEMS=y
1265CONFIG_NFS_FS=y
1266CONFIG_NFS_V3=y
1267# CONFIG_NFS_V3_ACL is not set
1268# CONFIG_NFS_V4 is not set
1269CONFIG_ROOT_NFS=y
1270CONFIG_NFSD=m
1271CONFIG_NFSD_V3=y
1272# CONFIG_NFSD_V3_ACL is not set
1273# CONFIG_NFSD_V4 is not set
1274CONFIG_LOCKD=y
1275CONFIG_LOCKD_V4=y
1276CONFIG_EXPORTFS=m
1277CONFIG_NFS_COMMON=y
1278CONFIG_SUNRPC=y
1279# CONFIG_RPCSEC_GSS_KRB5 is not set
1280# CONFIG_RPCSEC_GSS_SPKM3 is not set
1281CONFIG_SMB_FS=m
1282# CONFIG_SMB_NLS_DEFAULT is not set
1283CONFIG_CIFS=m
1284# CONFIG_CIFS_STATS is not set
1285# CONFIG_CIFS_WEAK_PW_HASH is not set
1286# CONFIG_CIFS_XATTR is not set
1287# CONFIG_CIFS_DEBUG2 is not set
1288# CONFIG_CIFS_EXPERIMENTAL is not set
1289# CONFIG_NCP_FS is not set
1290# CONFIG_CODA_FS is not set
1291# CONFIG_AFS_FS is not set
1292
1293#
1294# Partition Types
1295#
1296# CONFIG_PARTITION_ADVANCED is not set
1297CONFIG_MSDOS_PARTITION=y
1298CONFIG_NLS=m
1299CONFIG_NLS_DEFAULT="iso8859-1"
1300CONFIG_NLS_CODEPAGE_437=m
1301# CONFIG_NLS_CODEPAGE_737 is not set
1302# CONFIG_NLS_CODEPAGE_775 is not set
1303CONFIG_NLS_CODEPAGE_850=m
1304# CONFIG_NLS_CODEPAGE_852 is not set
1305# CONFIG_NLS_CODEPAGE_855 is not set
1306# CONFIG_NLS_CODEPAGE_857 is not set
1307# CONFIG_NLS_CODEPAGE_860 is not set
1308# CONFIG_NLS_CODEPAGE_861 is not set
1309# CONFIG_NLS_CODEPAGE_862 is not set
1310# CONFIG_NLS_CODEPAGE_863 is not set
1311# CONFIG_NLS_CODEPAGE_864 is not set
1312# CONFIG_NLS_CODEPAGE_865 is not set
1313# CONFIG_NLS_CODEPAGE_866 is not set
1314# CONFIG_NLS_CODEPAGE_869 is not set
1315# CONFIG_NLS_CODEPAGE_936 is not set
1316# CONFIG_NLS_CODEPAGE_950 is not set
1317# CONFIG_NLS_CODEPAGE_932 is not set
1318# CONFIG_NLS_CODEPAGE_949 is not set
1319# CONFIG_NLS_CODEPAGE_874 is not set
1320# CONFIG_NLS_ISO8859_8 is not set
1321# CONFIG_NLS_CODEPAGE_1250 is not set
1322# CONFIG_NLS_CODEPAGE_1251 is not set
1323# CONFIG_NLS_ASCII is not set
1324CONFIG_NLS_ISO8859_1=m
1325# CONFIG_NLS_ISO8859_2 is not set
1326# CONFIG_NLS_ISO8859_3 is not set
1327# CONFIG_NLS_ISO8859_4 is not set
1328# CONFIG_NLS_ISO8859_5 is not set
1329# CONFIG_NLS_ISO8859_6 is not set
1330# CONFIG_NLS_ISO8859_7 is not set
1331# CONFIG_NLS_ISO8859_9 is not set
1332# CONFIG_NLS_ISO8859_13 is not set
1333# CONFIG_NLS_ISO8859_14 is not set
1334# CONFIG_NLS_ISO8859_15 is not set
1335# CONFIG_NLS_KOI8_R is not set
1336# CONFIG_NLS_KOI8_U is not set
1337CONFIG_NLS_UTF8=m
1338# CONFIG_DLM is not set
1339
1340#
1341# Kernel hacking
1342#
1343# CONFIG_PRINTK_TIME is not set
1344CONFIG_ENABLE_WARN_DEPRECATED=y
1345CONFIG_ENABLE_MUST_CHECK=y
1346CONFIG_FRAME_WARN=1024
1347CONFIG_MAGIC_SYSRQ=y
1348# CONFIG_STRIP_ASM_SYMS is not set
1349# CONFIG_UNUSED_SYMBOLS is not set
1350CONFIG_DEBUG_FS=y
1351# CONFIG_HEADERS_CHECK is not set
1352CONFIG_DEBUG_KERNEL=y
1353# CONFIG_DEBUG_SHIRQ is not set
1354CONFIG_DETECT_SOFTLOCKUP=y
1355# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1356CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1357CONFIG_DETECT_HUNG_TASK=y
1358# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1359CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1360CONFIG_SCHED_DEBUG=y
1361# CONFIG_SCHEDSTATS is not set
1362# CONFIG_TIMER_STATS is not set
1363# CONFIG_DEBUG_OBJECTS is not set
1364# CONFIG_SLUB_DEBUG_ON is not set
1365# CONFIG_SLUB_STATS is not set
1366# CONFIG_DEBUG_RT_MUTEXES is not set
1367# CONFIG_RT_MUTEX_TESTER is not set
1368# CONFIG_DEBUG_SPINLOCK is not set
1369# CONFIG_DEBUG_MUTEXES is not set
1370# CONFIG_DEBUG_LOCK_ALLOC is not set
1371# CONFIG_PROVE_LOCKING is not set
1372# CONFIG_LOCK_STAT is not set
1373# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1374# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1375CONFIG_STACKTRACE=y
1376# CONFIG_DEBUG_KOBJECT is not set
1377CONFIG_DEBUG_BUGVERBOSE=y
1378# CONFIG_DEBUG_INFO is not set
1379# CONFIG_DEBUG_VM is not set
1380# CONFIG_DEBUG_WRITECOUNT is not set
1381# CONFIG_DEBUG_MEMORY_INIT is not set
1382# CONFIG_DEBUG_LIST is not set
1383# CONFIG_DEBUG_SG is not set
1384# CONFIG_DEBUG_NOTIFIERS is not set
1385# CONFIG_DEBUG_CREDENTIALS is not set
1386CONFIG_FRAME_POINTER=y
1387# CONFIG_BOOT_PRINTK_DELAY is not set
1388# CONFIG_RCU_TORTURE_TEST is not set
1389# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1390# CONFIG_KPROBES_SANITY_TEST is not set
1391# CONFIG_BACKTRACE_SELF_TEST is not set
1392# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1393# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1394# CONFIG_LKDTM is not set
1395# CONFIG_FAULT_INJECTION is not set
1396# CONFIG_PAGE_POISONING is not set
1397CONFIG_NOP_TRACER=y
1398CONFIG_RING_BUFFER=y
1399CONFIG_EVENT_TRACING=y
1400CONFIG_CONTEXT_SWITCH_TRACER=y
1401CONFIG_RING_BUFFER_ALLOW_SWAP=y
1402CONFIG_TRACING=y
1403CONFIG_TRACING_SUPPORT=y
1404CONFIG_FTRACE=y
1405# CONFIG_IRQSOFF_TRACER is not set
1406# CONFIG_SCHED_TRACER is not set
1407# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1408# CONFIG_BOOT_TRACER is not set
1409CONFIG_BRANCH_PROFILE_NONE=y
1410# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1411# CONFIG_PROFILE_ALL_BRANCHES is not set
1412# CONFIG_KMEMTRACE is not set
1413# CONFIG_WORKQUEUE_TRACER is not set
1414# CONFIG_BLK_DEV_IO_TRACE is not set
1415# CONFIG_RING_BUFFER_BENCHMARK is not set
1416# CONFIG_DYNAMIC_DEBUG is not set
1417# CONFIG_SAMPLES is not set
1418
1419#
1420# Security options
1421#
1422# CONFIG_KEYS is not set
1423# CONFIG_SECURITY is not set
1424# CONFIG_SECURITYFS is not set
1425# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1426CONFIG_CRYPTO=y
1427
1428#
1429# Crypto core or helper
1430#
1431# CONFIG_CRYPTO_FIPS is not set
1432CONFIG_CRYPTO_ALGAPI=y
1433CONFIG_CRYPTO_ALGAPI2=y
1434CONFIG_CRYPTO_AEAD=y
1435CONFIG_CRYPTO_AEAD2=y
1436CONFIG_CRYPTO_BLKCIPHER=y
1437CONFIG_CRYPTO_BLKCIPHER2=y
1438CONFIG_CRYPTO_HASH=y
1439CONFIG_CRYPTO_HASH2=y
1440CONFIG_CRYPTO_RNG=m
1441CONFIG_CRYPTO_RNG2=y
1442CONFIG_CRYPTO_PCOMP=y
1443CONFIG_CRYPTO_MANAGER=y
1444CONFIG_CRYPTO_MANAGER2=y
1445# CONFIG_CRYPTO_GF128MUL is not set
1446# CONFIG_CRYPTO_NULL is not set
1447CONFIG_CRYPTO_WORKQUEUE=y
1448# CONFIG_CRYPTO_CRYPTD is not set
1449CONFIG_CRYPTO_AUTHENC=y
1450# CONFIG_CRYPTO_TEST is not set
1451
1452#
1453# Authenticated Encryption with Associated Data
1454#
1455# CONFIG_CRYPTO_CCM is not set
1456# CONFIG_CRYPTO_GCM is not set
1457# CONFIG_CRYPTO_SEQIV is not set
1458
1459#
1460# Block modes
1461#
1462CONFIG_CRYPTO_CBC=y
1463# CONFIG_CRYPTO_CTR is not set
1464# CONFIG_CRYPTO_CTS is not set
1465CONFIG_CRYPTO_ECB=m
1466# CONFIG_CRYPTO_LRW is not set
1467# CONFIG_CRYPTO_PCBC is not set
1468# CONFIG_CRYPTO_XTS is not set
1469
1470#
1471# Hash modes
1472#
1473CONFIG_CRYPTO_HMAC=y
1474# CONFIG_CRYPTO_XCBC is not set
1475# CONFIG_CRYPTO_VMAC is not set
1476
1477#
1478# Digest
1479#
1480# CONFIG_CRYPTO_CRC32C is not set
1481# CONFIG_CRYPTO_GHASH is not set
1482# CONFIG_CRYPTO_MD4 is not set
1483CONFIG_CRYPTO_MD5=y
1484# CONFIG_CRYPTO_MICHAEL_MIC is not set
1485# CONFIG_CRYPTO_RMD128 is not set
1486# CONFIG_CRYPTO_RMD160 is not set
1487# CONFIG_CRYPTO_RMD256 is not set
1488# CONFIG_CRYPTO_RMD320 is not set
1489CONFIG_CRYPTO_SHA1=y
1490# CONFIG_CRYPTO_SHA256 is not set
1491# CONFIG_CRYPTO_SHA512 is not set
1492# CONFIG_CRYPTO_TGR192 is not set
1493# CONFIG_CRYPTO_WP512 is not set
1494
1495#
1496# Ciphers
1497#
1498CONFIG_CRYPTO_AES=m
1499# CONFIG_CRYPTO_ANUBIS is not set
1500CONFIG_CRYPTO_ARC4=m
1501# CONFIG_CRYPTO_BLOWFISH is not set
1502# CONFIG_CRYPTO_CAMELLIA is not set
1503# CONFIG_CRYPTO_CAST5 is not set
1504# CONFIG_CRYPTO_CAST6 is not set
1505CONFIG_CRYPTO_DES=y
1506# CONFIG_CRYPTO_FCRYPT is not set
1507# CONFIG_CRYPTO_KHAZAD is not set
1508# CONFIG_CRYPTO_SALSA20 is not set
1509# CONFIG_CRYPTO_SEED is not set
1510# CONFIG_CRYPTO_SERPENT is not set
1511# CONFIG_CRYPTO_TEA is not set
1512# CONFIG_CRYPTO_TWOFISH is not set
1513
1514#
1515# Compression
1516#
1517CONFIG_CRYPTO_DEFLATE=y
1518# CONFIG_CRYPTO_ZLIB is not set
1519CONFIG_CRYPTO_LZO=y
1520
1521#
1522# Random Number Generation
1523#
1524CONFIG_CRYPTO_ANSI_CPRNG=m
1525CONFIG_CRYPTO_HW=y
1526CONFIG_BINARY_PRINTF=y
1527
1528#
1529# Library routines
1530#
1531CONFIG_BITREVERSE=y
1532CONFIG_GENERIC_FIND_LAST_BIT=y
1533CONFIG_CRC_CCITT=m
1534CONFIG_CRC16=y
1535# CONFIG_CRC_T10DIF is not set
1536# CONFIG_CRC_ITU_T is not set
1537CONFIG_CRC32=y
1538# CONFIG_CRC7 is not set
1539# CONFIG_LIBCRC32C is not set
1540CONFIG_ZLIB_INFLATE=y
1541CONFIG_ZLIB_DEFLATE=y
1542CONFIG_LZO_COMPRESS=y
1543CONFIG_LZO_DECOMPRESS=y
1544CONFIG_DECOMPRESS_GZIP=y
1545CONFIG_GENERIC_ALLOCATOR=y
1546CONFIG_HAS_IOMEM=y
1547CONFIG_HAS_IOPORT=y
1548CONFIG_HAS_DMA=y
1549CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
new file mode 100644
index 000000000000..bbf6bc316ecf
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -0,0 +1,1549 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.32-rc5
4# Thu Nov 5 15:33:32 2009
5#
6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y
8CONFIG_GENERIC_HARDIRQS=y
9CONFIG_STACKTRACE_SUPPORT=y
10CONFIG_LOCKDEP_SUPPORT=y
11CONFIG_TRACE_IRQFLAGS_SUPPORT=y
12CONFIG_HARDIRQS_SW_RESEND=y
13CONFIG_GENERIC_IRQ_PROBE=y
14CONFIG_RWSEM_GENERIC_SPINLOCK=y
15CONFIG_GENERIC_TIME=y
16CONFIG_GENERIC_CLOCKEVENTS=y
17# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
18# CONFIG_ARCH_HAS_ILOG2_U32 is not set
19# CONFIG_ARCH_HAS_ILOG2_U64 is not set
20CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
25
26#
27# General setup
28#
29CONFIG_EXPERIMENTAL=y
30CONFIG_BROKEN_ON_SMP=y
31CONFIG_INIT_ENV_ARG_LIMIT=32
32CONFIG_LOCALVERSION=""
33# CONFIG_LOCALVERSION_AUTO is not set
34CONFIG_SWAP=y
35CONFIG_SYSVIPC=y
36CONFIG_SYSVIPC_SYSCTL=y
37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
39CONFIG_BSD_PROCESS_ACCT=y
40CONFIG_BSD_PROCESS_ACCT_V3=y
41# CONFIG_TASKSTATS is not set
42# CONFIG_AUDIT is not set
43
44#
45# RCU Subsystem
46#
47CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set
49# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set
52# CONFIG_TREE_RCU_TRACE is not set
53# CONFIG_IKCONFIG is not set
54CONFIG_LOG_BUF_SHIFT=14
55# CONFIG_GROUP_SCHED is not set
56# CONFIG_CGROUPS is not set
57CONFIG_SYSFS_DEPRECATED=y
58CONFIG_SYSFS_DEPRECATED_V2=y
59# CONFIG_RELAY is not set
60# CONFIG_NAMESPACES is not set
61CONFIG_BLK_DEV_INITRD=y
62CONFIG_INITRAMFS_SOURCE=""
63CONFIG_RD_GZIP=y
64# CONFIG_RD_BZIP2 is not set
65# CONFIG_RD_LZMA is not set
66CONFIG_CC_OPTIMIZE_FOR_SIZE=y
67CONFIG_SYSCTL=y
68CONFIG_ANON_INODES=y
69CONFIG_EMBEDDED=y
70# CONFIG_SYSCTL_SYSCALL is not set
71CONFIG_KALLSYMS=y
72# CONFIG_KALLSYMS_ALL is not set
73# CONFIG_KALLSYMS_EXTRA_PASS is not set
74CONFIG_HOTPLUG=y
75CONFIG_PRINTK=y
76CONFIG_BUG=y
77CONFIG_ELF_CORE=y
78# CONFIG_BASE_FULL is not set
79CONFIG_FUTEX=y
80CONFIG_EPOLL=y
81CONFIG_SIGNALFD=y
82CONFIG_TIMERFD=y
83CONFIG_EVENTFD=y
84CONFIG_SHMEM=y
85CONFIG_AIO=y
86
87#
88# Kernel Performance Events And Counters
89#
90CONFIG_VM_EVENT_COUNTERS=y
91CONFIG_SLUB_DEBUG=y
92# CONFIG_COMPAT_BRK is not set
93# CONFIG_SLAB is not set
94CONFIG_SLUB=y
95# CONFIG_SLOB is not set
96CONFIG_PROFILING=y
97CONFIG_TRACEPOINTS=y
98CONFIG_OPROFILE=m
99CONFIG_HAVE_OPROFILE=y
100CONFIG_KPROBES=y
101CONFIG_HAVE_KPROBES=y
102CONFIG_HAVE_CLK=y
103
104#
105# GCOV-based kernel profiling
106#
107# CONFIG_GCOV_KERNEL is not set
108CONFIG_SLOW_WORK=y
109# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
110CONFIG_SLABINFO=y
111CONFIG_RT_MUTEXES=y
112CONFIG_BASE_SMALL=1
113CONFIG_MODULES=y
114# CONFIG_MODULE_FORCE_LOAD is not set
115CONFIG_MODULE_UNLOAD=y
116CONFIG_MODULE_FORCE_UNLOAD=y
117# CONFIG_MODVERSIONS is not set
118# CONFIG_MODULE_SRCVERSION_ALL is not set
119CONFIG_BLOCK=y
120CONFIG_LBDAF=y
121# CONFIG_BLK_DEV_BSG is not set
122# CONFIG_BLK_DEV_INTEGRITY is not set
123
124#
125# IO Schedulers
126#
127CONFIG_IOSCHED_NOOP=y
128# CONFIG_IOSCHED_AS is not set
129# CONFIG_IOSCHED_DEADLINE is not set
130CONFIG_IOSCHED_CFQ=y
131# CONFIG_DEFAULT_AS is not set
132# CONFIG_DEFAULT_DEADLINE is not set
133CONFIG_DEFAULT_CFQ=y
134# CONFIG_DEFAULT_NOOP is not set
135CONFIG_DEFAULT_IOSCHED="cfq"
136CONFIG_FREEZER=y
137
138#
139# System Type and features
140#
141CONFIG_TICK_ONESHOT=y
142CONFIG_NO_HZ=y
143CONFIG_HIGH_RES_TIMERS=y
144CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
145CONFIG_SUBARCH_AVR32B=y
146CONFIG_MMU=y
147CONFIG_PERFORMANCE_COUNTERS=y
148CONFIG_PLATFORM_AT32AP=y
149CONFIG_CPU_AT32AP700X=y
150CONFIG_CPU_AT32AP7000=y
151CONFIG_BOARD_ATNGW100_COMMON=y
152# CONFIG_BOARD_ATSTK1000 is not set
153# CONFIG_BOARD_ATNGW100_MKI is not set
154CONFIG_BOARD_ATNGW100_MKII=y
155# CONFIG_BOARD_HAMMERHEAD is not set
156# CONFIG_BOARD_FAVR_32 is not set
157# CONFIG_BOARD_MERISC is not set
158# CONFIG_BOARD_MIMC200 is not set
159CONFIG_BOARD_ATNGW100_MKII_LCD=y
160# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
161CONFIG_BOARD_ATNGW100_EVKLCD10X=y
162# CONFIG_BOARD_ATNGW100_MRMT is not set
163# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set
164CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y
165# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
166CONFIG_LOADER_U_BOOT=y
167
168#
169# Atmel AVR32 AP options
170#
171# CONFIG_AP700X_32_BIT_SMC is not set
172CONFIG_AP700X_16_BIT_SMC=y
173# CONFIG_AP700X_8_BIT_SMC is not set
174CONFIG_LOAD_ADDRESS=0x10000000
175CONFIG_ENTRY_ADDRESS=0x90000000
176CONFIG_PHYS_OFFSET=0x10000000
177CONFIG_PREEMPT_NONE=y
178# CONFIG_PREEMPT_VOLUNTARY is not set
179# CONFIG_PREEMPT is not set
180CONFIG_QUICKLIST=y
181# CONFIG_HAVE_ARCH_BOOTMEM is not set
182# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
183# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
184CONFIG_ARCH_FLATMEM_ENABLE=y
185# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
186# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
187CONFIG_SELECT_MEMORY_MODEL=y
188CONFIG_FLATMEM_MANUAL=y
189# CONFIG_DISCONTIGMEM_MANUAL is not set
190# CONFIG_SPARSEMEM_MANUAL is not set
191CONFIG_FLATMEM=y
192CONFIG_FLAT_NODE_MEM_MAP=y
193CONFIG_PAGEFLAGS_EXTENDED=y
194CONFIG_SPLIT_PTLOCK_CPUS=4
195# CONFIG_PHYS_ADDR_T_64BIT is not set
196CONFIG_ZONE_DMA_FLAG=0
197CONFIG_NR_QUICK=2
198CONFIG_VIRT_TO_BUS=y
199CONFIG_HAVE_MLOCK=y
200CONFIG_HAVE_MLOCKED_PAGE_BIT=y
201# CONFIG_KSM is not set
202CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
203# CONFIG_OWNERSHIP_TRACE is not set
204CONFIG_NMI_DEBUGGING=y
205# CONFIG_HZ_100 is not set
206CONFIG_HZ_250=y
207# CONFIG_HZ_300 is not set
208# CONFIG_HZ_1000 is not set
209CONFIG_HZ=250
210CONFIG_SCHED_HRTICK=y
211CONFIG_CMDLINE=""
212
213#
214# Power management options
215#
216CONFIG_PM=y
217# CONFIG_PM_DEBUG is not set
218CONFIG_PM_SLEEP=y
219CONFIG_SUSPEND=y
220CONFIG_SUSPEND_FREEZER=y
221# CONFIG_PM_RUNTIME is not set
222CONFIG_ARCH_SUSPEND_POSSIBLE=y
223
224#
225# CPU Frequency scaling
226#
227CONFIG_CPU_FREQ=y
228CONFIG_CPU_FREQ_TABLE=y
229# CONFIG_CPU_FREQ_DEBUG is not set
230# CONFIG_CPU_FREQ_STAT is not set
231# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
232# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
233# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
234CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
235# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
236CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
237# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
238CONFIG_CPU_FREQ_GOV_USERSPACE=y
239CONFIG_CPU_FREQ_GOV_ONDEMAND=y
240# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
241CONFIG_CPU_FREQ_AT32AP=y
242
243#
244# Bus options
245#
246# CONFIG_ARCH_SUPPORTS_MSI is not set
247# CONFIG_PCCARD is not set
248
249#
250# Executable file formats
251#
252CONFIG_BINFMT_ELF=y
253# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
254# CONFIG_HAVE_AOUT is not set
255# CONFIG_BINFMT_MISC is not set
256CONFIG_NET=y
257
258#
259# Networking options
260#
261CONFIG_PACKET=y
262CONFIG_PACKET_MMAP=y
263CONFIG_UNIX=y
264CONFIG_XFRM=y
265CONFIG_XFRM_USER=y
266# CONFIG_XFRM_SUB_POLICY is not set
267# CONFIG_XFRM_MIGRATE is not set
268# CONFIG_XFRM_STATISTICS is not set
269CONFIG_XFRM_IPCOMP=y
270CONFIG_NET_KEY=y
271# CONFIG_NET_KEY_MIGRATE is not set
272CONFIG_INET=y
273CONFIG_IP_MULTICAST=y
274CONFIG_IP_ADVANCED_ROUTER=y
275CONFIG_ASK_IP_FIB_HASH=y
276# CONFIG_IP_FIB_TRIE is not set
277CONFIG_IP_FIB_HASH=y
278# CONFIG_IP_MULTIPLE_TABLES is not set
279# CONFIG_IP_ROUTE_MULTIPATH is not set
280# CONFIG_IP_ROUTE_VERBOSE is not set
281CONFIG_IP_PNP=y
282CONFIG_IP_PNP_DHCP=y
283# CONFIG_IP_PNP_BOOTP is not set
284# CONFIG_IP_PNP_RARP is not set
285# CONFIG_NET_IPIP is not set
286# CONFIG_NET_IPGRE is not set
287CONFIG_IP_MROUTE=y
288CONFIG_IP_PIMSM_V1=y
289# CONFIG_IP_PIMSM_V2 is not set
290# CONFIG_ARPD is not set
291CONFIG_SYN_COOKIES=y
292CONFIG_INET_AH=y
293CONFIG_INET_ESP=y
294CONFIG_INET_IPCOMP=y
295CONFIG_INET_XFRM_TUNNEL=y
296CONFIG_INET_TUNNEL=y
297CONFIG_INET_XFRM_MODE_TRANSPORT=y
298CONFIG_INET_XFRM_MODE_TUNNEL=y
299CONFIG_INET_XFRM_MODE_BEET=y
300# CONFIG_INET_LRO is not set
301CONFIG_INET_DIAG=y
302CONFIG_INET_TCP_DIAG=y
303# CONFIG_TCP_CONG_ADVANCED is not set
304CONFIG_TCP_CONG_CUBIC=y
305CONFIG_DEFAULT_TCP_CONG="cubic"
306# CONFIG_TCP_MD5SIG is not set
307CONFIG_IPV6=y
308# CONFIG_IPV6_PRIVACY is not set
309# CONFIG_IPV6_ROUTER_PREF is not set
310# CONFIG_IPV6_OPTIMISTIC_DAD is not set
311CONFIG_INET6_AH=y
312CONFIG_INET6_ESP=y
313CONFIG_INET6_IPCOMP=y
314# CONFIG_IPV6_MIP6 is not set
315CONFIG_INET6_XFRM_TUNNEL=y
316CONFIG_INET6_TUNNEL=y
317CONFIG_INET6_XFRM_MODE_TRANSPORT=y
318CONFIG_INET6_XFRM_MODE_TUNNEL=y
319CONFIG_INET6_XFRM_MODE_BEET=y
320# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
321CONFIG_IPV6_SIT=y
322CONFIG_IPV6_NDISC_NODETYPE=y
323# CONFIG_IPV6_TUNNEL is not set
324# CONFIG_IPV6_MULTIPLE_TABLES is not set
325# CONFIG_IPV6_MROUTE is not set
326# CONFIG_NETWORK_SECMARK is not set
327CONFIG_NETFILTER=y
328# CONFIG_NETFILTER_DEBUG is not set
329# CONFIG_NETFILTER_ADVANCED is not set
330
331#
332# Core Netfilter Configuration
333#
334CONFIG_NETFILTER_NETLINK=m
335CONFIG_NETFILTER_NETLINK_LOG=m
336CONFIG_NF_CONNTRACK=m
337CONFIG_NF_CONNTRACK_FTP=m
338CONFIG_NF_CONNTRACK_IRC=m
339CONFIG_NF_CONNTRACK_SIP=m
340CONFIG_NF_CT_NETLINK=m
341CONFIG_NETFILTER_XTABLES=y
342CONFIG_NETFILTER_XT_TARGET_MARK=m
343CONFIG_NETFILTER_XT_TARGET_NFLOG=m
344CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
345CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
346CONFIG_NETFILTER_XT_MATCH_MARK=m
347CONFIG_NETFILTER_XT_MATCH_POLICY=m
348CONFIG_NETFILTER_XT_MATCH_STATE=m
349# CONFIG_IP_VS is not set
350
351#
352# IP: Netfilter Configuration
353#
354CONFIG_NF_DEFRAG_IPV4=m
355CONFIG_NF_CONNTRACK_IPV4=m
356CONFIG_NF_CONNTRACK_PROC_COMPAT=y
357CONFIG_IP_NF_IPTABLES=m
358CONFIG_IP_NF_FILTER=m
359CONFIG_IP_NF_TARGET_REJECT=m
360CONFIG_IP_NF_TARGET_LOG=m
361# CONFIG_IP_NF_TARGET_ULOG is not set
362CONFIG_NF_NAT=m
363CONFIG_NF_NAT_NEEDED=y
364CONFIG_IP_NF_TARGET_MASQUERADE=m
365CONFIG_NF_NAT_FTP=m
366CONFIG_NF_NAT_IRC=m
367# CONFIG_NF_NAT_TFTP is not set
368# CONFIG_NF_NAT_AMANDA is not set
369# CONFIG_NF_NAT_PPTP is not set
370# CONFIG_NF_NAT_H323 is not set
371CONFIG_NF_NAT_SIP=m
372CONFIG_IP_NF_MANGLE=m
373
374#
375# IPv6: Netfilter Configuration
376#
377CONFIG_NF_CONNTRACK_IPV6=m
378CONFIG_IP6_NF_IPTABLES=m
379CONFIG_IP6_NF_MATCH_IPV6HEADER=m
380CONFIG_IP6_NF_TARGET_LOG=m
381CONFIG_IP6_NF_FILTER=m
382CONFIG_IP6_NF_TARGET_REJECT=m
383CONFIG_IP6_NF_MANGLE=m
384# CONFIG_IP_DCCP is not set
385# CONFIG_IP_SCTP is not set
386# CONFIG_RDS is not set
387# CONFIG_TIPC is not set
388# CONFIG_ATM is not set
389CONFIG_STP=m
390CONFIG_BRIDGE=m
391# CONFIG_NET_DSA is not set
392CONFIG_VLAN_8021Q=m
393# CONFIG_VLAN_8021Q_GVRP is not set
394# CONFIG_DECNET is not set
395CONFIG_LLC=m
396# CONFIG_LLC2 is not set
397# CONFIG_IPX is not set
398# CONFIG_ATALK is not set
399# CONFIG_X25 is not set
400# CONFIG_LAPB is not set
401# CONFIG_ECONET is not set
402# CONFIG_WAN_ROUTER is not set
403# CONFIG_PHONET is not set
404# CONFIG_IEEE802154 is not set
405# CONFIG_NET_SCHED is not set
406# CONFIG_DCB is not set
407
408#
409# Network testing
410#
411# CONFIG_NET_PKTGEN is not set
412# CONFIG_NET_TCPPROBE is not set
413# CONFIG_NET_DROP_MONITOR is not set
414# CONFIG_HAMRADIO is not set
415# CONFIG_CAN is not set
416# CONFIG_IRDA is not set
417# CONFIG_BT is not set
418# CONFIG_AF_RXRPC is not set
419CONFIG_WIRELESS=y
420# CONFIG_CFG80211 is not set
421CONFIG_CFG80211_DEFAULT_PS_VALUE=0
422# CONFIG_WIRELESS_OLD_REGULATORY is not set
423# CONFIG_WIRELESS_EXT is not set
424# CONFIG_LIB80211 is not set
425
426#
427# CFG80211 needs to be enabled for MAC80211
428#
429# CONFIG_WIMAX is not set
430# CONFIG_RFKILL is not set
431# CONFIG_NET_9P is not set
432
433#
434# Device Drivers
435#
436
437#
438# Generic Driver Options
439#
440CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
441# CONFIG_DEVTMPFS is not set
442CONFIG_STANDALONE=y
443# CONFIG_PREVENT_FIRMWARE_BUILD is not set
444# CONFIG_FW_LOADER is not set
445# CONFIG_DEBUG_DRIVER is not set
446# CONFIG_DEBUG_DEVRES is not set
447# CONFIG_SYS_HYPERVISOR is not set
448# CONFIG_CONNECTOR is not set
449CONFIG_MTD=y
450# CONFIG_MTD_DEBUG is not set
451# CONFIG_MTD_TESTS is not set
452# CONFIG_MTD_CONCAT is not set
453CONFIG_MTD_PARTITIONS=y
454# CONFIG_MTD_REDBOOT_PARTS is not set
455CONFIG_MTD_CMDLINE_PARTS=y
456# CONFIG_MTD_AR7_PARTS is not set
457
458#
459# User Modules And Translation Layers
460#
461CONFIG_MTD_CHAR=y
462CONFIG_MTD_BLKDEVS=y
463CONFIG_MTD_BLOCK=y
464# CONFIG_FTL is not set
465# CONFIG_NFTL is not set
466# CONFIG_INFTL is not set
467# CONFIG_RFD_FTL is not set
468# CONFIG_SSFDC is not set
469# CONFIG_MTD_OOPS is not set
470
471#
472# RAM/ROM/Flash chip drivers
473#
474CONFIG_MTD_CFI=y
475# CONFIG_MTD_JEDECPROBE is not set
476CONFIG_MTD_GEN_PROBE=y
477# CONFIG_MTD_CFI_ADV_OPTIONS is not set
478CONFIG_MTD_MAP_BANK_WIDTH_1=y
479CONFIG_MTD_MAP_BANK_WIDTH_2=y
480CONFIG_MTD_MAP_BANK_WIDTH_4=y
481# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
482# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
483# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
484CONFIG_MTD_CFI_I1=y
485CONFIG_MTD_CFI_I2=y
486# CONFIG_MTD_CFI_I4 is not set
487# CONFIG_MTD_CFI_I8 is not set
488CONFIG_MTD_CFI_INTELEXT=y
489# CONFIG_MTD_CFI_AMDSTD is not set
490# CONFIG_MTD_CFI_STAA is not set
491CONFIG_MTD_CFI_UTIL=y
492# CONFIG_MTD_RAM is not set
493# CONFIG_MTD_ROM is not set
494# CONFIG_MTD_ABSENT is not set
495
496#
497# Mapping drivers for chip access
498#
499# CONFIG_MTD_COMPLEX_MAPPINGS is not set
500CONFIG_MTD_PHYSMAP=y
501# CONFIG_MTD_PHYSMAP_COMPAT is not set
502# CONFIG_MTD_PLATRAM is not set
503
504#
505# Self-contained MTD device drivers
506#
507CONFIG_MTD_DATAFLASH=y
508# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
509# CONFIG_MTD_DATAFLASH_OTP is not set
510# CONFIG_MTD_M25P80 is not set
511# CONFIG_MTD_SST25L is not set
512# CONFIG_MTD_SLRAM is not set
513# CONFIG_MTD_PHRAM is not set
514# CONFIG_MTD_MTDRAM is not set
515# CONFIG_MTD_BLOCK2MTD is not set
516
517#
518# Disk-On-Chip Device Drivers
519#
520# CONFIG_MTD_DOC2000 is not set
521# CONFIG_MTD_DOC2001 is not set
522# CONFIG_MTD_DOC2001PLUS is not set
523CONFIG_MTD_NAND=y
524# CONFIG_MTD_NAND_VERIFY_WRITE is not set
525# CONFIG_MTD_NAND_ECC_SMC is not set
526# CONFIG_MTD_NAND_MUSEUM_IDS is not set
527CONFIG_MTD_NAND_IDS=y
528# CONFIG_MTD_NAND_DISKONCHIP is not set
529CONFIG_MTD_NAND_ATMEL=y
530CONFIG_MTD_NAND_ATMEL_ECC_HW=y
531# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
532# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
533# CONFIG_MTD_NAND_NANDSIM is not set
534# CONFIG_MTD_NAND_PLATFORM is not set
535# CONFIG_MTD_ONENAND is not set
536
537#
538# LPDDR flash memory drivers
539#
540# CONFIG_MTD_LPDDR is not set
541
542#
543# UBI - Unsorted block images
544#
545CONFIG_MTD_UBI=y
546CONFIG_MTD_UBI_WL_THRESHOLD=4096
547CONFIG_MTD_UBI_BEB_RESERVE=1
548# CONFIG_MTD_UBI_GLUEBI is not set
549
550#
551# UBI debugging options
552#
553# CONFIG_MTD_UBI_DEBUG is not set
554# CONFIG_PARPORT is not set
555CONFIG_BLK_DEV=y
556# CONFIG_BLK_DEV_COW_COMMON is not set
557CONFIG_BLK_DEV_LOOP=m
558# CONFIG_BLK_DEV_CRYPTOLOOP is not set
559CONFIG_BLK_DEV_NBD=m
560CONFIG_BLK_DEV_RAM=m
561CONFIG_BLK_DEV_RAM_COUNT=16
562CONFIG_BLK_DEV_RAM_SIZE=4096
563# CONFIG_BLK_DEV_XIP is not set
564# CONFIG_CDROM_PKTCDVD is not set
565# CONFIG_ATA_OVER_ETH is not set
566CONFIG_MISC_DEVICES=y
567# CONFIG_ATMEL_PWM is not set
568CONFIG_ATMEL_TCLIB=y
569CONFIG_ATMEL_TCB_CLKSRC=y
570CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
571# CONFIG_ICS932S401 is not set
572# CONFIG_ATMEL_SSC is not set
573# CONFIG_ENCLOSURE_SERVICES is not set
574# CONFIG_ISL29003 is not set
575# CONFIG_C2PORT is not set
576
577#
578# EEPROM support
579#
580# CONFIG_EEPROM_AT24 is not set
581# CONFIG_EEPROM_AT25 is not set
582# CONFIG_EEPROM_LEGACY is not set
583# CONFIG_EEPROM_MAX6875 is not set
584# CONFIG_EEPROM_93CX6 is not set
585
586#
587# SCSI device support
588#
589# CONFIG_RAID_ATTRS is not set
590# CONFIG_SCSI is not set
591# CONFIG_SCSI_DMA is not set
592# CONFIG_SCSI_NETLINK is not set
593# CONFIG_ATA is not set
594# CONFIG_MD is not set
595CONFIG_NETDEVICES=y
596# CONFIG_DUMMY is not set
597# CONFIG_BONDING is not set
598# CONFIG_MACVLAN is not set
599# CONFIG_EQUALIZER is not set
600# CONFIG_TUN is not set
601# CONFIG_VETH is not set
602CONFIG_PHYLIB=y
603
604#
605# MII PHY device drivers
606#
607# CONFIG_MARVELL_PHY is not set
608# CONFIG_DAVICOM_PHY is not set
609# CONFIG_QSEMI_PHY is not set
610# CONFIG_LXT_PHY is not set
611# CONFIG_CICADA_PHY is not set
612# CONFIG_VITESSE_PHY is not set
613# CONFIG_SMSC_PHY is not set
614# CONFIG_BROADCOM_PHY is not set
615# CONFIG_ICPLUS_PHY is not set
616# CONFIG_REALTEK_PHY is not set
617# CONFIG_NATIONAL_PHY is not set
618# CONFIG_STE10XP is not set
619# CONFIG_LSI_ET1011C_PHY is not set
620# CONFIG_FIXED_PHY is not set
621# CONFIG_MDIO_BITBANG is not set
622CONFIG_NET_ETHERNET=y
623# CONFIG_MII is not set
624CONFIG_MACB=y
625# CONFIG_ENC28J60 is not set
626# CONFIG_ETHOC is not set
627# CONFIG_DNET is not set
628# CONFIG_IBM_NEW_EMAC_ZMII is not set
629# CONFIG_IBM_NEW_EMAC_RGMII is not set
630# CONFIG_IBM_NEW_EMAC_TAH is not set
631# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
632# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
633# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
634# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
635# CONFIG_B44 is not set
636# CONFIG_KS8842 is not set
637# CONFIG_KS8851 is not set
638# CONFIG_KS8851_MLL is not set
639# CONFIG_NETDEV_1000 is not set
640# CONFIG_NETDEV_10000 is not set
641CONFIG_WLAN=y
642# CONFIG_WLAN_PRE80211 is not set
643# CONFIG_WLAN_80211 is not set
644
645#
646# Enable WiMAX (Networking options) to see the WiMAX drivers
647#
648# CONFIG_WAN is not set
649CONFIG_PPP=m
650# CONFIG_PPP_MULTILINK is not set
651CONFIG_PPP_FILTER=y
652CONFIG_PPP_ASYNC=m
653# CONFIG_PPP_SYNC_TTY is not set
654CONFIG_PPP_DEFLATE=m
655CONFIG_PPP_BSDCOMP=m
656CONFIG_PPP_MPPE=m
657CONFIG_PPPOE=m
658# CONFIG_PPPOL2TP is not set
659# CONFIG_SLIP is not set
660CONFIG_SLHC=m
661# CONFIG_NETCONSOLE is not set
662# CONFIG_NETPOLL is not set
663# CONFIG_NET_POLL_CONTROLLER is not set
664# CONFIG_ISDN is not set
665# CONFIG_PHONE is not set
666
667#
668# Input device support
669#
670CONFIG_INPUT=y
671# CONFIG_INPUT_FF_MEMLESS is not set
672# CONFIG_INPUT_POLLDEV is not set
673
674#
675# Userland interfaces
676#
677# CONFIG_INPUT_MOUSEDEV is not set
678# CONFIG_INPUT_JOYDEV is not set
679CONFIG_INPUT_EVDEV=m
680# CONFIG_INPUT_EVBUG is not set
681
682#
683# Input Device Drivers
684#
685# CONFIG_INPUT_KEYBOARD is not set
686# CONFIG_INPUT_MOUSE is not set
687# CONFIG_INPUT_JOYSTICK is not set
688# CONFIG_INPUT_TABLET is not set
689CONFIG_INPUT_TOUCHSCREEN=y
690# CONFIG_TOUCHSCREEN_ADS7846 is not set
691# CONFIG_TOUCHSCREEN_AD7877 is not set
692# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
693# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
694# CONFIG_TOUCHSCREEN_AD7879 is not set
695# CONFIG_TOUCHSCREEN_EETI is not set
696# CONFIG_TOUCHSCREEN_FUJITSU is not set
697# CONFIG_TOUCHSCREEN_GUNZE is not set
698# CONFIG_TOUCHSCREEN_ELO is not set
699# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
700# CONFIG_TOUCHSCREEN_MCS5000 is not set
701# CONFIG_TOUCHSCREEN_MTOUCH is not set
702# CONFIG_TOUCHSCREEN_INEXIO is not set
703# CONFIG_TOUCHSCREEN_MK712 is not set
704# CONFIG_TOUCHSCREEN_PENMOUNT is not set
705# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
706# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
707CONFIG_TOUCHSCREEN_WM97XX=m
708CONFIG_TOUCHSCREEN_WM9705=y
709CONFIG_TOUCHSCREEN_WM9712=y
710CONFIG_TOUCHSCREEN_WM9713=y
711# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
712# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
713# CONFIG_TOUCHSCREEN_TSC2007 is not set
714# CONFIG_TOUCHSCREEN_W90X900 is not set
715# CONFIG_INPUT_MISC is not set
716
717#
718# Hardware I/O ports
719#
720# CONFIG_SERIO is not set
721# CONFIG_GAMEPORT is not set
722
723#
724# Character devices
725#
726CONFIG_VT=y
727CONFIG_CONSOLE_TRANSLATIONS=y
728CONFIG_VT_CONSOLE=y
729CONFIG_HW_CONSOLE=y
730# CONFIG_VT_HW_CONSOLE_BINDING is not set
731CONFIG_DEVKMEM=y
732# CONFIG_SERIAL_NONSTANDARD is not set
733
734#
735# Serial drivers
736#
737# CONFIG_SERIAL_8250 is not set
738
739#
740# Non-8250 serial port support
741#
742CONFIG_SERIAL_ATMEL=y
743CONFIG_SERIAL_ATMEL_CONSOLE=y
744CONFIG_SERIAL_ATMEL_PDC=y
745# CONFIG_SERIAL_ATMEL_TTYAT is not set
746# CONFIG_SERIAL_MAX3100 is not set
747CONFIG_SERIAL_CORE=y
748CONFIG_SERIAL_CORE_CONSOLE=y
749CONFIG_UNIX98_PTYS=y
750# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
751# CONFIG_LEGACY_PTYS is not set
752# CONFIG_IPMI_HANDLER is not set
753# CONFIG_HW_RANDOM is not set
754# CONFIG_R3964 is not set
755# CONFIG_RAW_DRIVER is not set
756# CONFIG_TCG_TPM is not set
757CONFIG_I2C=m
758CONFIG_I2C_BOARDINFO=y
759CONFIG_I2C_COMPAT=y
760CONFIG_I2C_CHARDEV=m
761CONFIG_I2C_HELPER_AUTO=y
762CONFIG_I2C_ALGOBIT=m
763
764#
765# I2C Hardware Bus support
766#
767
768#
769# I2C system bus drivers (mostly embedded / system-on-chip)
770#
771# CONFIG_I2C_DESIGNWARE is not set
772CONFIG_I2C_GPIO=m
773# CONFIG_I2C_OCORES is not set
774# CONFIG_I2C_SIMTEC is not set
775
776#
777# External I2C/SMBus adapter drivers
778#
779# CONFIG_I2C_PARPORT_LIGHT is not set
780# CONFIG_I2C_TAOS_EVM is not set
781
782#
783# Other I2C/SMBus bus drivers
784#
785# CONFIG_I2C_PCA_PLATFORM is not set
786# CONFIG_I2C_STUB is not set
787
788#
789# Miscellaneous I2C Chip support
790#
791# CONFIG_DS1682 is not set
792# CONFIG_SENSORS_TSL2550 is not set
793# CONFIG_I2C_DEBUG_CORE is not set
794# CONFIG_I2C_DEBUG_ALGO is not set
795# CONFIG_I2C_DEBUG_BUS is not set
796# CONFIG_I2C_DEBUG_CHIP is not set
797CONFIG_SPI=y
798# CONFIG_SPI_DEBUG is not set
799CONFIG_SPI_MASTER=y
800
801#
802# SPI Master Controller Drivers
803#
804CONFIG_SPI_ATMEL=y
805# CONFIG_SPI_BITBANG is not set
806# CONFIG_SPI_GPIO is not set
807
808#
809# SPI Protocol Masters
810#
811CONFIG_SPI_SPIDEV=m
812# CONFIG_SPI_TLE62X0 is not set
813
814#
815# PPS support
816#
817# CONFIG_PPS is not set
818CONFIG_ARCH_REQUIRE_GPIOLIB=y
819CONFIG_GPIOLIB=y
820# CONFIG_DEBUG_GPIO is not set
821# CONFIG_GPIO_SYSFS is not set
822
823#
824# Memory mapped GPIO expanders:
825#
826
827#
828# I2C GPIO expanders:
829#
830# CONFIG_GPIO_MAX732X is not set
831# CONFIG_GPIO_PCA953X is not set
832# CONFIG_GPIO_PCF857X is not set
833
834#
835# PCI GPIO expanders:
836#
837
838#
839# SPI GPIO expanders:
840#
841# CONFIG_GPIO_MAX7301 is not set
842# CONFIG_GPIO_MCP23S08 is not set
843# CONFIG_GPIO_MC33880 is not set
844
845#
846# AC97 GPIO expanders:
847#
848# CONFIG_W1 is not set
849# CONFIG_POWER_SUPPLY is not set
850# CONFIG_HWMON is not set
851# CONFIG_THERMAL is not set
852CONFIG_WATCHDOG=y
853# CONFIG_WATCHDOG_NOWAYOUT is not set
854
855#
856# Watchdog Device Drivers
857#
858# CONFIG_SOFT_WATCHDOG is not set
859CONFIG_AT32AP700X_WDT=y
860CONFIG_SSB_POSSIBLE=y
861
862#
863# Sonics Silicon Backplane
864#
865# CONFIG_SSB is not set
866
867#
868# Multifunction device drivers
869#
870# CONFIG_MFD_CORE is not set
871# CONFIG_MFD_SM501 is not set
872# CONFIG_HTC_PASIC3 is not set
873# CONFIG_UCB1400_CORE is not set
874# CONFIG_TPS65010 is not set
875# CONFIG_MFD_TMIO is not set
876# CONFIG_MFD_WM8400 is not set
877# CONFIG_MFD_WM831X is not set
878# CONFIG_MFD_WM8350_I2C is not set
879# CONFIG_MFD_PCF50633 is not set
880# CONFIG_MFD_MC13783 is not set
881# CONFIG_AB3100_CORE is not set
882# CONFIG_EZX_PCAP is not set
883# CONFIG_REGULATOR is not set
884# CONFIG_MEDIA_SUPPORT is not set
885
886#
887# Graphics support
888#
889# CONFIG_VGASTATE is not set
890# CONFIG_VIDEO_OUTPUT_CONTROL is not set
891CONFIG_FB=y
892# CONFIG_FIRMWARE_EDID is not set
893# CONFIG_FB_DDC is not set
894# CONFIG_FB_BOOT_VESA_SUPPORT is not set
895CONFIG_FB_CFB_FILLRECT=y
896CONFIG_FB_CFB_COPYAREA=y
897CONFIG_FB_CFB_IMAGEBLIT=y
898# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
899# CONFIG_FB_SYS_FILLRECT is not set
900# CONFIG_FB_SYS_COPYAREA is not set
901# CONFIG_FB_SYS_IMAGEBLIT is not set
902# CONFIG_FB_FOREIGN_ENDIAN is not set
903# CONFIG_FB_SYS_FOPS is not set
904# CONFIG_FB_SVGALIB is not set
905# CONFIG_FB_MACMODES is not set
906# CONFIG_FB_BACKLIGHT is not set
907# CONFIG_FB_MODE_HELPERS is not set
908# CONFIG_FB_TILEBLITTING is not set
909
910#
911# Frame buffer hardware drivers
912#
913# CONFIG_FB_S1D13XXX is not set
914CONFIG_FB_ATMEL=y
915# CONFIG_FB_VIRTUAL is not set
916# CONFIG_FB_METRONOME is not set
917# CONFIG_FB_MB862XX is not set
918# CONFIG_FB_BROADSHEET is not set
919# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
920
921#
922# Display device support
923#
924# CONFIG_DISPLAY_SUPPORT is not set
925
926#
927# Console display driver support
928#
929CONFIG_DUMMY_CONSOLE=y
930# CONFIG_FRAMEBUFFER_CONSOLE is not set
931# CONFIG_LOGO is not set
932CONFIG_SOUND=y
933CONFIG_SOUND_OSS_CORE=y
934CONFIG_SOUND_OSS_CORE_PRECLAIM=y
935CONFIG_SND=y
936CONFIG_SND_TIMER=y
937CONFIG_SND_PCM=m
938# CONFIG_SND_SEQUENCER is not set
939CONFIG_SND_OSSEMUL=y
940CONFIG_SND_MIXER_OSS=m
941CONFIG_SND_PCM_OSS=m
942CONFIG_SND_PCM_OSS_PLUGINS=y
943CONFIG_SND_HRTIMER=y
944# CONFIG_SND_DYNAMIC_MINORS is not set
945# CONFIG_SND_SUPPORT_OLD_API is not set
946CONFIG_SND_VERBOSE_PROCFS=y
947# CONFIG_SND_VERBOSE_PRINTK is not set
948# CONFIG_SND_DEBUG is not set
949CONFIG_SND_VMASTER=y
950# CONFIG_SND_RAWMIDI_SEQ is not set
951# CONFIG_SND_OPL3_LIB_SEQ is not set
952# CONFIG_SND_OPL4_LIB_SEQ is not set
953# CONFIG_SND_SBAWE_SEQ is not set
954# CONFIG_SND_EMU10K1_SEQ is not set
955CONFIG_SND_AC97_CODEC=m
956# CONFIG_SND_DRIVERS is not set
957
958#
959# Atmel devices (AVR32 and AT91)
960#
961# CONFIG_SND_ATMEL_ABDAC is not set
962CONFIG_SND_ATMEL_AC97C=m
963# CONFIG_SND_SPI is not set
964# CONFIG_SND_SOC is not set
965# CONFIG_SOUND_PRIME is not set
966CONFIG_AC97_BUS=m
967CONFIG_HID_SUPPORT=y
968CONFIG_HID=y
969# CONFIG_HIDRAW is not set
970# CONFIG_HID_PID is not set
971
972#
973# Special HID drivers
974#
975CONFIG_USB_SUPPORT=y
976# CONFIG_USB_ARCH_HAS_HCD is not set
977# CONFIG_USB_ARCH_HAS_OHCI is not set
978# CONFIG_USB_ARCH_HAS_EHCI is not set
979# CONFIG_USB_OTG_WHITELIST is not set
980# CONFIG_USB_OTG_BLACKLIST_HUB is not set
981# CONFIG_USB_GADGET_MUSB_HDRC is not set
982
983#
984# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
985#
986CONFIG_USB_GADGET=y
987# CONFIG_USB_GADGET_DEBUG is not set
988# CONFIG_USB_GADGET_DEBUG_FILES is not set
989# CONFIG_USB_GADGET_DEBUG_FS is not set
990CONFIG_USB_GADGET_VBUS_DRAW=350
991CONFIG_USB_GADGET_SELECTED=y
992# CONFIG_USB_GADGET_AT91 is not set
993CONFIG_USB_GADGET_ATMEL_USBA=y
994CONFIG_USB_ATMEL_USBA=y
995# CONFIG_USB_GADGET_FSL_USB2 is not set
996# CONFIG_USB_GADGET_LH7A40X is not set
997# CONFIG_USB_GADGET_OMAP is not set
998# CONFIG_USB_GADGET_PXA25X is not set
999# CONFIG_USB_GADGET_R8A66597 is not set
1000# CONFIG_USB_GADGET_PXA27X is not set
1001# CONFIG_USB_GADGET_S3C_HSOTG is not set
1002# CONFIG_USB_GADGET_IMX is not set
1003# CONFIG_USB_GADGET_S3C2410 is not set
1004# CONFIG_USB_GADGET_M66592 is not set
1005# CONFIG_USB_GADGET_AMD5536UDC is not set
1006# CONFIG_USB_GADGET_FSL_QE is not set
1007# CONFIG_USB_GADGET_CI13XXX is not set
1008# CONFIG_USB_GADGET_NET2280 is not set
1009# CONFIG_USB_GADGET_GOKU is not set
1010# CONFIG_USB_GADGET_LANGWELL is not set
1011# CONFIG_USB_GADGET_DUMMY_HCD is not set
1012CONFIG_USB_GADGET_DUALSPEED=y
1013CONFIG_USB_ZERO=m
1014# CONFIG_USB_AUDIO is not set
1015CONFIG_USB_ETH=m
1016CONFIG_USB_ETH_RNDIS=y
1017# CONFIG_USB_ETH_EEM is not set
1018CONFIG_USB_GADGETFS=m
1019CONFIG_USB_FILE_STORAGE=m
1020# CONFIG_USB_FILE_STORAGE_TEST is not set
1021CONFIG_USB_G_SERIAL=m
1022# CONFIG_USB_MIDI_GADGET is not set
1023# CONFIG_USB_G_PRINTER is not set
1024CONFIG_USB_CDC_COMPOSITE=m
1025
1026#
1027# OTG and related infrastructure
1028#
1029# CONFIG_USB_GPIO_VBUS is not set
1030# CONFIG_NOP_USB_XCEIV is not set
1031CONFIG_MMC=y
1032# CONFIG_MMC_DEBUG is not set
1033# CONFIG_MMC_UNSAFE_RESUME is not set
1034
1035#
1036# MMC/SD/SDIO Card Drivers
1037#
1038CONFIG_MMC_BLOCK=y
1039CONFIG_MMC_BLOCK_BOUNCE=y
1040# CONFIG_SDIO_UART is not set
1041# CONFIG_MMC_TEST is not set
1042
1043#
1044# MMC/SD/SDIO Host Controller Drivers
1045#
1046# CONFIG_MMC_SDHCI is not set
1047# CONFIG_MMC_AT91 is not set
1048CONFIG_MMC_ATMELMCI=y
1049# CONFIG_MMC_ATMELMCI_DMA is not set
1050# CONFIG_MMC_SPI is not set
1051# CONFIG_MEMSTICK is not set
1052CONFIG_NEW_LEDS=y
1053CONFIG_LEDS_CLASS=y
1054
1055#
1056# LED drivers
1057#
1058# CONFIG_LEDS_PCA9532 is not set
1059CONFIG_LEDS_GPIO=y
1060CONFIG_LEDS_GPIO_PLATFORM=y
1061# CONFIG_LEDS_LP3944 is not set
1062# CONFIG_LEDS_PCA955X is not set
1063# CONFIG_LEDS_DAC124S085 is not set
1064# CONFIG_LEDS_BD2802 is not set
1065
1066#
1067# LED Triggers
1068#
1069CONFIG_LEDS_TRIGGERS=y
1070CONFIG_LEDS_TRIGGER_TIMER=y
1071CONFIG_LEDS_TRIGGER_HEARTBEAT=y
1072# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1073# CONFIG_LEDS_TRIGGER_GPIO is not set
1074# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1075
1076#
1077# iptables trigger is under Netfilter config (LED target)
1078#
1079# CONFIG_ACCESSIBILITY is not set
1080CONFIG_RTC_LIB=y
1081CONFIG_RTC_CLASS=y
1082CONFIG_RTC_HCTOSYS=y
1083CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1084# CONFIG_RTC_DEBUG is not set
1085
1086#
1087# RTC interfaces
1088#
1089CONFIG_RTC_INTF_SYSFS=y
1090CONFIG_RTC_INTF_PROC=y
1091CONFIG_RTC_INTF_DEV=y
1092# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1093# CONFIG_RTC_DRV_TEST is not set
1094
1095#
1096# I2C RTC drivers
1097#
1098# CONFIG_RTC_DRV_DS1307 is not set
1099# CONFIG_RTC_DRV_DS1374 is not set
1100# CONFIG_RTC_DRV_DS1672 is not set
1101# CONFIG_RTC_DRV_MAX6900 is not set
1102# CONFIG_RTC_DRV_RS5C372 is not set
1103# CONFIG_RTC_DRV_ISL1208 is not set
1104# CONFIG_RTC_DRV_X1205 is not set
1105# CONFIG_RTC_DRV_PCF8563 is not set
1106# CONFIG_RTC_DRV_PCF8583 is not set
1107# CONFIG_RTC_DRV_M41T80 is not set
1108# CONFIG_RTC_DRV_S35390A is not set
1109# CONFIG_RTC_DRV_FM3130 is not set
1110# CONFIG_RTC_DRV_RX8581 is not set
1111# CONFIG_RTC_DRV_RX8025 is not set
1112
1113#
1114# SPI RTC drivers
1115#
1116# CONFIG_RTC_DRV_M41T94 is not set
1117# CONFIG_RTC_DRV_DS1305 is not set
1118# CONFIG_RTC_DRV_DS1390 is not set
1119# CONFIG_RTC_DRV_MAX6902 is not set
1120# CONFIG_RTC_DRV_R9701 is not set
1121# CONFIG_RTC_DRV_RS5C348 is not set
1122# CONFIG_RTC_DRV_DS3234 is not set
1123# CONFIG_RTC_DRV_PCF2123 is not set
1124
1125#
1126# Platform RTC drivers
1127#
1128# CONFIG_RTC_DRV_DS1286 is not set
1129# CONFIG_RTC_DRV_DS1511 is not set
1130# CONFIG_RTC_DRV_DS1553 is not set
1131# CONFIG_RTC_DRV_DS1742 is not set
1132# CONFIG_RTC_DRV_STK17TA8 is not set
1133# CONFIG_RTC_DRV_M48T86 is not set
1134# CONFIG_RTC_DRV_M48T35 is not set
1135# CONFIG_RTC_DRV_M48T59 is not set
1136# CONFIG_RTC_DRV_BQ4802 is not set
1137# CONFIG_RTC_DRV_V3020 is not set
1138
1139#
1140# on-CPU RTC drivers
1141#
1142CONFIG_RTC_DRV_AT32AP700X=y
1143CONFIG_DMADEVICES=y
1144
1145#
1146# DMA Devices
1147#
1148CONFIG_DW_DMAC=y
1149CONFIG_DMA_ENGINE=y
1150
1151#
1152# DMA Clients
1153#
1154# CONFIG_NET_DMA is not set
1155# CONFIG_ASYNC_TX_DMA is not set
1156# CONFIG_DMATEST is not set
1157# CONFIG_AUXDISPLAY is not set
1158# CONFIG_UIO is not set
1159
1160#
1161# TI VLYNQ
1162#
1163# CONFIG_STAGING is not set
1164
1165#
1166# File systems
1167#
1168CONFIG_EXT2_FS=y
1169# CONFIG_EXT2_FS_XATTR is not set
1170# CONFIG_EXT2_FS_XIP is not set
1171CONFIG_EXT3_FS=y
1172# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
1173# CONFIG_EXT3_FS_XATTR is not set
1174# CONFIG_EXT4_FS is not set
1175CONFIG_JBD=y
1176# CONFIG_JBD_DEBUG is not set
1177# CONFIG_REISERFS_FS is not set
1178# CONFIG_JFS_FS is not set
1179# CONFIG_FS_POSIX_ACL is not set
1180# CONFIG_XFS_FS is not set
1181# CONFIG_GFS2_FS is not set
1182# CONFIG_OCFS2_FS is not set
1183# CONFIG_BTRFS_FS is not set
1184# CONFIG_NILFS2_FS is not set
1185CONFIG_FILE_LOCKING=y
1186CONFIG_FSNOTIFY=y
1187# CONFIG_DNOTIFY is not set
1188CONFIG_INOTIFY=y
1189CONFIG_INOTIFY_USER=y
1190# CONFIG_QUOTA is not set
1191# CONFIG_AUTOFS_FS is not set
1192# CONFIG_AUTOFS4_FS is not set
1193CONFIG_FUSE_FS=m
1194# CONFIG_CUSE is not set
1195
1196#
1197# Caches
1198#
1199# CONFIG_FSCACHE is not set
1200
1201#
1202# CD-ROM/DVD Filesystems
1203#
1204# CONFIG_ISO9660_FS is not set
1205# CONFIG_UDF_FS is not set
1206
1207#
1208# DOS/FAT/NT Filesystems
1209#
1210CONFIG_FAT_FS=m
1211CONFIG_MSDOS_FS=m
1212CONFIG_VFAT_FS=m
1213CONFIG_FAT_DEFAULT_CODEPAGE=850
1214CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1215# CONFIG_NTFS_FS is not set
1216
1217#
1218# Pseudo filesystems
1219#
1220CONFIG_PROC_FS=y
1221# CONFIG_PROC_KCORE is not set
1222CONFIG_PROC_SYSCTL=y
1223CONFIG_PROC_PAGE_MONITOR=y
1224CONFIG_SYSFS=y
1225CONFIG_TMPFS=y
1226# CONFIG_TMPFS_POSIX_ACL is not set
1227# CONFIG_HUGETLB_PAGE is not set
1228CONFIG_CONFIGFS_FS=y
1229CONFIG_MISC_FILESYSTEMS=y
1230# CONFIG_ADFS_FS is not set
1231# CONFIG_AFFS_FS is not set
1232# CONFIG_HFS_FS is not set
1233# CONFIG_HFSPLUS_FS is not set
1234# CONFIG_BEFS_FS is not set
1235# CONFIG_BFS_FS is not set
1236# CONFIG_EFS_FS is not set
1237CONFIG_JFFS2_FS=y
1238CONFIG_JFFS2_FS_DEBUG=0
1239CONFIG_JFFS2_FS_WRITEBUFFER=y
1240# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1241# CONFIG_JFFS2_SUMMARY is not set
1242# CONFIG_JFFS2_FS_XATTR is not set
1243# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1244CONFIG_JFFS2_ZLIB=y
1245# CONFIG_JFFS2_LZO is not set
1246CONFIG_JFFS2_RTIME=y
1247# CONFIG_JFFS2_RUBIN is not set
1248CONFIG_UBIFS_FS=y
1249# CONFIG_UBIFS_FS_XATTR is not set
1250# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
1251CONFIG_UBIFS_FS_LZO=y
1252CONFIG_UBIFS_FS_ZLIB=y
1253# CONFIG_UBIFS_FS_DEBUG is not set
1254# CONFIG_CRAMFS is not set
1255# CONFIG_SQUASHFS is not set
1256# CONFIG_VXFS_FS is not set
1257# CONFIG_MINIX_FS is not set
1258# CONFIG_OMFS_FS is not set
1259# CONFIG_HPFS_FS is not set
1260# CONFIG_QNX4FS_FS is not set
1261# CONFIG_ROMFS_FS is not set
1262# CONFIG_SYSV_FS is not set
1263# CONFIG_UFS_FS is not set
1264CONFIG_NETWORK_FILESYSTEMS=y
1265CONFIG_NFS_FS=y
1266CONFIG_NFS_V3=y
1267# CONFIG_NFS_V3_ACL is not set
1268# CONFIG_NFS_V4 is not set
1269CONFIG_ROOT_NFS=y
1270CONFIG_NFSD=m
1271CONFIG_NFSD_V3=y
1272# CONFIG_NFSD_V3_ACL is not set
1273# CONFIG_NFSD_V4 is not set
1274CONFIG_LOCKD=y
1275CONFIG_LOCKD_V4=y
1276CONFIG_EXPORTFS=m
1277CONFIG_NFS_COMMON=y
1278CONFIG_SUNRPC=y
1279# CONFIG_RPCSEC_GSS_KRB5 is not set
1280# CONFIG_RPCSEC_GSS_SPKM3 is not set
1281CONFIG_SMB_FS=m
1282# CONFIG_SMB_NLS_DEFAULT is not set
1283CONFIG_CIFS=m
1284# CONFIG_CIFS_STATS is not set
1285# CONFIG_CIFS_WEAK_PW_HASH is not set
1286# CONFIG_CIFS_XATTR is not set
1287# CONFIG_CIFS_DEBUG2 is not set
1288# CONFIG_CIFS_EXPERIMENTAL is not set
1289# CONFIG_NCP_FS is not set
1290# CONFIG_CODA_FS is not set
1291# CONFIG_AFS_FS is not set
1292
1293#
1294# Partition Types
1295#
1296# CONFIG_PARTITION_ADVANCED is not set
1297CONFIG_MSDOS_PARTITION=y
1298CONFIG_NLS=m
1299CONFIG_NLS_DEFAULT="iso8859-1"
1300CONFIG_NLS_CODEPAGE_437=m
1301# CONFIG_NLS_CODEPAGE_737 is not set
1302# CONFIG_NLS_CODEPAGE_775 is not set
1303CONFIG_NLS_CODEPAGE_850=m
1304# CONFIG_NLS_CODEPAGE_852 is not set
1305# CONFIG_NLS_CODEPAGE_855 is not set
1306# CONFIG_NLS_CODEPAGE_857 is not set
1307# CONFIG_NLS_CODEPAGE_860 is not set
1308# CONFIG_NLS_CODEPAGE_861 is not set
1309# CONFIG_NLS_CODEPAGE_862 is not set
1310# CONFIG_NLS_CODEPAGE_863 is not set
1311# CONFIG_NLS_CODEPAGE_864 is not set
1312# CONFIG_NLS_CODEPAGE_865 is not set
1313# CONFIG_NLS_CODEPAGE_866 is not set
1314# CONFIG_NLS_CODEPAGE_869 is not set
1315# CONFIG_NLS_CODEPAGE_936 is not set
1316# CONFIG_NLS_CODEPAGE_950 is not set
1317# CONFIG_NLS_CODEPAGE_932 is not set
1318# CONFIG_NLS_CODEPAGE_949 is not set
1319# CONFIG_NLS_CODEPAGE_874 is not set
1320# CONFIG_NLS_ISO8859_8 is not set
1321# CONFIG_NLS_CODEPAGE_1250 is not set
1322# CONFIG_NLS_CODEPAGE_1251 is not set
1323# CONFIG_NLS_ASCII is not set
1324CONFIG_NLS_ISO8859_1=m
1325# CONFIG_NLS_ISO8859_2 is not set
1326# CONFIG_NLS_ISO8859_3 is not set
1327# CONFIG_NLS_ISO8859_4 is not set
1328# CONFIG_NLS_ISO8859_5 is not set
1329# CONFIG_NLS_ISO8859_6 is not set
1330# CONFIG_NLS_ISO8859_7 is not set
1331# CONFIG_NLS_ISO8859_9 is not set
1332# CONFIG_NLS_ISO8859_13 is not set
1333# CONFIG_NLS_ISO8859_14 is not set
1334# CONFIG_NLS_ISO8859_15 is not set
1335# CONFIG_NLS_KOI8_R is not set
1336# CONFIG_NLS_KOI8_U is not set
1337CONFIG_NLS_UTF8=m
1338# CONFIG_DLM is not set
1339
1340#
1341# Kernel hacking
1342#
1343# CONFIG_PRINTK_TIME is not set
1344CONFIG_ENABLE_WARN_DEPRECATED=y
1345CONFIG_ENABLE_MUST_CHECK=y
1346CONFIG_FRAME_WARN=1024
1347CONFIG_MAGIC_SYSRQ=y
1348# CONFIG_STRIP_ASM_SYMS is not set
1349# CONFIG_UNUSED_SYMBOLS is not set
1350CONFIG_DEBUG_FS=y
1351# CONFIG_HEADERS_CHECK is not set
1352CONFIG_DEBUG_KERNEL=y
1353# CONFIG_DEBUG_SHIRQ is not set
1354CONFIG_DETECT_SOFTLOCKUP=y
1355# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1356CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1357CONFIG_DETECT_HUNG_TASK=y
1358# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1359CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1360CONFIG_SCHED_DEBUG=y
1361# CONFIG_SCHEDSTATS is not set
1362# CONFIG_TIMER_STATS is not set
1363# CONFIG_DEBUG_OBJECTS is not set
1364# CONFIG_SLUB_DEBUG_ON is not set
1365# CONFIG_SLUB_STATS is not set
1366# CONFIG_DEBUG_RT_MUTEXES is not set
1367# CONFIG_RT_MUTEX_TESTER is not set
1368# CONFIG_DEBUG_SPINLOCK is not set
1369# CONFIG_DEBUG_MUTEXES is not set
1370# CONFIG_DEBUG_LOCK_ALLOC is not set
1371# CONFIG_PROVE_LOCKING is not set
1372# CONFIG_LOCK_STAT is not set
1373# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1374# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1375CONFIG_STACKTRACE=y
1376# CONFIG_DEBUG_KOBJECT is not set
1377CONFIG_DEBUG_BUGVERBOSE=y
1378# CONFIG_DEBUG_INFO is not set
1379# CONFIG_DEBUG_VM is not set
1380# CONFIG_DEBUG_WRITECOUNT is not set
1381# CONFIG_DEBUG_MEMORY_INIT is not set
1382# CONFIG_DEBUG_LIST is not set
1383# CONFIG_DEBUG_SG is not set
1384# CONFIG_DEBUG_NOTIFIERS is not set
1385# CONFIG_DEBUG_CREDENTIALS is not set
1386CONFIG_FRAME_POINTER=y
1387# CONFIG_BOOT_PRINTK_DELAY is not set
1388# CONFIG_RCU_TORTURE_TEST is not set
1389# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1390# CONFIG_KPROBES_SANITY_TEST is not set
1391# CONFIG_BACKTRACE_SELF_TEST is not set
1392# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1393# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1394# CONFIG_LKDTM is not set
1395# CONFIG_FAULT_INJECTION is not set
1396# CONFIG_PAGE_POISONING is not set
1397CONFIG_NOP_TRACER=y
1398CONFIG_RING_BUFFER=y
1399CONFIG_EVENT_TRACING=y
1400CONFIG_CONTEXT_SWITCH_TRACER=y
1401CONFIG_RING_BUFFER_ALLOW_SWAP=y
1402CONFIG_TRACING=y
1403CONFIG_TRACING_SUPPORT=y
1404CONFIG_FTRACE=y
1405# CONFIG_IRQSOFF_TRACER is not set
1406# CONFIG_SCHED_TRACER is not set
1407# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1408# CONFIG_BOOT_TRACER is not set
1409CONFIG_BRANCH_PROFILE_NONE=y
1410# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1411# CONFIG_PROFILE_ALL_BRANCHES is not set
1412# CONFIG_KMEMTRACE is not set
1413# CONFIG_WORKQUEUE_TRACER is not set
1414# CONFIG_BLK_DEV_IO_TRACE is not set
1415# CONFIG_RING_BUFFER_BENCHMARK is not set
1416# CONFIG_DYNAMIC_DEBUG is not set
1417# CONFIG_SAMPLES is not set
1418
1419#
1420# Security options
1421#
1422# CONFIG_KEYS is not set
1423# CONFIG_SECURITY is not set
1424# CONFIG_SECURITYFS is not set
1425# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1426CONFIG_CRYPTO=y
1427
1428#
1429# Crypto core or helper
1430#
1431# CONFIG_CRYPTO_FIPS is not set
1432CONFIG_CRYPTO_ALGAPI=y
1433CONFIG_CRYPTO_ALGAPI2=y
1434CONFIG_CRYPTO_AEAD=y
1435CONFIG_CRYPTO_AEAD2=y
1436CONFIG_CRYPTO_BLKCIPHER=y
1437CONFIG_CRYPTO_BLKCIPHER2=y
1438CONFIG_CRYPTO_HASH=y
1439CONFIG_CRYPTO_HASH2=y
1440CONFIG_CRYPTO_RNG=m
1441CONFIG_CRYPTO_RNG2=y
1442CONFIG_CRYPTO_PCOMP=y
1443CONFIG_CRYPTO_MANAGER=y
1444CONFIG_CRYPTO_MANAGER2=y
1445# CONFIG_CRYPTO_GF128MUL is not set
1446# CONFIG_CRYPTO_NULL is not set
1447CONFIG_CRYPTO_WORKQUEUE=y
1448# CONFIG_CRYPTO_CRYPTD is not set
1449CONFIG_CRYPTO_AUTHENC=y
1450# CONFIG_CRYPTO_TEST is not set
1451
1452#
1453# Authenticated Encryption with Associated Data
1454#
1455# CONFIG_CRYPTO_CCM is not set
1456# CONFIG_CRYPTO_GCM is not set
1457# CONFIG_CRYPTO_SEQIV is not set
1458
1459#
1460# Block modes
1461#
1462CONFIG_CRYPTO_CBC=y
1463# CONFIG_CRYPTO_CTR is not set
1464# CONFIG_CRYPTO_CTS is not set
1465CONFIG_CRYPTO_ECB=m
1466# CONFIG_CRYPTO_LRW is not set
1467# CONFIG_CRYPTO_PCBC is not set
1468# CONFIG_CRYPTO_XTS is not set
1469
1470#
1471# Hash modes
1472#
1473CONFIG_CRYPTO_HMAC=y
1474# CONFIG_CRYPTO_XCBC is not set
1475# CONFIG_CRYPTO_VMAC is not set
1476
1477#
1478# Digest
1479#
1480# CONFIG_CRYPTO_CRC32C is not set
1481# CONFIG_CRYPTO_GHASH is not set
1482# CONFIG_CRYPTO_MD4 is not set
1483CONFIG_CRYPTO_MD5=y
1484# CONFIG_CRYPTO_MICHAEL_MIC is not set
1485# CONFIG_CRYPTO_RMD128 is not set
1486# CONFIG_CRYPTO_RMD160 is not set
1487# CONFIG_CRYPTO_RMD256 is not set
1488# CONFIG_CRYPTO_RMD320 is not set
1489CONFIG_CRYPTO_SHA1=y
1490# CONFIG_CRYPTO_SHA256 is not set
1491# CONFIG_CRYPTO_SHA512 is not set
1492# CONFIG_CRYPTO_TGR192 is not set
1493# CONFIG_CRYPTO_WP512 is not set
1494
1495#
1496# Ciphers
1497#
1498CONFIG_CRYPTO_AES=m
1499# CONFIG_CRYPTO_ANUBIS is not set
1500CONFIG_CRYPTO_ARC4=m
1501# CONFIG_CRYPTO_BLOWFISH is not set
1502# CONFIG_CRYPTO_CAMELLIA is not set
1503# CONFIG_CRYPTO_CAST5 is not set
1504# CONFIG_CRYPTO_CAST6 is not set
1505CONFIG_CRYPTO_DES=y
1506# CONFIG_CRYPTO_FCRYPT is not set
1507# CONFIG_CRYPTO_KHAZAD is not set
1508# CONFIG_CRYPTO_SALSA20 is not set
1509# CONFIG_CRYPTO_SEED is not set
1510# CONFIG_CRYPTO_SERPENT is not set
1511# CONFIG_CRYPTO_TEA is not set
1512# CONFIG_CRYPTO_TWOFISH is not set
1513
1514#
1515# Compression
1516#
1517CONFIG_CRYPTO_DEFLATE=y
1518# CONFIG_CRYPTO_ZLIB is not set
1519CONFIG_CRYPTO_LZO=y
1520
1521#
1522# Random Number Generation
1523#
1524CONFIG_CRYPTO_ANSI_CPRNG=m
1525CONFIG_CRYPTO_HW=y
1526CONFIG_BINARY_PRINTF=y
1527
1528#
1529# Library routines
1530#
1531CONFIG_BITREVERSE=y
1532CONFIG_GENERIC_FIND_LAST_BIT=y
1533CONFIG_CRC_CCITT=m
1534CONFIG_CRC16=y
1535# CONFIG_CRC_T10DIF is not set
1536# CONFIG_CRC_ITU_T is not set
1537CONFIG_CRC32=y
1538# CONFIG_CRC7 is not set
1539# CONFIG_LIBCRC32C is not set
1540CONFIG_ZLIB_INFLATE=y
1541CONFIG_ZLIB_DEFLATE=y
1542CONFIG_LZO_COMPRESS=y
1543CONFIG_LZO_DECOMPRESS=y
1544CONFIG_DECOMPRESS_GZIP=y
1545CONFIG_GENERIC_ALLOCATOR=y
1546CONFIG_HAS_IOMEM=y
1547CONFIG_HAS_IOPORT=y
1548CONFIG_HAS_DMA=y
1549CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index 0abe90adb1a4..42dafce02389 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27-rc1 3# Linux kernel version: 2.6.32-rc5
4# Mon Aug 4 16:02:27 2008 4# Thu Oct 29 13:00:55 2009
5# 5#
6CONFIG_AVR32=y 6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y 7CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y 21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y 22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
24 25
25# 26#
26# General setup 27# General setup
@@ -34,21 +35,36 @@ CONFIG_SWAP=y
34CONFIG_SYSVIPC=y 35CONFIG_SYSVIPC=y
35CONFIG_SYSVIPC_SYSCTL=y 36CONFIG_SYSVIPC_SYSCTL=y
36CONFIG_POSIX_MQUEUE=y 37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
37# CONFIG_BSD_PROCESS_ACCT is not set 39# CONFIG_BSD_PROCESS_ACCT is not set
38# CONFIG_TASKSTATS is not set 40# CONFIG_TASKSTATS is not set
39# CONFIG_AUDIT is not set 41# CONFIG_AUDIT is not set
42
43#
44# RCU Subsystem
45#
46CONFIG_TREE_RCU=y
47# CONFIG_TREE_PREEMPT_RCU is not set
48# CONFIG_RCU_TRACE is not set
49CONFIG_RCU_FANOUT=32
50# CONFIG_RCU_FANOUT_EXACT is not set
51# CONFIG_TREE_RCU_TRACE is not set
40# CONFIG_IKCONFIG is not set 52# CONFIG_IKCONFIG is not set
41CONFIG_LOG_BUF_SHIFT=14 53CONFIG_LOG_BUF_SHIFT=14
42# CONFIG_CGROUPS is not set
43# CONFIG_GROUP_SCHED is not set 54# CONFIG_GROUP_SCHED is not set
55# CONFIG_CGROUPS is not set
44CONFIG_SYSFS_DEPRECATED=y 56CONFIG_SYSFS_DEPRECATED=y
45CONFIG_SYSFS_DEPRECATED_V2=y 57CONFIG_SYSFS_DEPRECATED_V2=y
46CONFIG_RELAY=y 58CONFIG_RELAY=y
47# CONFIG_NAMESPACES is not set 59# CONFIG_NAMESPACES is not set
48CONFIG_BLK_DEV_INITRD=y 60CONFIG_BLK_DEV_INITRD=y
49CONFIG_INITRAMFS_SOURCE="" 61CONFIG_INITRAMFS_SOURCE=""
62CONFIG_RD_GZIP=y
63# CONFIG_RD_BZIP2 is not set
64# CONFIG_RD_LZMA is not set
50CONFIG_CC_OPTIMIZE_FOR_SIZE=y 65CONFIG_CC_OPTIMIZE_FOR_SIZE=y
51CONFIG_SYSCTL=y 66CONFIG_SYSCTL=y
67CONFIG_ANON_INODES=y
52CONFIG_EMBEDDED=y 68CONFIG_EMBEDDED=y
53# CONFIG_SYSCTL_SYSCALL is not set 69# CONFIG_SYSCTL_SYSCALL is not set
54CONFIG_KALLSYMS=y 70CONFIG_KALLSYMS=y
@@ -58,38 +74,40 @@ CONFIG_HOTPLUG=y
58CONFIG_PRINTK=y 74CONFIG_PRINTK=y
59CONFIG_BUG=y 75CONFIG_BUG=y
60CONFIG_ELF_CORE=y 76CONFIG_ELF_CORE=y
61# CONFIG_COMPAT_BRK is not set
62# CONFIG_BASE_FULL is not set 77# CONFIG_BASE_FULL is not set
63CONFIG_FUTEX=y 78CONFIG_FUTEX=y
64CONFIG_ANON_INODES=y
65CONFIG_EPOLL=y 79CONFIG_EPOLL=y
66CONFIG_SIGNALFD=y 80CONFIG_SIGNALFD=y
67CONFIG_TIMERFD=y 81CONFIG_TIMERFD=y
68CONFIG_EVENTFD=y 82CONFIG_EVENTFD=y
69CONFIG_SHMEM=y 83CONFIG_SHMEM=y
84CONFIG_AIO=y
85
86#
87# Kernel Performance Events And Counters
88#
70CONFIG_VM_EVENT_COUNTERS=y 89CONFIG_VM_EVENT_COUNTERS=y
71CONFIG_SLUB_DEBUG=y 90CONFIG_SLUB_DEBUG=y
91# CONFIG_COMPAT_BRK is not set
72# CONFIG_SLAB is not set 92# CONFIG_SLAB is not set
73CONFIG_SLUB=y 93CONFIG_SLUB=y
74# CONFIG_SLOB is not set 94# CONFIG_SLOB is not set
75CONFIG_PROFILING=y 95CONFIG_PROFILING=y
76# CONFIG_MARKERS is not set 96CONFIG_TRACEPOINTS=y
77CONFIG_OPROFILE=m 97CONFIG_OPROFILE=m
78CONFIG_HAVE_OPROFILE=y 98CONFIG_HAVE_OPROFILE=y
79CONFIG_KPROBES=y 99CONFIG_KPROBES=y
80# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
81# CONFIG_HAVE_IOREMAP_PROT is not set
82CONFIG_HAVE_KPROBES=y 100CONFIG_HAVE_KPROBES=y
83# CONFIG_HAVE_KRETPROBES is not set
84# CONFIG_HAVE_ARCH_TRACEHOOK is not set
85# CONFIG_HAVE_DMA_ATTRS is not set
86# CONFIG_USE_GENERIC_SMP_HELPERS is not set
87CONFIG_HAVE_CLK=y 101CONFIG_HAVE_CLK=y
88CONFIG_PROC_PAGE_MONITOR=y 102
103#
104# GCOV-based kernel profiling
105#
106# CONFIG_GCOV_KERNEL is not set
107# CONFIG_SLOW_WORK is not set
89# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 108# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
90CONFIG_SLABINFO=y 109CONFIG_SLABINFO=y
91CONFIG_RT_MUTEXES=y 110CONFIG_RT_MUTEXES=y
92# CONFIG_TINY_SHMEM is not set
93CONFIG_BASE_SMALL=1 111CONFIG_BASE_SMALL=1
94CONFIG_MODULES=y 112CONFIG_MODULES=y
95# CONFIG_MODULE_FORCE_LOAD is not set 113# CONFIG_MODULE_FORCE_LOAD is not set
@@ -97,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y
97# CONFIG_MODULE_FORCE_UNLOAD is not set 115# CONFIG_MODULE_FORCE_UNLOAD is not set
98# CONFIG_MODVERSIONS is not set 116# CONFIG_MODVERSIONS is not set
99# CONFIG_MODULE_SRCVERSION_ALL is not set 117# CONFIG_MODULE_SRCVERSION_ALL is not set
100CONFIG_KMOD=y
101CONFIG_BLOCK=y 118CONFIG_BLOCK=y
102# CONFIG_LBD is not set 119CONFIG_LBDAF=y
103# CONFIG_BLK_DEV_IO_TRACE is not set
104# CONFIG_LSF is not set
105# CONFIG_BLK_DEV_BSG is not set 120# CONFIG_BLK_DEV_BSG is not set
106# CONFIG_BLK_DEV_INTEGRITY is not set 121# CONFIG_BLK_DEV_INTEGRITY is not set
107 122
@@ -117,7 +132,7 @@ CONFIG_IOSCHED_CFQ=y
117CONFIG_DEFAULT_CFQ=y 132CONFIG_DEFAULT_CFQ=y
118# CONFIG_DEFAULT_NOOP is not set 133# CONFIG_DEFAULT_NOOP is not set
119CONFIG_DEFAULT_IOSCHED="cfq" 134CONFIG_DEFAULT_IOSCHED="cfq"
120CONFIG_CLASSIC_RCU=y 135CONFIG_FREEZER=y
121 136
122# 137#
123# System Type and features 138# System Type and features
@@ -133,7 +148,12 @@ CONFIG_PLATFORM_AT32AP=y
133CONFIG_CPU_AT32AP700X=y 148CONFIG_CPU_AT32AP700X=y
134CONFIG_CPU_AT32AP7000=y 149CONFIG_CPU_AT32AP7000=y
135CONFIG_BOARD_ATSTK1000=y 150CONFIG_BOARD_ATSTK1000=y
136# CONFIG_BOARD_ATNGW100 is not set 151# CONFIG_BOARD_ATNGW100_MKI is not set
152# CONFIG_BOARD_ATNGW100_MKII is not set
153# CONFIG_BOARD_HAMMERHEAD is not set
154# CONFIG_BOARD_FAVR_32 is not set
155# CONFIG_BOARD_MERISC is not set
156# CONFIG_BOARD_MIMC200 is not set
137CONFIG_BOARD_ATSTK1002=y 157CONFIG_BOARD_ATSTK1002=y
138# CONFIG_BOARD_ATSTK1003 is not set 158# CONFIG_BOARD_ATSTK1003 is not set
139# CONFIG_BOARD_ATSTK1004 is not set 159# CONFIG_BOARD_ATSTK1004 is not set
@@ -159,7 +179,7 @@ CONFIG_PREEMPT_NONE=y
159# CONFIG_PREEMPT_VOLUNTARY is not set 179# CONFIG_PREEMPT_VOLUNTARY is not set
160# CONFIG_PREEMPT is not set 180# CONFIG_PREEMPT is not set
161CONFIG_QUICKLIST=y 181CONFIG_QUICKLIST=y
162# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set 182# CONFIG_HAVE_ARCH_BOOTMEM is not set
163# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set 183# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
164# CONFIG_NEED_NODE_MEMMAP_SIZE is not set 184# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
165CONFIG_ARCH_FLATMEM_ENABLE=y 185CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -171,14 +191,16 @@ CONFIG_FLATMEM_MANUAL=y
171# CONFIG_SPARSEMEM_MANUAL is not set 191# CONFIG_SPARSEMEM_MANUAL is not set
172CONFIG_FLATMEM=y 192CONFIG_FLATMEM=y
173CONFIG_FLAT_NODE_MEM_MAP=y 193CONFIG_FLAT_NODE_MEM_MAP=y
174# CONFIG_SPARSEMEM_STATIC is not set
175# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
176CONFIG_PAGEFLAGS_EXTENDED=y 194CONFIG_PAGEFLAGS_EXTENDED=y
177CONFIG_SPLIT_PTLOCK_CPUS=4 195CONFIG_SPLIT_PTLOCK_CPUS=4
178# CONFIG_RESOURCES_64BIT is not set 196# CONFIG_PHYS_ADDR_T_64BIT is not set
179CONFIG_ZONE_DMA_FLAG=0 197CONFIG_ZONE_DMA_FLAG=0
180CONFIG_NR_QUICK=2 198CONFIG_NR_QUICK=2
181CONFIG_VIRT_TO_BUS=y 199CONFIG_VIRT_TO_BUS=y
200CONFIG_HAVE_MLOCK=y
201CONFIG_HAVE_MLOCKED_PAGE_BIT=y
202# CONFIG_KSM is not set
203CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
182# CONFIG_OWNERSHIP_TRACE is not set 204# CONFIG_OWNERSHIP_TRACE is not set
183CONFIG_NMI_DEBUGGING=y 205CONFIG_NMI_DEBUGGING=y
184# CONFIG_HZ_100 is not set 206# CONFIG_HZ_100 is not set
@@ -186,7 +208,7 @@ CONFIG_HZ_250=y
186# CONFIG_HZ_300 is not set 208# CONFIG_HZ_300 is not set
187# CONFIG_HZ_1000 is not set 209# CONFIG_HZ_1000 is not set
188CONFIG_HZ=250 210CONFIG_HZ=250
189# CONFIG_SCHED_HRTICK is not set 211CONFIG_SCHED_HRTICK=y
190CONFIG_CMDLINE="" 212CONFIG_CMDLINE=""
191 213
192# 214#
@@ -197,6 +219,7 @@ CONFIG_PM=y
197CONFIG_PM_SLEEP=y 219CONFIG_PM_SLEEP=y
198CONFIG_SUSPEND=y 220CONFIG_SUSPEND=y
199CONFIG_SUSPEND_FREEZER=y 221CONFIG_SUSPEND_FREEZER=y
222# CONFIG_PM_RUNTIME is not set
200CONFIG_ARCH_SUSPEND_POSSIBLE=y 223CONFIG_ARCH_SUSPEND_POSSIBLE=y
201 224
202# 225#
@@ -228,6 +251,8 @@ CONFIG_CPU_FREQ_AT32AP=y
228# Executable file formats 251# Executable file formats
229# 252#
230CONFIG_BINFMT_ELF=y 253CONFIG_BINFMT_ELF=y
254# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
255# CONFIG_HAVE_AOUT is not set
231# CONFIG_BINFMT_MISC is not set 256# CONFIG_BINFMT_MISC is not set
232CONFIG_NET=y 257CONFIG_NET=y
233 258
@@ -295,10 +320,12 @@ CONFIG_IPV6_TUNNEL=m
295# CONFIG_NETFILTER is not set 320# CONFIG_NETFILTER is not set
296# CONFIG_IP_DCCP is not set 321# CONFIG_IP_DCCP is not set
297# CONFIG_IP_SCTP is not set 322# CONFIG_IP_SCTP is not set
323# CONFIG_RDS is not set
298# CONFIG_TIPC is not set 324# CONFIG_TIPC is not set
299# CONFIG_ATM is not set 325# CONFIG_ATM is not set
300CONFIG_STP=m 326CONFIG_STP=m
301CONFIG_BRIDGE=m 327CONFIG_BRIDGE=m
328# CONFIG_NET_DSA is not set
302# CONFIG_VLAN_8021Q is not set 329# CONFIG_VLAN_8021Q is not set
303# CONFIG_DECNET is not set 330# CONFIG_DECNET is not set
304CONFIG_LLC=m 331CONFIG_LLC=m
@@ -309,26 +336,33 @@ CONFIG_LLC=m
309# CONFIG_LAPB is not set 336# CONFIG_LAPB is not set
310# CONFIG_ECONET is not set 337# CONFIG_ECONET is not set
311# CONFIG_WAN_ROUTER is not set 338# CONFIG_WAN_ROUTER is not set
339# CONFIG_PHONET is not set
340# CONFIG_IEEE802154 is not set
312# CONFIG_NET_SCHED is not set 341# CONFIG_NET_SCHED is not set
342# CONFIG_DCB is not set
313 343
314# 344#
315# Network testing 345# Network testing
316# 346#
317# CONFIG_NET_PKTGEN is not set 347# CONFIG_NET_PKTGEN is not set
318# CONFIG_NET_TCPPROBE is not set 348# CONFIG_NET_TCPPROBE is not set
349# CONFIG_NET_DROP_MONITOR is not set
319# CONFIG_HAMRADIO is not set 350# CONFIG_HAMRADIO is not set
320# CONFIG_CAN is not set 351# CONFIG_CAN is not set
321# CONFIG_IRDA is not set 352# CONFIG_IRDA is not set
322# CONFIG_BT is not set 353# CONFIG_BT is not set
323# CONFIG_AF_RXRPC is not set 354# CONFIG_AF_RXRPC is not set
355CONFIG_WIRELESS=y
356# CONFIG_CFG80211 is not set
357CONFIG_CFG80211_DEFAULT_PS_VALUE=0
358# CONFIG_WIRELESS_OLD_REGULATORY is not set
359# CONFIG_WIRELESS_EXT is not set
360# CONFIG_LIB80211 is not set
324 361
325# 362#
326# Wireless 363# CFG80211 needs to be enabled for MAC80211
327# 364#
328# CONFIG_CFG80211 is not set 365# CONFIG_WIMAX is not set
329# CONFIG_WIRELESS_EXT is not set
330# CONFIG_MAC80211 is not set
331# CONFIG_IEEE80211 is not set
332# CONFIG_RFKILL is not set 366# CONFIG_RFKILL is not set
333# CONFIG_NET_9P is not set 367# CONFIG_NET_9P is not set
334 368
@@ -340,6 +374,7 @@ CONFIG_LLC=m
340# Generic Driver Options 374# Generic Driver Options
341# 375#
342CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 376CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
377# CONFIG_DEVTMPFS is not set
343CONFIG_STANDALONE=y 378CONFIG_STANDALONE=y
344# CONFIG_PREVENT_FIRMWARE_BUILD is not set 379# CONFIG_PREVENT_FIRMWARE_BUILD is not set
345# CONFIG_FW_LOADER is not set 380# CONFIG_FW_LOADER is not set
@@ -349,6 +384,7 @@ CONFIG_STANDALONE=y
349# CONFIG_CONNECTOR is not set 384# CONFIG_CONNECTOR is not set
350CONFIG_MTD=y 385CONFIG_MTD=y
351# CONFIG_MTD_DEBUG is not set 386# CONFIG_MTD_DEBUG is not set
387# CONFIG_MTD_TESTS is not set
352# CONFIG_MTD_CONCAT is not set 388# CONFIG_MTD_CONCAT is not set
353CONFIG_MTD_PARTITIONS=y 389CONFIG_MTD_PARTITIONS=y
354# CONFIG_MTD_REDBOOT_PARTS is not set 390# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -398,17 +434,18 @@ CONFIG_MTD_CFI_UTIL=y
398# 434#
399# CONFIG_MTD_COMPLEX_MAPPINGS is not set 435# CONFIG_MTD_COMPLEX_MAPPINGS is not set
400CONFIG_MTD_PHYSMAP=y 436CONFIG_MTD_PHYSMAP=y
401CONFIG_MTD_PHYSMAP_START=0x8000000 437# CONFIG_MTD_PHYSMAP_COMPAT is not set
402CONFIG_MTD_PHYSMAP_LEN=0x0
403CONFIG_MTD_PHYSMAP_BANKWIDTH=2
404# CONFIG_MTD_PLATRAM is not set 438# CONFIG_MTD_PLATRAM is not set
405 439
406# 440#
407# Self-contained MTD device drivers 441# Self-contained MTD device drivers
408# 442#
409CONFIG_MTD_DATAFLASH=m 443CONFIG_MTD_DATAFLASH=m
444# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
445# CONFIG_MTD_DATAFLASH_OTP is not set
410CONFIG_MTD_M25P80=m 446CONFIG_MTD_M25P80=m
411CONFIG_M25PXX_USE_FAST_READ=y 447CONFIG_M25PXX_USE_FAST_READ=y
448# CONFIG_MTD_SST25L is not set
412# CONFIG_MTD_SLRAM is not set 449# CONFIG_MTD_SLRAM is not set
413# CONFIG_MTD_PHRAM is not set 450# CONFIG_MTD_PHRAM is not set
414# CONFIG_MTD_MTDRAM is not set 451# CONFIG_MTD_MTDRAM is not set
@@ -424,9 +461,22 @@ CONFIG_M25PXX_USE_FAST_READ=y
424# CONFIG_MTD_ONENAND is not set 461# CONFIG_MTD_ONENAND is not set
425 462
426# 463#
464# LPDDR flash memory drivers
465#
466# CONFIG_MTD_LPDDR is not set
467
468#
427# UBI - Unsorted block images 469# UBI - Unsorted block images
428# 470#
429# CONFIG_MTD_UBI is not set 471CONFIG_MTD_UBI=y
472CONFIG_MTD_UBI_WL_THRESHOLD=4096
473CONFIG_MTD_UBI_BEB_RESERVE=1
474# CONFIG_MTD_UBI_GLUEBI is not set
475
476#
477# UBI debugging options
478#
479# CONFIG_MTD_UBI_DEBUG is not set
430# CONFIG_PARPORT is not set 480# CONFIG_PARPORT is not set
431CONFIG_BLK_DEV=y 481CONFIG_BLK_DEV=y
432# CONFIG_BLK_DEV_COW_COMMON is not set 482# CONFIG_BLK_DEV_COW_COMMON is not set
@@ -444,10 +494,20 @@ CONFIG_ATMEL_PWM=m
444CONFIG_ATMEL_TCLIB=y 494CONFIG_ATMEL_TCLIB=y
445CONFIG_ATMEL_TCB_CLKSRC=y 495CONFIG_ATMEL_TCB_CLKSRC=y
446CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 496CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
447# CONFIG_EEPROM_93CX6 is not set 497# CONFIG_ICS932S401 is not set
448CONFIG_ATMEL_SSC=m 498CONFIG_ATMEL_SSC=m
449# CONFIG_ENCLOSURE_SERVICES is not set 499# CONFIG_ENCLOSURE_SERVICES is not set
450# CONFIG_HAVE_IDE is not set 500# CONFIG_ISL29003 is not set
501# CONFIG_C2PORT is not set
502
503#
504# EEPROM support
505#
506CONFIG_EEPROM_AT24=m
507# CONFIG_EEPROM_AT25 is not set
508# CONFIG_EEPROM_LEGACY is not set
509# CONFIG_EEPROM_MAX6875 is not set
510# CONFIG_EEPROM_93CX6 is not set
451 511
452# 512#
453# SCSI device support 513# SCSI device support
@@ -469,10 +529,6 @@ CONFIG_BLK_DEV_SR=m
469# CONFIG_BLK_DEV_SR_VENDOR is not set 529# CONFIG_BLK_DEV_SR_VENDOR is not set
470# CONFIG_CHR_DEV_SG is not set 530# CONFIG_CHR_DEV_SG is not set
471# CONFIG_CHR_DEV_SCH is not set 531# CONFIG_CHR_DEV_SCH is not set
472
473#
474# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
475#
476# CONFIG_SCSI_MULTI_LUN is not set 532# CONFIG_SCSI_MULTI_LUN is not set
477# CONFIG_SCSI_CONSTANTS is not set 533# CONFIG_SCSI_CONSTANTS is not set
478# CONFIG_SCSI_LOGGING is not set 534# CONFIG_SCSI_LOGGING is not set
@@ -489,8 +545,10 @@ CONFIG_SCSI_WAIT_SCAN=m
489# CONFIG_SCSI_SRP_ATTRS is not set 545# CONFIG_SCSI_SRP_ATTRS is not set
490# CONFIG_SCSI_LOWLEVEL is not set 546# CONFIG_SCSI_LOWLEVEL is not set
491# CONFIG_SCSI_DH is not set 547# CONFIG_SCSI_DH is not set
548# CONFIG_SCSI_OSD_INITIATOR is not set
492CONFIG_ATA=m 549CONFIG_ATA=m
493# CONFIG_ATA_NONSTANDARD is not set 550# CONFIG_ATA_NONSTANDARD is not set
551CONFIG_ATA_VERBOSE_ERROR=y
494# CONFIG_SATA_PMP is not set 552# CONFIG_SATA_PMP is not set
495CONFIG_ATA_SFF=y 553CONFIG_ATA_SFF=y
496# CONFIG_SATA_MV is not set 554# CONFIG_SATA_MV is not set
@@ -519,26 +577,37 @@ CONFIG_PHYLIB=y
519# CONFIG_BROADCOM_PHY is not set 577# CONFIG_BROADCOM_PHY is not set
520# CONFIG_ICPLUS_PHY is not set 578# CONFIG_ICPLUS_PHY is not set
521# CONFIG_REALTEK_PHY is not set 579# CONFIG_REALTEK_PHY is not set
580# CONFIG_NATIONAL_PHY is not set
581# CONFIG_STE10XP is not set
582# CONFIG_LSI_ET1011C_PHY is not set
522# CONFIG_FIXED_PHY is not set 583# CONFIG_FIXED_PHY is not set
523# CONFIG_MDIO_BITBANG is not set 584# CONFIG_MDIO_BITBANG is not set
524CONFIG_NET_ETHERNET=y 585CONFIG_NET_ETHERNET=y
525# CONFIG_MII is not set 586# CONFIG_MII is not set
526CONFIG_MACB=y 587CONFIG_MACB=y
527# CONFIG_ENC28J60 is not set 588# CONFIG_ENC28J60 is not set
589# CONFIG_ETHOC is not set
590# CONFIG_DNET is not set
528# CONFIG_IBM_NEW_EMAC_ZMII is not set 591# CONFIG_IBM_NEW_EMAC_ZMII is not set
529# CONFIG_IBM_NEW_EMAC_RGMII is not set 592# CONFIG_IBM_NEW_EMAC_RGMII is not set
530# CONFIG_IBM_NEW_EMAC_TAH is not set 593# CONFIG_IBM_NEW_EMAC_TAH is not set
531# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 594# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
595# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
596# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
597# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
532# CONFIG_B44 is not set 598# CONFIG_B44 is not set
599# CONFIG_KS8842 is not set
600# CONFIG_KS8851 is not set
601# CONFIG_KS8851_MLL is not set
533# CONFIG_NETDEV_1000 is not set 602# CONFIG_NETDEV_1000 is not set
534# CONFIG_NETDEV_10000 is not set 603# CONFIG_NETDEV_10000 is not set
604CONFIG_WLAN=y
605# CONFIG_WLAN_PRE80211 is not set
606# CONFIG_WLAN_80211 is not set
535 607
536# 608#
537# Wireless LAN 609# Enable WiMAX (Networking options) to see the WiMAX drivers
538# 610#
539# CONFIG_WLAN_PRE80211 is not set
540# CONFIG_WLAN_80211 is not set
541# CONFIG_IWLWIFI_LEDS is not set
542# CONFIG_WAN is not set 611# CONFIG_WAN is not set
543CONFIG_PPP=m 612CONFIG_PPP=m
544# CONFIG_PPP_MULTILINK is not set 613# CONFIG_PPP_MULTILINK is not set
@@ -580,18 +649,25 @@ CONFIG_INPUT_EVDEV=m
580# Input Device Drivers 649# Input Device Drivers
581# 650#
582CONFIG_INPUT_KEYBOARD=y 651CONFIG_INPUT_KEYBOARD=y
652# CONFIG_KEYBOARD_ADP5588 is not set
583# CONFIG_KEYBOARD_ATKBD is not set 653# CONFIG_KEYBOARD_ATKBD is not set
584# CONFIG_KEYBOARD_SUNKBD is not set 654# CONFIG_QT2160 is not set
585# CONFIG_KEYBOARD_LKKBD is not set 655# CONFIG_KEYBOARD_LKKBD is not set
586# CONFIG_KEYBOARD_XTKBD is not set 656CONFIG_KEYBOARD_GPIO=m
657# CONFIG_KEYBOARD_MATRIX is not set
658# CONFIG_KEYBOARD_LM8323 is not set
659# CONFIG_KEYBOARD_MAX7359 is not set
587# CONFIG_KEYBOARD_NEWTON is not set 660# CONFIG_KEYBOARD_NEWTON is not set
661# CONFIG_KEYBOARD_OPENCORES is not set
588# CONFIG_KEYBOARD_STOWAWAY is not set 662# CONFIG_KEYBOARD_STOWAWAY is not set
589CONFIG_KEYBOARD_GPIO=m 663# CONFIG_KEYBOARD_SUNKBD is not set
664# CONFIG_KEYBOARD_XTKBD is not set
590CONFIG_INPUT_MOUSE=y 665CONFIG_INPUT_MOUSE=y
591# CONFIG_MOUSE_PS2 is not set 666# CONFIG_MOUSE_PS2 is not set
592# CONFIG_MOUSE_SERIAL is not set 667# CONFIG_MOUSE_SERIAL is not set
593# CONFIG_MOUSE_VSXXXAA is not set 668# CONFIG_MOUSE_VSXXXAA is not set
594CONFIG_MOUSE_GPIO=m 669CONFIG_MOUSE_GPIO=m
670# CONFIG_MOUSE_SYNAPTICS_I2C is not set
595# CONFIG_INPUT_JOYSTICK is not set 671# CONFIG_INPUT_JOYSTICK is not set
596# CONFIG_INPUT_TABLET is not set 672# CONFIG_INPUT_TABLET is not set
597# CONFIG_INPUT_TOUCHSCREEN is not set 673# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -622,9 +698,11 @@ CONFIG_SERIAL_ATMEL=y
622CONFIG_SERIAL_ATMEL_CONSOLE=y 698CONFIG_SERIAL_ATMEL_CONSOLE=y
623CONFIG_SERIAL_ATMEL_PDC=y 699CONFIG_SERIAL_ATMEL_PDC=y
624# CONFIG_SERIAL_ATMEL_TTYAT is not set 700# CONFIG_SERIAL_ATMEL_TTYAT is not set
701# CONFIG_SERIAL_MAX3100 is not set
625CONFIG_SERIAL_CORE=y 702CONFIG_SERIAL_CORE=y
626CONFIG_SERIAL_CORE_CONSOLE=y 703CONFIG_SERIAL_CORE_CONSOLE=y
627CONFIG_UNIX98_PTYS=y 704CONFIG_UNIX98_PTYS=y
705# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
628# CONFIG_LEGACY_PTYS is not set 706# CONFIG_LEGACY_PTYS is not set
629# CONFIG_IPMI_HANDLER is not set 707# CONFIG_IPMI_HANDLER is not set
630# CONFIG_HW_RANDOM is not set 708# CONFIG_HW_RANDOM is not set
@@ -633,7 +711,9 @@ CONFIG_UNIX98_PTYS=y
633# CONFIG_TCG_TPM is not set 711# CONFIG_TCG_TPM is not set
634CONFIG_I2C=m 712CONFIG_I2C=m
635CONFIG_I2C_BOARDINFO=y 713CONFIG_I2C_BOARDINFO=y
714CONFIG_I2C_COMPAT=y
636CONFIG_I2C_CHARDEV=m 715CONFIG_I2C_CHARDEV=m
716CONFIG_I2C_HELPER_AUTO=y
637CONFIG_I2C_ALGOBIT=m 717CONFIG_I2C_ALGOBIT=m
638 718
639# 719#
@@ -643,6 +723,7 @@ CONFIG_I2C_ALGOBIT=m
643# 723#
644# I2C system bus drivers (mostly embedded / system-on-chip) 724# I2C system bus drivers (mostly embedded / system-on-chip)
645# 725#
726# CONFIG_I2C_DESIGNWARE is not set
646CONFIG_I2C_GPIO=m 727CONFIG_I2C_GPIO=m
647# CONFIG_I2C_OCORES is not set 728# CONFIG_I2C_OCORES is not set
648# CONFIG_I2C_SIMTEC is not set 729# CONFIG_I2C_SIMTEC is not set
@@ -663,14 +744,6 @@ CONFIG_I2C_GPIO=m
663# Miscellaneous I2C Chip support 744# Miscellaneous I2C Chip support
664# 745#
665# CONFIG_DS1682 is not set 746# CONFIG_DS1682 is not set
666CONFIG_EEPROM_AT24=m
667# CONFIG_EEPROM_LEGACY is not set
668# CONFIG_SENSORS_PCF8574 is not set
669# CONFIG_PCF8575 is not set
670# CONFIG_SENSORS_PCA9539 is not set
671# CONFIG_SENSORS_PCF8591 is not set
672# CONFIG_TPS65010 is not set
673# CONFIG_SENSORS_MAX6875 is not set
674# CONFIG_SENSORS_TSL2550 is not set 747# CONFIG_SENSORS_TSL2550 is not set
675# CONFIG_I2C_DEBUG_CORE is not set 748# CONFIG_I2C_DEBUG_CORE is not set
676# CONFIG_I2C_DEBUG_ALGO is not set 749# CONFIG_I2C_DEBUG_ALGO is not set
@@ -685,19 +758,28 @@ CONFIG_SPI_MASTER=y
685# 758#
686CONFIG_SPI_ATMEL=y 759CONFIG_SPI_ATMEL=y
687# CONFIG_SPI_BITBANG is not set 760# CONFIG_SPI_BITBANG is not set
761# CONFIG_SPI_GPIO is not set
688 762
689# 763#
690# SPI Protocol Masters 764# SPI Protocol Masters
691# 765#
692# CONFIG_EEPROM_AT25 is not set
693CONFIG_SPI_SPIDEV=m 766CONFIG_SPI_SPIDEV=m
694# CONFIG_SPI_TLE62X0 is not set 767# CONFIG_SPI_TLE62X0 is not set
768
769#
770# PPS support
771#
772# CONFIG_PPS is not set
695CONFIG_ARCH_REQUIRE_GPIOLIB=y 773CONFIG_ARCH_REQUIRE_GPIOLIB=y
696CONFIG_GPIOLIB=y 774CONFIG_GPIOLIB=y
697# CONFIG_DEBUG_GPIO is not set 775# CONFIG_DEBUG_GPIO is not set
698CONFIG_GPIO_SYSFS=y 776CONFIG_GPIO_SYSFS=y
699 777
700# 778#
779# Memory mapped GPIO expanders:
780#
781
782#
701# I2C GPIO expanders: 783# I2C GPIO expanders:
702# 784#
703# CONFIG_GPIO_MAX732X is not set 785# CONFIG_GPIO_MAX732X is not set
@@ -713,11 +795,15 @@ CONFIG_GPIO_SYSFS=y
713# 795#
714# CONFIG_GPIO_MAX7301 is not set 796# CONFIG_GPIO_MAX7301 is not set
715# CONFIG_GPIO_MCP23S08 is not set 797# CONFIG_GPIO_MCP23S08 is not set
798# CONFIG_GPIO_MC33880 is not set
799
800#
801# AC97 GPIO expanders:
802#
716# CONFIG_W1 is not set 803# CONFIG_W1 is not set
717# CONFIG_POWER_SUPPLY is not set 804# CONFIG_POWER_SUPPLY is not set
718# CONFIG_HWMON is not set 805# CONFIG_HWMON is not set
719# CONFIG_THERMAL is not set 806# CONFIG_THERMAL is not set
720# CONFIG_THERMAL_HWMON is not set
721CONFIG_WATCHDOG=y 807CONFIG_WATCHDOG=y
722# CONFIG_WATCHDOG_NOWAYOUT is not set 808# CONFIG_WATCHDOG_NOWAYOUT is not set
723 809
@@ -726,11 +812,11 @@ CONFIG_WATCHDOG=y
726# 812#
727# CONFIG_SOFT_WATCHDOG is not set 813# CONFIG_SOFT_WATCHDOG is not set
728CONFIG_AT32AP700X_WDT=y 814CONFIG_AT32AP700X_WDT=y
815CONFIG_SSB_POSSIBLE=y
729 816
730# 817#
731# Sonics Silicon Backplane 818# Sonics Silicon Backplane
732# 819#
733CONFIG_SSB_POSSIBLE=y
734# CONFIG_SSB is not set 820# CONFIG_SSB is not set
735 821
736# 822#
@@ -739,22 +825,17 @@ CONFIG_SSB_POSSIBLE=y
739# CONFIG_MFD_CORE is not set 825# CONFIG_MFD_CORE is not set
740# CONFIG_MFD_SM501 is not set 826# CONFIG_MFD_SM501 is not set
741# CONFIG_HTC_PASIC3 is not set 827# CONFIG_HTC_PASIC3 is not set
742 828# CONFIG_TPS65010 is not set
743# 829# CONFIG_MFD_TMIO is not set
744# Multimedia devices 830# CONFIG_MFD_WM8400 is not set
745# 831# CONFIG_MFD_WM831X is not set
746 832# CONFIG_MFD_WM8350_I2C is not set
747# 833# CONFIG_MFD_PCF50633 is not set
748# Multimedia core support 834# CONFIG_MFD_MC13783 is not set
749# 835# CONFIG_AB3100_CORE is not set
750# CONFIG_VIDEO_DEV is not set 836# CONFIG_EZX_PCAP is not set
751# CONFIG_DVB_CORE is not set 837# CONFIG_REGULATOR is not set
752# CONFIG_VIDEO_MEDIA is not set 838# CONFIG_MEDIA_SUPPORT is not set
753
754#
755# Multimedia drivers
756#
757# CONFIG_DAB is not set
758 839
759# 840#
760# Graphics support 841# Graphics support
@@ -764,6 +845,7 @@ CONFIG_SSB_POSSIBLE=y
764CONFIG_FB=y 845CONFIG_FB=y
765# CONFIG_FIRMWARE_EDID is not set 846# CONFIG_FIRMWARE_EDID is not set
766# CONFIG_FB_DDC is not set 847# CONFIG_FB_DDC is not set
848# CONFIG_FB_BOOT_VESA_SUPPORT is not set
767CONFIG_FB_CFB_FILLRECT=y 849CONFIG_FB_CFB_FILLRECT=y
768CONFIG_FB_CFB_COPYAREA=y 850CONFIG_FB_CFB_COPYAREA=y
769CONFIG_FB_CFB_IMAGEBLIT=y 851CONFIG_FB_CFB_IMAGEBLIT=y
@@ -785,10 +867,15 @@ CONFIG_FB_CFB_IMAGEBLIT=y
785# CONFIG_FB_S1D13XXX is not set 867# CONFIG_FB_S1D13XXX is not set
786CONFIG_FB_ATMEL=y 868CONFIG_FB_ATMEL=y
787# CONFIG_FB_VIRTUAL is not set 869# CONFIG_FB_VIRTUAL is not set
870# CONFIG_FB_METRONOME is not set
871# CONFIG_FB_MB862XX is not set
872# CONFIG_FB_BROADSHEET is not set
788CONFIG_BACKLIGHT_LCD_SUPPORT=y 873CONFIG_BACKLIGHT_LCD_SUPPORT=y
789CONFIG_LCD_CLASS_DEVICE=y 874CONFIG_LCD_CLASS_DEVICE=y
875# CONFIG_LCD_LMS283GF05 is not set
790CONFIG_LCD_LTV350QV=y 876CONFIG_LCD_LTV350QV=y
791# CONFIG_LCD_ILI9320 is not set 877# CONFIG_LCD_ILI9320 is not set
878# CONFIG_LCD_TDO24M is not set
792# CONFIG_LCD_VGG2432A4 is not set 879# CONFIG_LCD_VGG2432A4 is not set
793# CONFIG_LCD_PLATFORM is not set 880# CONFIG_LCD_PLATFORM is not set
794# CONFIG_BACKLIGHT_CLASS_DEVICE is not set 881# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
@@ -799,6 +886,8 @@ CONFIG_LCD_LTV350QV=y
799# CONFIG_DISPLAY_SUPPORT is not set 886# CONFIG_DISPLAY_SUPPORT is not set
800# CONFIG_LOGO is not set 887# CONFIG_LOGO is not set
801CONFIG_SOUND=m 888CONFIG_SOUND=m
889CONFIG_SOUND_OSS_CORE=y
890CONFIG_SOUND_OSS_CORE_PRECLAIM=y
802CONFIG_SND=m 891CONFIG_SND=m
803CONFIG_SND_TIMER=m 892CONFIG_SND_TIMER=m
804CONFIG_SND_PCM=m 893CONFIG_SND_PCM=m
@@ -807,12 +896,24 @@ CONFIG_SND_OSSEMUL=y
807CONFIG_SND_MIXER_OSS=m 896CONFIG_SND_MIXER_OSS=m
808CONFIG_SND_PCM_OSS=m 897CONFIG_SND_PCM_OSS=m
809CONFIG_SND_PCM_OSS_PLUGINS=y 898CONFIG_SND_PCM_OSS_PLUGINS=y
899# CONFIG_SND_HRTIMER is not set
810# CONFIG_SND_DYNAMIC_MINORS is not set 900# CONFIG_SND_DYNAMIC_MINORS is not set
811# CONFIG_SND_SUPPORT_OLD_API is not set 901# CONFIG_SND_SUPPORT_OLD_API is not set
812# CONFIG_SND_VERBOSE_PROCFS is not set 902# CONFIG_SND_VERBOSE_PROCFS is not set
813# CONFIG_SND_VERBOSE_PRINTK is not set 903# CONFIG_SND_VERBOSE_PRINTK is not set
814# CONFIG_SND_DEBUG is not set 904# CONFIG_SND_DEBUG is not set
905# CONFIG_SND_RAWMIDI_SEQ is not set
906# CONFIG_SND_OPL3_LIB_SEQ is not set
907# CONFIG_SND_OPL4_LIB_SEQ is not set
908# CONFIG_SND_SBAWE_SEQ is not set
909# CONFIG_SND_EMU10K1_SEQ is not set
815# CONFIG_SND_DRIVERS is not set 910# CONFIG_SND_DRIVERS is not set
911
912#
913# Atmel devices (AVR32 and AT91)
914#
915# CONFIG_SND_ATMEL_ABDAC is not set
916# CONFIG_SND_ATMEL_AC97C is not set
816CONFIG_SND_SPI=y 917CONFIG_SND_SPI=y
817CONFIG_SND_AT73C213=m 918CONFIG_SND_AT73C213=m
818CONFIG_SND_AT73C213_TARGET_BITRATE=48000 919CONFIG_SND_AT73C213_TARGET_BITRATE=48000
@@ -825,33 +926,43 @@ CONFIG_USB_SUPPORT=y
825# CONFIG_USB_ARCH_HAS_EHCI is not set 926# CONFIG_USB_ARCH_HAS_EHCI is not set
826# CONFIG_USB_OTG_WHITELIST is not set 927# CONFIG_USB_OTG_WHITELIST is not set
827# CONFIG_USB_OTG_BLACKLIST_HUB is not set 928# CONFIG_USB_OTG_BLACKLIST_HUB is not set
929# CONFIG_USB_GADGET_MUSB_HDRC is not set
828 930
829# 931#
830# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 932# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
831# 933#
832CONFIG_USB_GADGET=y 934CONFIG_USB_GADGET=y
833# CONFIG_USB_GADGET_DEBUG is not set 935# CONFIG_USB_GADGET_DEBUG is not set
834# CONFIG_USB_GADGET_DEBUG_FILES is not set 936# CONFIG_USB_GADGET_DEBUG_FILES is not set
835# CONFIG_USB_GADGET_DEBUG_FS is not set 937# CONFIG_USB_GADGET_DEBUG_FS is not set
938CONFIG_USB_GADGET_VBUS_DRAW=2
836CONFIG_USB_GADGET_SELECTED=y 939CONFIG_USB_GADGET_SELECTED=y
837# CONFIG_USB_GADGET_AMD5536UDC is not set 940# CONFIG_USB_GADGET_AT91 is not set
838CONFIG_USB_GADGET_ATMEL_USBA=y 941CONFIG_USB_GADGET_ATMEL_USBA=y
839CONFIG_USB_ATMEL_USBA=y 942CONFIG_USB_ATMEL_USBA=y
840# CONFIG_USB_GADGET_FSL_USB2 is not set 943# CONFIG_USB_GADGET_FSL_USB2 is not set
841# CONFIG_USB_GADGET_NET2280 is not set
842# CONFIG_USB_GADGET_PXA25X is not set
843# CONFIG_USB_GADGET_M66592 is not set
844# CONFIG_USB_GADGET_PXA27X is not set
845# CONFIG_USB_GADGET_GOKU is not set
846# CONFIG_USB_GADGET_LH7A40X is not set 944# CONFIG_USB_GADGET_LH7A40X is not set
847# CONFIG_USB_GADGET_OMAP is not set 945# CONFIG_USB_GADGET_OMAP is not set
946# CONFIG_USB_GADGET_PXA25X is not set
947# CONFIG_USB_GADGET_R8A66597 is not set
948# CONFIG_USB_GADGET_PXA27X is not set
949# CONFIG_USB_GADGET_S3C_HSOTG is not set
950# CONFIG_USB_GADGET_IMX is not set
848# CONFIG_USB_GADGET_S3C2410 is not set 951# CONFIG_USB_GADGET_S3C2410 is not set
849# CONFIG_USB_GADGET_AT91 is not set 952# CONFIG_USB_GADGET_M66592 is not set
953# CONFIG_USB_GADGET_AMD5536UDC is not set
954# CONFIG_USB_GADGET_FSL_QE is not set
955# CONFIG_USB_GADGET_CI13XXX is not set
956# CONFIG_USB_GADGET_NET2280 is not set
957# CONFIG_USB_GADGET_GOKU is not set
958# CONFIG_USB_GADGET_LANGWELL is not set
850# CONFIG_USB_GADGET_DUMMY_HCD is not set 959# CONFIG_USB_GADGET_DUMMY_HCD is not set
851CONFIG_USB_GADGET_DUALSPEED=y 960CONFIG_USB_GADGET_DUALSPEED=y
852CONFIG_USB_ZERO=m 961CONFIG_USB_ZERO=m
962# CONFIG_USB_AUDIO is not set
853CONFIG_USB_ETH=m 963CONFIG_USB_ETH=m
854CONFIG_USB_ETH_RNDIS=y 964CONFIG_USB_ETH_RNDIS=y
965# CONFIG_USB_ETH_EEM is not set
855CONFIG_USB_GADGETFS=m 966CONFIG_USB_GADGETFS=m
856CONFIG_USB_FILE_STORAGE=m 967CONFIG_USB_FILE_STORAGE=m
857# CONFIG_USB_FILE_STORAGE_TEST is not set 968# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -859,12 +970,18 @@ CONFIG_USB_G_SERIAL=m
859# CONFIG_USB_MIDI_GADGET is not set 970# CONFIG_USB_MIDI_GADGET is not set
860# CONFIG_USB_G_PRINTER is not set 971# CONFIG_USB_G_PRINTER is not set
861CONFIG_USB_CDC_COMPOSITE=m 972CONFIG_USB_CDC_COMPOSITE=m
973
974#
975# OTG and related infrastructure
976#
977# CONFIG_USB_GPIO_VBUS is not set
978# CONFIG_NOP_USB_XCEIV is not set
862CONFIG_MMC=y 979CONFIG_MMC=y
863# CONFIG_MMC_DEBUG is not set 980# CONFIG_MMC_DEBUG is not set
864# CONFIG_MMC_UNSAFE_RESUME is not set 981# CONFIG_MMC_UNSAFE_RESUME is not set
865 982
866# 983#
867# MMC/SD Card Drivers 984# MMC/SD/SDIO Card Drivers
868# 985#
869CONFIG_MMC_BLOCK=y 986CONFIG_MMC_BLOCK=y
870CONFIG_MMC_BLOCK_BOUNCE=y 987CONFIG_MMC_BLOCK_BOUNCE=y
@@ -872,10 +989,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y
872# CONFIG_MMC_TEST is not set 989# CONFIG_MMC_TEST is not set
873 990
874# 991#
875# MMC/SD Host Controller Drivers 992# MMC/SD/SDIO Host Controller Drivers
876# 993#
877# CONFIG_MMC_SDHCI is not set 994# CONFIG_MMC_SDHCI is not set
995# CONFIG_MMC_AT91 is not set
878CONFIG_MMC_ATMELMCI=y 996CONFIG_MMC_ATMELMCI=y
997# CONFIG_MMC_ATMELMCI_DMA is not set
879CONFIG_MMC_SPI=m 998CONFIG_MMC_SPI=m
880# CONFIG_MEMSTICK is not set 999# CONFIG_MEMSTICK is not set
881CONFIG_NEW_LEDS=y 1000CONFIG_NEW_LEDS=y
@@ -887,7 +1006,11 @@ CONFIG_LEDS_CLASS=m
887CONFIG_LEDS_ATMEL_PWM=m 1006CONFIG_LEDS_ATMEL_PWM=m
888# CONFIG_LEDS_PCA9532 is not set 1007# CONFIG_LEDS_PCA9532 is not set
889CONFIG_LEDS_GPIO=m 1008CONFIG_LEDS_GPIO=m
1009CONFIG_LEDS_GPIO_PLATFORM=y
1010# CONFIG_LEDS_LP3944 is not set
890# CONFIG_LEDS_PCA955X is not set 1011# CONFIG_LEDS_PCA955X is not set
1012# CONFIG_LEDS_DAC124S085 is not set
1013# CONFIG_LEDS_BD2802 is not set
891 1014
892# 1015#
893# LED Triggers 1016# LED Triggers
@@ -895,7 +1018,13 @@ CONFIG_LEDS_GPIO=m
895CONFIG_LEDS_TRIGGERS=y 1018CONFIG_LEDS_TRIGGERS=y
896CONFIG_LEDS_TRIGGER_TIMER=m 1019CONFIG_LEDS_TRIGGER_TIMER=m
897CONFIG_LEDS_TRIGGER_HEARTBEAT=m 1020CONFIG_LEDS_TRIGGER_HEARTBEAT=m
1021# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1022# CONFIG_LEDS_TRIGGER_GPIO is not set
898CONFIG_LEDS_TRIGGER_DEFAULT_ON=m 1023CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
1024
1025#
1026# iptables trigger is under Netfilter config (LED target)
1027#
899# CONFIG_ACCESSIBILITY is not set 1028# CONFIG_ACCESSIBILITY is not set
900CONFIG_RTC_LIB=y 1029CONFIG_RTC_LIB=y
901CONFIG_RTC_CLASS=y 1030CONFIG_RTC_CLASS=y
@@ -927,25 +1056,33 @@ CONFIG_RTC_INTF_DEV=y
927# CONFIG_RTC_DRV_M41T80 is not set 1056# CONFIG_RTC_DRV_M41T80 is not set
928# CONFIG_RTC_DRV_S35390A is not set 1057# CONFIG_RTC_DRV_S35390A is not set
929# CONFIG_RTC_DRV_FM3130 is not set 1058# CONFIG_RTC_DRV_FM3130 is not set
1059# CONFIG_RTC_DRV_RX8581 is not set
1060# CONFIG_RTC_DRV_RX8025 is not set
930 1061
931# 1062#
932# SPI RTC drivers 1063# SPI RTC drivers
933# 1064#
934# CONFIG_RTC_DRV_M41T94 is not set 1065# CONFIG_RTC_DRV_M41T94 is not set
935# CONFIG_RTC_DRV_DS1305 is not set 1066# CONFIG_RTC_DRV_DS1305 is not set
1067# CONFIG_RTC_DRV_DS1390 is not set
936# CONFIG_RTC_DRV_MAX6902 is not set 1068# CONFIG_RTC_DRV_MAX6902 is not set
937# CONFIG_RTC_DRV_R9701 is not set 1069# CONFIG_RTC_DRV_R9701 is not set
938# CONFIG_RTC_DRV_RS5C348 is not set 1070# CONFIG_RTC_DRV_RS5C348 is not set
1071# CONFIG_RTC_DRV_DS3234 is not set
1072# CONFIG_RTC_DRV_PCF2123 is not set
939 1073
940# 1074#
941# Platform RTC drivers 1075# Platform RTC drivers
942# 1076#
1077# CONFIG_RTC_DRV_DS1286 is not set
943# CONFIG_RTC_DRV_DS1511 is not set 1078# CONFIG_RTC_DRV_DS1511 is not set
944# CONFIG_RTC_DRV_DS1553 is not set 1079# CONFIG_RTC_DRV_DS1553 is not set
945# CONFIG_RTC_DRV_DS1742 is not set 1080# CONFIG_RTC_DRV_DS1742 is not set
946# CONFIG_RTC_DRV_STK17TA8 is not set 1081# CONFIG_RTC_DRV_STK17TA8 is not set
947# CONFIG_RTC_DRV_M48T86 is not set 1082# CONFIG_RTC_DRV_M48T86 is not set
1083# CONFIG_RTC_DRV_M48T35 is not set
948# CONFIG_RTC_DRV_M48T59 is not set 1084# CONFIG_RTC_DRV_M48T59 is not set
1085# CONFIG_RTC_DRV_BQ4802 is not set
949# CONFIG_RTC_DRV_V3020 is not set 1086# CONFIG_RTC_DRV_V3020 is not set
950 1087
951# 1088#
@@ -964,25 +1101,45 @@ CONFIG_DMA_ENGINE=y
964# DMA Clients 1101# DMA Clients
965# 1102#
966# CONFIG_NET_DMA is not set 1103# CONFIG_NET_DMA is not set
1104# CONFIG_ASYNC_TX_DMA is not set
967# CONFIG_DMATEST is not set 1105# CONFIG_DMATEST is not set
1106# CONFIG_AUXDISPLAY is not set
968# CONFIG_UIO is not set 1107# CONFIG_UIO is not set
969 1108
970# 1109#
1110# TI VLYNQ
1111#
1112# CONFIG_STAGING is not set
1113
1114#
971# File systems 1115# File systems
972# 1116#
973CONFIG_EXT2_FS=y 1117CONFIG_EXT2_FS=y
974# CONFIG_EXT2_FS_XATTR is not set 1118# CONFIG_EXT2_FS_XATTR is not set
975# CONFIG_EXT2_FS_XIP is not set 1119# CONFIG_EXT2_FS_XIP is not set
976CONFIG_EXT3_FS=y 1120CONFIG_EXT3_FS=y
1121# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
977# CONFIG_EXT3_FS_XATTR is not set 1122# CONFIG_EXT3_FS_XATTR is not set
978# CONFIG_EXT4DEV_FS is not set 1123CONFIG_EXT4_FS=y
1124CONFIG_EXT4_FS_XATTR=y
1125# CONFIG_EXT4_FS_POSIX_ACL is not set
1126# CONFIG_EXT4_FS_SECURITY is not set
1127# CONFIG_EXT4_DEBUG is not set
979CONFIG_JBD=y 1128CONFIG_JBD=y
980# CONFIG_JBD_DEBUG is not set 1129# CONFIG_JBD_DEBUG is not set
1130CONFIG_JBD2=y
1131# CONFIG_JBD2_DEBUG is not set
1132CONFIG_FS_MBCACHE=y
981# CONFIG_REISERFS_FS is not set 1133# CONFIG_REISERFS_FS is not set
982# CONFIG_JFS_FS is not set 1134# CONFIG_JFS_FS is not set
983# CONFIG_FS_POSIX_ACL is not set 1135# CONFIG_FS_POSIX_ACL is not set
984# CONFIG_XFS_FS is not set 1136# CONFIG_XFS_FS is not set
1137# CONFIG_GFS2_FS is not set
985# CONFIG_OCFS2_FS is not set 1138# CONFIG_OCFS2_FS is not set
1139# CONFIG_BTRFS_FS is not set
1140# CONFIG_NILFS2_FS is not set
1141CONFIG_FILE_LOCKING=y
1142CONFIG_FSNOTIFY=y
986# CONFIG_DNOTIFY is not set 1143# CONFIG_DNOTIFY is not set
987CONFIG_INOTIFY=y 1144CONFIG_INOTIFY=y
988CONFIG_INOTIFY_USER=y 1145CONFIG_INOTIFY_USER=y
@@ -990,6 +1147,12 @@ CONFIG_INOTIFY_USER=y
990# CONFIG_AUTOFS_FS is not set 1147# CONFIG_AUTOFS_FS is not set
991# CONFIG_AUTOFS4_FS is not set 1148# CONFIG_AUTOFS4_FS is not set
992CONFIG_FUSE_FS=m 1149CONFIG_FUSE_FS=m
1150# CONFIG_CUSE is not set
1151
1152#
1153# Caches
1154#
1155# CONFIG_FSCACHE is not set
993 1156
994# 1157#
995# CD-ROM/DVD Filesystems 1158# CD-ROM/DVD Filesystems
@@ -1013,15 +1176,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1013CONFIG_PROC_FS=y 1176CONFIG_PROC_FS=y
1014CONFIG_PROC_KCORE=y 1177CONFIG_PROC_KCORE=y
1015CONFIG_PROC_SYSCTL=y 1178CONFIG_PROC_SYSCTL=y
1179CONFIG_PROC_PAGE_MONITOR=y
1016CONFIG_SYSFS=y 1180CONFIG_SYSFS=y
1017CONFIG_TMPFS=y 1181CONFIG_TMPFS=y
1018# CONFIG_TMPFS_POSIX_ACL is not set 1182# CONFIG_TMPFS_POSIX_ACL is not set
1019# CONFIG_HUGETLB_PAGE is not set 1183# CONFIG_HUGETLB_PAGE is not set
1020# CONFIG_CONFIGFS_FS is not set 1184# CONFIG_CONFIGFS_FS is not set
1021 1185CONFIG_MISC_FILESYSTEMS=y
1022#
1023# Miscellaneous filesystems
1024#
1025# CONFIG_ADFS_FS is not set 1186# CONFIG_ADFS_FS is not set
1026# CONFIG_AFFS_FS is not set 1187# CONFIG_AFFS_FS is not set
1027# CONFIG_HFS_FS is not set 1188# CONFIG_HFS_FS is not set
@@ -1039,7 +1200,14 @@ CONFIG_JFFS2_ZLIB=y
1039# CONFIG_JFFS2_LZO is not set 1200# CONFIG_JFFS2_LZO is not set
1040CONFIG_JFFS2_RTIME=y 1201CONFIG_JFFS2_RTIME=y
1041# CONFIG_JFFS2_RUBIN is not set 1202# CONFIG_JFFS2_RUBIN is not set
1203CONFIG_UBIFS_FS=y
1204# CONFIG_UBIFS_FS_XATTR is not set
1205# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
1206CONFIG_UBIFS_FS_LZO=y
1207CONFIG_UBIFS_FS_ZLIB=y
1208# CONFIG_UBIFS_FS_DEBUG is not set
1042# CONFIG_CRAMFS is not set 1209# CONFIG_CRAMFS is not set
1210# CONFIG_SQUASHFS is not set
1043# CONFIG_VXFS_FS is not set 1211# CONFIG_VXFS_FS is not set
1044CONFIG_MINIX_FS=m 1212CONFIG_MINIX_FS=m
1045# CONFIG_OMFS_FS is not set 1213# CONFIG_OMFS_FS is not set
@@ -1122,6 +1290,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1122CONFIG_ENABLE_MUST_CHECK=y 1290CONFIG_ENABLE_MUST_CHECK=y
1123CONFIG_FRAME_WARN=1024 1291CONFIG_FRAME_WARN=1024
1124CONFIG_MAGIC_SYSRQ=y 1292CONFIG_MAGIC_SYSRQ=y
1293# CONFIG_STRIP_ASM_SYMS is not set
1125# CONFIG_UNUSED_SYMBOLS is not set 1294# CONFIG_UNUSED_SYMBOLS is not set
1126CONFIG_DEBUG_FS=y 1295CONFIG_DEBUG_FS=y
1127# CONFIG_HEADERS_CHECK is not set 1296# CONFIG_HEADERS_CHECK is not set
@@ -1130,6 +1299,9 @@ CONFIG_DEBUG_KERNEL=y
1130CONFIG_DETECT_SOFTLOCKUP=y 1299CONFIG_DETECT_SOFTLOCKUP=y
1131# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1300# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1132CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1301CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1302CONFIG_DETECT_HUNG_TASK=y
1303# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1304CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1133CONFIG_SCHED_DEBUG=y 1305CONFIG_SCHED_DEBUG=y
1134# CONFIG_SCHEDSTATS is not set 1306# CONFIG_SCHEDSTATS is not set
1135# CONFIG_TIMER_STATS is not set 1307# CONFIG_TIMER_STATS is not set
@@ -1145,6 +1317,7 @@ CONFIG_SCHED_DEBUG=y
1145# CONFIG_LOCK_STAT is not set 1317# CONFIG_LOCK_STAT is not set
1146# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1318# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1147# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1319# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1320CONFIG_STACKTRACE=y
1148# CONFIG_DEBUG_KOBJECT is not set 1321# CONFIG_DEBUG_KOBJECT is not set
1149CONFIG_DEBUG_BUGVERBOSE=y 1322CONFIG_DEBUG_BUGVERBOSE=y
1150# CONFIG_DEBUG_INFO is not set 1323# CONFIG_DEBUG_INFO is not set
@@ -1153,13 +1326,39 @@ CONFIG_DEBUG_BUGVERBOSE=y
1153# CONFIG_DEBUG_MEMORY_INIT is not set 1326# CONFIG_DEBUG_MEMORY_INIT is not set
1154# CONFIG_DEBUG_LIST is not set 1327# CONFIG_DEBUG_LIST is not set
1155# CONFIG_DEBUG_SG is not set 1328# CONFIG_DEBUG_SG is not set
1329# CONFIG_DEBUG_NOTIFIERS is not set
1330# CONFIG_DEBUG_CREDENTIALS is not set
1156CONFIG_FRAME_POINTER=y 1331CONFIG_FRAME_POINTER=y
1157# CONFIG_BOOT_PRINTK_DELAY is not set 1332# CONFIG_BOOT_PRINTK_DELAY is not set
1158# CONFIG_RCU_TORTURE_TEST is not set 1333# CONFIG_RCU_TORTURE_TEST is not set
1334# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1159# CONFIG_KPROBES_SANITY_TEST is not set 1335# CONFIG_KPROBES_SANITY_TEST is not set
1160# CONFIG_BACKTRACE_SELF_TEST is not set 1336# CONFIG_BACKTRACE_SELF_TEST is not set
1337# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1338# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1161# CONFIG_LKDTM is not set 1339# CONFIG_LKDTM is not set
1162# CONFIG_FAULT_INJECTION is not set 1340# CONFIG_FAULT_INJECTION is not set
1341# CONFIG_PAGE_POISONING is not set
1342CONFIG_NOP_TRACER=y
1343CONFIG_RING_BUFFER=y
1344CONFIG_EVENT_TRACING=y
1345CONFIG_CONTEXT_SWITCH_TRACER=y
1346CONFIG_RING_BUFFER_ALLOW_SWAP=y
1347CONFIG_TRACING=y
1348CONFIG_TRACING_SUPPORT=y
1349CONFIG_FTRACE=y
1350# CONFIG_IRQSOFF_TRACER is not set
1351# CONFIG_SCHED_TRACER is not set
1352# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1353# CONFIG_BOOT_TRACER is not set
1354CONFIG_BRANCH_PROFILE_NONE=y
1355# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1356# CONFIG_PROFILE_ALL_BRANCHES is not set
1357# CONFIG_KMEMTRACE is not set
1358# CONFIG_WORKQUEUE_TRACER is not set
1359# CONFIG_BLK_DEV_IO_TRACE is not set
1360# CONFIG_RING_BUFFER_BENCHMARK is not set
1361# CONFIG_DYNAMIC_DEBUG is not set
1163# CONFIG_SAMPLES is not set 1362# CONFIG_SAMPLES is not set
1164 1363
1165# 1364#
@@ -1167,19 +1366,30 @@ CONFIG_FRAME_POINTER=y
1167# 1366#
1168# CONFIG_KEYS is not set 1367# CONFIG_KEYS is not set
1169# CONFIG_SECURITY is not set 1368# CONFIG_SECURITY is not set
1369# CONFIG_SECURITYFS is not set
1170# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1370# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1171CONFIG_CRYPTO=y 1371CONFIG_CRYPTO=y
1172 1372
1173# 1373#
1174# Crypto core or helper 1374# Crypto core or helper
1175# 1375#
1176CONFIG_CRYPTO_ALGAPI=m 1376# CONFIG_CRYPTO_FIPS is not set
1377CONFIG_CRYPTO_ALGAPI=y
1378CONFIG_CRYPTO_ALGAPI2=y
1177CONFIG_CRYPTO_AEAD=m 1379CONFIG_CRYPTO_AEAD=m
1380CONFIG_CRYPTO_AEAD2=y
1178CONFIG_CRYPTO_BLKCIPHER=m 1381CONFIG_CRYPTO_BLKCIPHER=m
1382CONFIG_CRYPTO_BLKCIPHER2=y
1179CONFIG_CRYPTO_HASH=m 1383CONFIG_CRYPTO_HASH=m
1384CONFIG_CRYPTO_HASH2=y
1385CONFIG_CRYPTO_RNG=m
1386CONFIG_CRYPTO_RNG2=y
1387CONFIG_CRYPTO_PCOMP=y
1180CONFIG_CRYPTO_MANAGER=m 1388CONFIG_CRYPTO_MANAGER=m
1389CONFIG_CRYPTO_MANAGER2=y
1181# CONFIG_CRYPTO_GF128MUL is not set 1390# CONFIG_CRYPTO_GF128MUL is not set
1182# CONFIG_CRYPTO_NULL is not set 1391# CONFIG_CRYPTO_NULL is not set
1392CONFIG_CRYPTO_WORKQUEUE=y
1183# CONFIG_CRYPTO_CRYPTD is not set 1393# CONFIG_CRYPTO_CRYPTD is not set
1184CONFIG_CRYPTO_AUTHENC=m 1394CONFIG_CRYPTO_AUTHENC=m
1185# CONFIG_CRYPTO_TEST is not set 1395# CONFIG_CRYPTO_TEST is not set
@@ -1207,11 +1417,13 @@ CONFIG_CRYPTO_CBC=m
1207# 1417#
1208CONFIG_CRYPTO_HMAC=m 1418CONFIG_CRYPTO_HMAC=m
1209# CONFIG_CRYPTO_XCBC is not set 1419# CONFIG_CRYPTO_XCBC is not set
1420# CONFIG_CRYPTO_VMAC is not set
1210 1421
1211# 1422#
1212# Digest 1423# Digest
1213# 1424#
1214# CONFIG_CRYPTO_CRC32C is not set 1425# CONFIG_CRYPTO_CRC32C is not set
1426# CONFIG_CRYPTO_GHASH is not set
1215# CONFIG_CRYPTO_MD4 is not set 1427# CONFIG_CRYPTO_MD4 is not set
1216CONFIG_CRYPTO_MD5=m 1428CONFIG_CRYPTO_MD5=m
1217# CONFIG_CRYPTO_MICHAEL_MIC is not set 1429# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1228,7 +1440,7 @@ CONFIG_CRYPTO_SHA1=m
1228# 1440#
1229# Ciphers 1441# Ciphers
1230# 1442#
1231# CONFIG_CRYPTO_AES is not set 1443CONFIG_CRYPTO_AES=m
1232# CONFIG_CRYPTO_ANUBIS is not set 1444# CONFIG_CRYPTO_ANUBIS is not set
1233# CONFIG_CRYPTO_ARC4 is not set 1445# CONFIG_CRYPTO_ARC4 is not set
1234# CONFIG_CRYPTO_BLOWFISH is not set 1446# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1247,18 +1459,24 @@ CONFIG_CRYPTO_DES=m
1247# 1459#
1248# Compression 1460# Compression
1249# 1461#
1250CONFIG_CRYPTO_DEFLATE=m 1462CONFIG_CRYPTO_DEFLATE=y
1251# CONFIG_CRYPTO_LZO is not set 1463# CONFIG_CRYPTO_ZLIB is not set
1464CONFIG_CRYPTO_LZO=y
1465
1466#
1467# Random Number Generation
1468#
1469CONFIG_CRYPTO_ANSI_CPRNG=m
1252# CONFIG_CRYPTO_HW is not set 1470# CONFIG_CRYPTO_HW is not set
1471CONFIG_BINARY_PRINTF=y
1253 1472
1254# 1473#
1255# Library routines 1474# Library routines
1256# 1475#
1257CONFIG_BITREVERSE=y 1476CONFIG_BITREVERSE=y
1258# CONFIG_GENERIC_FIND_FIRST_BIT is not set 1477CONFIG_GENERIC_FIND_LAST_BIT=y
1259# CONFIG_GENERIC_FIND_NEXT_BIT is not set
1260CONFIG_CRC_CCITT=m 1478CONFIG_CRC_CCITT=m
1261# CONFIG_CRC16 is not set 1479CONFIG_CRC16=y
1262CONFIG_CRC_T10DIF=m 1480CONFIG_CRC_T10DIF=m
1263CONFIG_CRC_ITU_T=m 1481CONFIG_CRC_ITU_T=m
1264CONFIG_CRC32=y 1482CONFIG_CRC32=y
@@ -1266,8 +1484,11 @@ CONFIG_CRC7=m
1266# CONFIG_LIBCRC32C is not set 1484# CONFIG_LIBCRC32C is not set
1267CONFIG_ZLIB_INFLATE=y 1485CONFIG_ZLIB_INFLATE=y
1268CONFIG_ZLIB_DEFLATE=y 1486CONFIG_ZLIB_DEFLATE=y
1487CONFIG_LZO_COMPRESS=y
1488CONFIG_LZO_DECOMPRESS=y
1489CONFIG_DECOMPRESS_GZIP=y
1269CONFIG_GENERIC_ALLOCATOR=y 1490CONFIG_GENERIC_ALLOCATOR=y
1270CONFIG_PLIST=y
1271CONFIG_HAS_IOMEM=y 1491CONFIG_HAS_IOMEM=y
1272CONFIG_HAS_IOPORT=y 1492CONFIG_HAS_IOPORT=y
1273CONFIG_HAS_DMA=y 1493CONFIG_HAS_DMA=y
1494CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index c1603c4860e0..363e2381f32a 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28-rc8 3# Linux kernel version: 2.6.32-rc5
4# Thu Dec 18 11:22:23 2008 4# Thu Oct 29 13:00:25 2009
5# 5#
6CONFIG_AVR32=y 6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y 7CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
21CONFIG_GENERIC_CALIBRATE_DELAY=y 21CONFIG_GENERIC_CALIBRATE_DELAY=y
22CONFIG_GENERIC_BUG=y 22CONFIG_GENERIC_BUG=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
24 25
25# 26#
26# General setup 27# General setup
@@ -34,21 +35,36 @@ CONFIG_SWAP=y
34CONFIG_SYSVIPC=y 35CONFIG_SYSVIPC=y
35CONFIG_SYSVIPC_SYSCTL=y 36CONFIG_SYSVIPC_SYSCTL=y
36CONFIG_POSIX_MQUEUE=y 37CONFIG_POSIX_MQUEUE=y
38CONFIG_POSIX_MQUEUE_SYSCTL=y
37# CONFIG_BSD_PROCESS_ACCT is not set 39# CONFIG_BSD_PROCESS_ACCT is not set
38# CONFIG_TASKSTATS is not set 40# CONFIG_TASKSTATS is not set
39# CONFIG_AUDIT is not set 41# CONFIG_AUDIT is not set
42
43#
44# RCU Subsystem
45#
46CONFIG_TREE_RCU=y
47# CONFIG_TREE_PREEMPT_RCU is not set
48# CONFIG_RCU_TRACE is not set
49CONFIG_RCU_FANOUT=32
50# CONFIG_RCU_FANOUT_EXACT is not set
51# CONFIG_TREE_RCU_TRACE is not set
40# CONFIG_IKCONFIG is not set 52# CONFIG_IKCONFIG is not set
41CONFIG_LOG_BUF_SHIFT=14 53CONFIG_LOG_BUF_SHIFT=14
42# CONFIG_CGROUPS is not set
43# CONFIG_GROUP_SCHED is not set 54# CONFIG_GROUP_SCHED is not set
55# CONFIG_CGROUPS is not set
44CONFIG_SYSFS_DEPRECATED=y 56CONFIG_SYSFS_DEPRECATED=y
45CONFIG_SYSFS_DEPRECATED_V2=y 57CONFIG_SYSFS_DEPRECATED_V2=y
46CONFIG_RELAY=y 58CONFIG_RELAY=y
47# CONFIG_NAMESPACES is not set 59# CONFIG_NAMESPACES is not set
48CONFIG_BLK_DEV_INITRD=y 60CONFIG_BLK_DEV_INITRD=y
49CONFIG_INITRAMFS_SOURCE="" 61CONFIG_INITRAMFS_SOURCE=""
62CONFIG_RD_GZIP=y
63# CONFIG_RD_BZIP2 is not set
64# CONFIG_RD_LZMA is not set
50CONFIG_CC_OPTIMIZE_FOR_SIZE=y 65CONFIG_CC_OPTIMIZE_FOR_SIZE=y
51CONFIG_SYSCTL=y 66CONFIG_SYSCTL=y
67CONFIG_ANON_INODES=y
52CONFIG_EMBEDDED=y 68CONFIG_EMBEDDED=y
53# CONFIG_SYSCTL_SYSCALL is not set 69# CONFIG_SYSCTL_SYSCALL is not set
54CONFIG_KALLSYMS=y 70CONFIG_KALLSYMS=y
@@ -58,32 +74,40 @@ CONFIG_HOTPLUG=y
58CONFIG_PRINTK=y 74CONFIG_PRINTK=y
59CONFIG_BUG=y 75CONFIG_BUG=y
60CONFIG_ELF_CORE=y 76CONFIG_ELF_CORE=y
61# CONFIG_COMPAT_BRK is not set
62# CONFIG_BASE_FULL is not set 77# CONFIG_BASE_FULL is not set
63CONFIG_FUTEX=y 78CONFIG_FUTEX=y
64CONFIG_ANON_INODES=y
65CONFIG_EPOLL=y 79CONFIG_EPOLL=y
66CONFIG_SIGNALFD=y 80CONFIG_SIGNALFD=y
67CONFIG_TIMERFD=y 81CONFIG_TIMERFD=y
68CONFIG_EVENTFD=y 82CONFIG_EVENTFD=y
69CONFIG_SHMEM=y 83CONFIG_SHMEM=y
70CONFIG_AIO=y 84CONFIG_AIO=y
85
86#
87# Kernel Performance Events And Counters
88#
71CONFIG_VM_EVENT_COUNTERS=y 89CONFIG_VM_EVENT_COUNTERS=y
72CONFIG_SLUB_DEBUG=y 90CONFIG_SLUB_DEBUG=y
91# CONFIG_COMPAT_BRK is not set
73# CONFIG_SLAB is not set 92# CONFIG_SLAB is not set
74CONFIG_SLUB=y 93CONFIG_SLUB=y
75# CONFIG_SLOB is not set 94# CONFIG_SLOB is not set
76CONFIG_PROFILING=y 95CONFIG_PROFILING=y
77# CONFIG_MARKERS is not set 96CONFIG_TRACEPOINTS=y
78CONFIG_OPROFILE=m 97CONFIG_OPROFILE=m
79CONFIG_HAVE_OPROFILE=y 98CONFIG_HAVE_OPROFILE=y
80CONFIG_KPROBES=y 99CONFIG_KPROBES=y
81CONFIG_HAVE_KPROBES=y 100CONFIG_HAVE_KPROBES=y
82CONFIG_HAVE_CLK=y 101CONFIG_HAVE_CLK=y
102
103#
104# GCOV-based kernel profiling
105#
106# CONFIG_GCOV_KERNEL is not set
107# CONFIG_SLOW_WORK is not set
83# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 108# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
84CONFIG_SLABINFO=y 109CONFIG_SLABINFO=y
85CONFIG_RT_MUTEXES=y 110CONFIG_RT_MUTEXES=y
86# CONFIG_TINY_SHMEM is not set
87CONFIG_BASE_SMALL=1 111CONFIG_BASE_SMALL=1
88CONFIG_MODULES=y 112CONFIG_MODULES=y
89# CONFIG_MODULE_FORCE_LOAD is not set 113# CONFIG_MODULE_FORCE_LOAD is not set
@@ -91,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y
91# CONFIG_MODULE_FORCE_UNLOAD is not set 115# CONFIG_MODULE_FORCE_UNLOAD is not set
92# CONFIG_MODVERSIONS is not set 116# CONFIG_MODVERSIONS is not set
93# CONFIG_MODULE_SRCVERSION_ALL is not set 117# CONFIG_MODULE_SRCVERSION_ALL is not set
94CONFIG_KMOD=y
95CONFIG_BLOCK=y 118CONFIG_BLOCK=y
96# CONFIG_LBD is not set 119CONFIG_LBDAF=y
97# CONFIG_BLK_DEV_IO_TRACE is not set
98# CONFIG_LSF is not set
99# CONFIG_BLK_DEV_BSG is not set 120# CONFIG_BLK_DEV_BSG is not set
100# CONFIG_BLK_DEV_INTEGRITY is not set 121# CONFIG_BLK_DEV_INTEGRITY is not set
101 122
@@ -111,7 +132,6 @@ CONFIG_IOSCHED_CFQ=y
111CONFIG_DEFAULT_CFQ=y 132CONFIG_DEFAULT_CFQ=y
112# CONFIG_DEFAULT_NOOP is not set 133# CONFIG_DEFAULT_NOOP is not set
113CONFIG_DEFAULT_IOSCHED="cfq" 134CONFIG_DEFAULT_IOSCHED="cfq"
114CONFIG_CLASSIC_RCU=y
115CONFIG_FREEZER=y 135CONFIG_FREEZER=y
116 136
117# 137#
@@ -128,8 +148,11 @@ CONFIG_PLATFORM_AT32AP=y
128CONFIG_CPU_AT32AP700X=y 148CONFIG_CPU_AT32AP700X=y
129CONFIG_CPU_AT32AP7000=y 149CONFIG_CPU_AT32AP7000=y
130CONFIG_BOARD_ATSTK1000=y 150CONFIG_BOARD_ATSTK1000=y
131# CONFIG_BOARD_ATNGW100 is not set 151# CONFIG_BOARD_ATNGW100_MKI is not set
152# CONFIG_BOARD_ATNGW100_MKII is not set
153# CONFIG_BOARD_HAMMERHEAD is not set
132# CONFIG_BOARD_FAVR_32 is not set 154# CONFIG_BOARD_FAVR_32 is not set
155# CONFIG_BOARD_MERISC is not set
133# CONFIG_BOARD_MIMC200 is not set 156# CONFIG_BOARD_MIMC200 is not set
134# CONFIG_BOARD_ATSTK1002 is not set 157# CONFIG_BOARD_ATSTK1002 is not set
135# CONFIG_BOARD_ATSTK1003 is not set 158# CONFIG_BOARD_ATSTK1003 is not set
@@ -156,7 +179,7 @@ CONFIG_PREEMPT_NONE=y
156# CONFIG_PREEMPT_VOLUNTARY is not set 179# CONFIG_PREEMPT_VOLUNTARY is not set
157# CONFIG_PREEMPT is not set 180# CONFIG_PREEMPT is not set
158CONFIG_QUICKLIST=y 181CONFIG_QUICKLIST=y
159# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set 182# CONFIG_HAVE_ARCH_BOOTMEM is not set
160# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set 183# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
161# CONFIG_NEED_NODE_MEMMAP_SIZE is not set 184# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
162CONFIG_ARCH_FLATMEM_ENABLE=y 185CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -170,12 +193,14 @@ CONFIG_FLATMEM=y
170CONFIG_FLAT_NODE_MEM_MAP=y 193CONFIG_FLAT_NODE_MEM_MAP=y
171CONFIG_PAGEFLAGS_EXTENDED=y 194CONFIG_PAGEFLAGS_EXTENDED=y
172CONFIG_SPLIT_PTLOCK_CPUS=4 195CONFIG_SPLIT_PTLOCK_CPUS=4
173# CONFIG_RESOURCES_64BIT is not set
174# CONFIG_PHYS_ADDR_T_64BIT is not set 196# CONFIG_PHYS_ADDR_T_64BIT is not set
175CONFIG_ZONE_DMA_FLAG=0 197CONFIG_ZONE_DMA_FLAG=0
176CONFIG_NR_QUICK=2 198CONFIG_NR_QUICK=2
177CONFIG_VIRT_TO_BUS=y 199CONFIG_VIRT_TO_BUS=y
178CONFIG_UNEVICTABLE_LRU=y 200CONFIG_HAVE_MLOCK=y
201CONFIG_HAVE_MLOCKED_PAGE_BIT=y
202# CONFIG_KSM is not set
203CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
179# CONFIG_OWNERSHIP_TRACE is not set 204# CONFIG_OWNERSHIP_TRACE is not set
180CONFIG_NMI_DEBUGGING=y 205CONFIG_NMI_DEBUGGING=y
181# CONFIG_HZ_100 is not set 206# CONFIG_HZ_100 is not set
@@ -194,6 +219,7 @@ CONFIG_PM=y
194CONFIG_PM_SLEEP=y 219CONFIG_PM_SLEEP=y
195CONFIG_SUSPEND=y 220CONFIG_SUSPEND=y
196CONFIG_SUSPEND_FREEZER=y 221CONFIG_SUSPEND_FREEZER=y
222# CONFIG_PM_RUNTIME is not set
197CONFIG_ARCH_SUSPEND_POSSIBLE=y 223CONFIG_ARCH_SUSPEND_POSSIBLE=y
198 224
199# 225#
@@ -294,6 +320,7 @@ CONFIG_IPV6_TUNNEL=m
294# CONFIG_NETFILTER is not set 320# CONFIG_NETFILTER is not set
295# CONFIG_IP_DCCP is not set 321# CONFIG_IP_DCCP is not set
296# CONFIG_IP_SCTP is not set 322# CONFIG_IP_SCTP is not set
323# CONFIG_RDS is not set
297# CONFIG_TIPC is not set 324# CONFIG_TIPC is not set
298# CONFIG_ATM is not set 325# CONFIG_ATM is not set
299CONFIG_STP=m 326CONFIG_STP=m
@@ -309,20 +336,24 @@ CONFIG_LLC=m
309# CONFIG_LAPB is not set 336# CONFIG_LAPB is not set
310# CONFIG_ECONET is not set 337# CONFIG_ECONET is not set
311# CONFIG_WAN_ROUTER is not set 338# CONFIG_WAN_ROUTER is not set
339# CONFIG_PHONET is not set
340# CONFIG_IEEE802154 is not set
312# CONFIG_NET_SCHED is not set 341# CONFIG_NET_SCHED is not set
342# CONFIG_DCB is not set
313 343
314# 344#
315# Network testing 345# Network testing
316# 346#
317# CONFIG_NET_PKTGEN is not set 347# CONFIG_NET_PKTGEN is not set
318# CONFIG_NET_TCPPROBE is not set 348# CONFIG_NET_TCPPROBE is not set
349# CONFIG_NET_DROP_MONITOR is not set
319# CONFIG_HAMRADIO is not set 350# CONFIG_HAMRADIO is not set
320# CONFIG_CAN is not set 351# CONFIG_CAN is not set
321# CONFIG_IRDA is not set 352# CONFIG_IRDA is not set
322# CONFIG_BT is not set 353# CONFIG_BT is not set
323# CONFIG_AF_RXRPC is not set 354# CONFIG_AF_RXRPC is not set
324# CONFIG_PHONET is not set
325# CONFIG_WIRELESS is not set 355# CONFIG_WIRELESS is not set
356# CONFIG_WIMAX is not set
326# CONFIG_RFKILL is not set 357# CONFIG_RFKILL is not set
327# CONFIG_NET_9P is not set 358# CONFIG_NET_9P is not set
328 359
@@ -334,6 +365,7 @@ CONFIG_LLC=m
334# Generic Driver Options 365# Generic Driver Options
335# 366#
336CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 367CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
368# CONFIG_DEVTMPFS is not set
337CONFIG_STANDALONE=y 369CONFIG_STANDALONE=y
338# CONFIG_PREVENT_FIRMWARE_BUILD is not set 370# CONFIG_PREVENT_FIRMWARE_BUILD is not set
339# CONFIG_FW_LOADER is not set 371# CONFIG_FW_LOADER is not set
@@ -343,6 +375,7 @@ CONFIG_STANDALONE=y
343# CONFIG_CONNECTOR is not set 375# CONFIG_CONNECTOR is not set
344CONFIG_MTD=y 376CONFIG_MTD=y
345# CONFIG_MTD_DEBUG is not set 377# CONFIG_MTD_DEBUG is not set
378# CONFIG_MTD_TESTS is not set
346# CONFIG_MTD_CONCAT is not set 379# CONFIG_MTD_CONCAT is not set
347CONFIG_MTD_PARTITIONS=y 380CONFIG_MTD_PARTITIONS=y
348# CONFIG_MTD_REDBOOT_PARTS is not set 381# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -393,9 +426,7 @@ CONFIG_MTD_CFI_UTIL=y
393# 426#
394# CONFIG_MTD_COMPLEX_MAPPINGS is not set 427# CONFIG_MTD_COMPLEX_MAPPINGS is not set
395CONFIG_MTD_PHYSMAP=y 428CONFIG_MTD_PHYSMAP=y
396CONFIG_MTD_PHYSMAP_START=0x8000000 429# CONFIG_MTD_PHYSMAP_COMPAT is not set
397CONFIG_MTD_PHYSMAP_LEN=0x0
398CONFIG_MTD_PHYSMAP_BANKWIDTH=2
399# CONFIG_MTD_PLATRAM is not set 430# CONFIG_MTD_PLATRAM is not set
400 431
401# 432#
@@ -406,6 +437,7 @@ CONFIG_MTD_DATAFLASH=m
406CONFIG_MTD_DATAFLASH_OTP=y 437CONFIG_MTD_DATAFLASH_OTP=y
407CONFIG_MTD_M25P80=m 438CONFIG_MTD_M25P80=m
408CONFIG_M25PXX_USE_FAST_READ=y 439CONFIG_M25PXX_USE_FAST_READ=y
440# CONFIG_MTD_SST25L is not set
409# CONFIG_MTD_SLRAM is not set 441# CONFIG_MTD_SLRAM is not set
410# CONFIG_MTD_PHRAM is not set 442# CONFIG_MTD_PHRAM is not set
411# CONFIG_MTD_MTDRAM is not set 443# CONFIG_MTD_MTDRAM is not set
@@ -432,6 +464,11 @@ CONFIG_MTD_NAND_ATMEL_ECC_HW=y
432# CONFIG_MTD_ONENAND is not set 464# CONFIG_MTD_ONENAND is not set
433 465
434# 466#
467# LPDDR flash memory drivers
468#
469# CONFIG_MTD_LPDDR is not set
470
471#
435# UBI - Unsorted block images 472# UBI - Unsorted block images
436# 473#
437CONFIG_MTD_UBI=y 474CONFIG_MTD_UBI=y
@@ -460,13 +497,22 @@ CONFIG_ATMEL_PWM=m
460CONFIG_ATMEL_TCLIB=y 497CONFIG_ATMEL_TCLIB=y
461CONFIG_ATMEL_TCB_CLKSRC=y 498CONFIG_ATMEL_TCB_CLKSRC=y
462CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0 499CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
463# CONFIG_EEPROM_93CX6 is not set
464# CONFIG_ICS932S401 is not set 500# CONFIG_ICS932S401 is not set
465CONFIG_ATMEL_SSC=m 501CONFIG_ATMEL_SSC=m
466# CONFIG_ENCLOSURE_SERVICES is not set 502# CONFIG_ENCLOSURE_SERVICES is not set
503# CONFIG_ISL29003 is not set
467# CONFIG_C2PORT is not set 504# CONFIG_C2PORT is not set
468 505
469# 506#
507# EEPROM support
508#
509# CONFIG_EEPROM_AT24 is not set
510# CONFIG_EEPROM_AT25 is not set
511# CONFIG_EEPROM_LEGACY is not set
512# CONFIG_EEPROM_MAX6875 is not set
513# CONFIG_EEPROM_93CX6 is not set
514
515#
470# SCSI device support 516# SCSI device support
471# 517#
472# CONFIG_RAID_ATTRS is not set 518# CONFIG_RAID_ATTRS is not set
@@ -486,10 +532,6 @@ CONFIG_BLK_DEV_SR=m
486# CONFIG_BLK_DEV_SR_VENDOR is not set 532# CONFIG_BLK_DEV_SR_VENDOR is not set
487# CONFIG_CHR_DEV_SG is not set 533# CONFIG_CHR_DEV_SG is not set
488# CONFIG_CHR_DEV_SCH is not set 534# CONFIG_CHR_DEV_SCH is not set
489
490#
491# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
492#
493# CONFIG_SCSI_MULTI_LUN is not set 535# CONFIG_SCSI_MULTI_LUN is not set
494# CONFIG_SCSI_CONSTANTS is not set 536# CONFIG_SCSI_CONSTANTS is not set
495# CONFIG_SCSI_LOGGING is not set 537# CONFIG_SCSI_LOGGING is not set
@@ -506,8 +548,10 @@ CONFIG_SCSI_WAIT_SCAN=m
506# CONFIG_SCSI_SRP_ATTRS is not set 548# CONFIG_SCSI_SRP_ATTRS is not set
507# CONFIG_SCSI_LOWLEVEL is not set 549# CONFIG_SCSI_LOWLEVEL is not set
508# CONFIG_SCSI_DH is not set 550# CONFIG_SCSI_DH is not set
551# CONFIG_SCSI_OSD_INITIATOR is not set
509CONFIG_ATA=m 552CONFIG_ATA=m
510# CONFIG_ATA_NONSTANDARD is not set 553# CONFIG_ATA_NONSTANDARD is not set
554CONFIG_ATA_VERBOSE_ERROR=y
511# CONFIG_SATA_PMP is not set 555# CONFIG_SATA_PMP is not set
512CONFIG_ATA_SFF=y 556CONFIG_ATA_SFF=y
513# CONFIG_SATA_MV is not set 557# CONFIG_SATA_MV is not set
@@ -536,12 +580,17 @@ CONFIG_PHYLIB=y
536# CONFIG_BROADCOM_PHY is not set 580# CONFIG_BROADCOM_PHY is not set
537# CONFIG_ICPLUS_PHY is not set 581# CONFIG_ICPLUS_PHY is not set
538# CONFIG_REALTEK_PHY is not set 582# CONFIG_REALTEK_PHY is not set
583# CONFIG_NATIONAL_PHY is not set
584# CONFIG_STE10XP is not set
585# CONFIG_LSI_ET1011C_PHY is not set
539# CONFIG_FIXED_PHY is not set 586# CONFIG_FIXED_PHY is not set
540# CONFIG_MDIO_BITBANG is not set 587# CONFIG_MDIO_BITBANG is not set
541CONFIG_NET_ETHERNET=y 588CONFIG_NET_ETHERNET=y
542# CONFIG_MII is not set 589# CONFIG_MII is not set
543CONFIG_MACB=y 590CONFIG_MACB=y
544# CONFIG_ENC28J60 is not set 591# CONFIG_ENC28J60 is not set
592# CONFIG_ETHOC is not set
593# CONFIG_DNET is not set
545# CONFIG_IBM_NEW_EMAC_ZMII is not set 594# CONFIG_IBM_NEW_EMAC_ZMII is not set
546# CONFIG_IBM_NEW_EMAC_RGMII is not set 595# CONFIG_IBM_NEW_EMAC_RGMII is not set
547# CONFIG_IBM_NEW_EMAC_TAH is not set 596# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -550,15 +599,18 @@ CONFIG_MACB=y
550# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 599# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
551# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 600# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
552# CONFIG_B44 is not set 601# CONFIG_B44 is not set
602# CONFIG_KS8842 is not set
603# CONFIG_KS8851 is not set
604# CONFIG_KS8851_MLL is not set
553# CONFIG_NETDEV_1000 is not set 605# CONFIG_NETDEV_1000 is not set
554# CONFIG_NETDEV_10000 is not set 606# CONFIG_NETDEV_10000 is not set
607CONFIG_WLAN=y
608# CONFIG_WLAN_PRE80211 is not set
609# CONFIG_WLAN_80211 is not set
555 610
556# 611#
557# Wireless LAN 612# Enable WiMAX (Networking options) to see the WiMAX drivers
558# 613#
559# CONFIG_WLAN_PRE80211 is not set
560# CONFIG_WLAN_80211 is not set
561# CONFIG_IWLWIFI_LEDS is not set
562# CONFIG_WAN is not set 614# CONFIG_WAN is not set
563CONFIG_PPP=m 615CONFIG_PPP=m
564# CONFIG_PPP_MULTILINK is not set 616# CONFIG_PPP_MULTILINK is not set
@@ -600,18 +652,25 @@ CONFIG_INPUT_EVDEV=m
600# Input Device Drivers 652# Input Device Drivers
601# 653#
602CONFIG_INPUT_KEYBOARD=y 654CONFIG_INPUT_KEYBOARD=y
655# CONFIG_KEYBOARD_ADP5588 is not set
603# CONFIG_KEYBOARD_ATKBD is not set 656# CONFIG_KEYBOARD_ATKBD is not set
604# CONFIG_KEYBOARD_SUNKBD is not set 657# CONFIG_QT2160 is not set
605# CONFIG_KEYBOARD_LKKBD is not set 658# CONFIG_KEYBOARD_LKKBD is not set
606# CONFIG_KEYBOARD_XTKBD is not set 659CONFIG_KEYBOARD_GPIO=m
660# CONFIG_KEYBOARD_MATRIX is not set
661# CONFIG_KEYBOARD_LM8323 is not set
662# CONFIG_KEYBOARD_MAX7359 is not set
607# CONFIG_KEYBOARD_NEWTON is not set 663# CONFIG_KEYBOARD_NEWTON is not set
664# CONFIG_KEYBOARD_OPENCORES is not set
608# CONFIG_KEYBOARD_STOWAWAY is not set 665# CONFIG_KEYBOARD_STOWAWAY is not set
609CONFIG_KEYBOARD_GPIO=m 666# CONFIG_KEYBOARD_SUNKBD is not set
667# CONFIG_KEYBOARD_XTKBD is not set
610CONFIG_INPUT_MOUSE=y 668CONFIG_INPUT_MOUSE=y
611# CONFIG_MOUSE_PS2 is not set 669# CONFIG_MOUSE_PS2 is not set
612# CONFIG_MOUSE_SERIAL is not set 670# CONFIG_MOUSE_SERIAL is not set
613# CONFIG_MOUSE_VSXXXAA is not set 671# CONFIG_MOUSE_VSXXXAA is not set
614CONFIG_MOUSE_GPIO=m 672CONFIG_MOUSE_GPIO=m
673# CONFIG_MOUSE_SYNAPTICS_I2C is not set
615# CONFIG_INPUT_JOYSTICK is not set 674# CONFIG_INPUT_JOYSTICK is not set
616# CONFIG_INPUT_TABLET is not set 675# CONFIG_INPUT_TABLET is not set
617# CONFIG_INPUT_TOUCHSCREEN is not set 676# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -642,9 +701,11 @@ CONFIG_SERIAL_ATMEL=y
642CONFIG_SERIAL_ATMEL_CONSOLE=y 701CONFIG_SERIAL_ATMEL_CONSOLE=y
643CONFIG_SERIAL_ATMEL_PDC=y 702CONFIG_SERIAL_ATMEL_PDC=y
644# CONFIG_SERIAL_ATMEL_TTYAT is not set 703# CONFIG_SERIAL_ATMEL_TTYAT is not set
704# CONFIG_SERIAL_MAX3100 is not set
645CONFIG_SERIAL_CORE=y 705CONFIG_SERIAL_CORE=y
646CONFIG_SERIAL_CORE_CONSOLE=y 706CONFIG_SERIAL_CORE_CONSOLE=y
647CONFIG_UNIX98_PTYS=y 707CONFIG_UNIX98_PTYS=y
708# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
648# CONFIG_LEGACY_PTYS is not set 709# CONFIG_LEGACY_PTYS is not set
649# CONFIG_IPMI_HANDLER is not set 710# CONFIG_IPMI_HANDLER is not set
650# CONFIG_HW_RANDOM is not set 711# CONFIG_HW_RANDOM is not set
@@ -653,6 +714,7 @@ CONFIG_UNIX98_PTYS=y
653# CONFIG_TCG_TPM is not set 714# CONFIG_TCG_TPM is not set
654CONFIG_I2C=m 715CONFIG_I2C=m
655CONFIG_I2C_BOARDINFO=y 716CONFIG_I2C_BOARDINFO=y
717CONFIG_I2C_COMPAT=y
656CONFIG_I2C_CHARDEV=m 718CONFIG_I2C_CHARDEV=m
657CONFIG_I2C_HELPER_AUTO=y 719CONFIG_I2C_HELPER_AUTO=y
658CONFIG_I2C_ALGOBIT=m 720CONFIG_I2C_ALGOBIT=m
@@ -664,6 +726,7 @@ CONFIG_I2C_ALGOBIT=m
664# 726#
665# I2C system bus drivers (mostly embedded / system-on-chip) 727# I2C system bus drivers (mostly embedded / system-on-chip)
666# 728#
729# CONFIG_I2C_DESIGNWARE is not set
667CONFIG_I2C_GPIO=m 730CONFIG_I2C_GPIO=m
668# CONFIG_I2C_OCORES is not set 731# CONFIG_I2C_OCORES is not set
669# CONFIG_I2C_SIMTEC is not set 732# CONFIG_I2C_SIMTEC is not set
@@ -684,14 +747,6 @@ CONFIG_I2C_GPIO=m
684# Miscellaneous I2C Chip support 747# Miscellaneous I2C Chip support
685# 748#
686# CONFIG_DS1682 is not set 749# CONFIG_DS1682 is not set
687# CONFIG_EEPROM_AT24 is not set
688# CONFIG_EEPROM_LEGACY is not set
689# CONFIG_SENSORS_PCF8574 is not set
690# CONFIG_PCF8575 is not set
691# CONFIG_SENSORS_PCA9539 is not set
692# CONFIG_SENSORS_PCF8591 is not set
693# CONFIG_TPS65010 is not set
694# CONFIG_SENSORS_MAX6875 is not set
695# CONFIG_SENSORS_TSL2550 is not set 750# CONFIG_SENSORS_TSL2550 is not set
696# CONFIG_I2C_DEBUG_CORE is not set 751# CONFIG_I2C_DEBUG_CORE is not set
697# CONFIG_I2C_DEBUG_ALGO is not set 752# CONFIG_I2C_DEBUG_ALGO is not set
@@ -706,13 +761,18 @@ CONFIG_SPI_MASTER=y
706# 761#
707CONFIG_SPI_ATMEL=y 762CONFIG_SPI_ATMEL=y
708# CONFIG_SPI_BITBANG is not set 763# CONFIG_SPI_BITBANG is not set
764# CONFIG_SPI_GPIO is not set
709 765
710# 766#
711# SPI Protocol Masters 767# SPI Protocol Masters
712# 768#
713# CONFIG_EEPROM_AT25 is not set
714CONFIG_SPI_SPIDEV=m 769CONFIG_SPI_SPIDEV=m
715# CONFIG_SPI_TLE62X0 is not set 770# CONFIG_SPI_TLE62X0 is not set
771
772#
773# PPS support
774#
775# CONFIG_PPS is not set
716CONFIG_ARCH_REQUIRE_GPIOLIB=y 776CONFIG_ARCH_REQUIRE_GPIOLIB=y
717CONFIG_GPIOLIB=y 777CONFIG_GPIOLIB=y
718# CONFIG_DEBUG_GPIO is not set 778# CONFIG_DEBUG_GPIO is not set
@@ -738,11 +798,15 @@ CONFIG_GPIO_SYSFS=y
738# 798#
739# CONFIG_GPIO_MAX7301 is not set 799# CONFIG_GPIO_MAX7301 is not set
740# CONFIG_GPIO_MCP23S08 is not set 800# CONFIG_GPIO_MCP23S08 is not set
801# CONFIG_GPIO_MC33880 is not set
802
803#
804# AC97 GPIO expanders:
805#
741# CONFIG_W1 is not set 806# CONFIG_W1 is not set
742# CONFIG_POWER_SUPPLY is not set 807# CONFIG_POWER_SUPPLY is not set
743# CONFIG_HWMON is not set 808# CONFIG_HWMON is not set
744# CONFIG_THERMAL is not set 809# CONFIG_THERMAL is not set
745# CONFIG_THERMAL_HWMON is not set
746CONFIG_WATCHDOG=y 810CONFIG_WATCHDOG=y
747# CONFIG_WATCHDOG_NOWAYOUT is not set 811# CONFIG_WATCHDOG_NOWAYOUT is not set
748 812
@@ -764,26 +828,17 @@ CONFIG_SSB_POSSIBLE=y
764# CONFIG_MFD_CORE is not set 828# CONFIG_MFD_CORE is not set
765# CONFIG_MFD_SM501 is not set 829# CONFIG_MFD_SM501 is not set
766# CONFIG_HTC_PASIC3 is not set 830# CONFIG_HTC_PASIC3 is not set
831# CONFIG_TPS65010 is not set
767# CONFIG_MFD_TMIO is not set 832# CONFIG_MFD_TMIO is not set
768# CONFIG_MFD_WM8400 is not set 833# CONFIG_MFD_WM8400 is not set
834# CONFIG_MFD_WM831X is not set
769# CONFIG_MFD_WM8350_I2C is not set 835# CONFIG_MFD_WM8350_I2C is not set
836# CONFIG_MFD_PCF50633 is not set
837# CONFIG_MFD_MC13783 is not set
838# CONFIG_AB3100_CORE is not set
839# CONFIG_EZX_PCAP is not set
770# CONFIG_REGULATOR is not set 840# CONFIG_REGULATOR is not set
771 841# CONFIG_MEDIA_SUPPORT is not set
772#
773# Multimedia devices
774#
775
776#
777# Multimedia core support
778#
779# CONFIG_VIDEO_DEV is not set
780# CONFIG_DVB_CORE is not set
781# CONFIG_VIDEO_MEDIA is not set
782
783#
784# Multimedia drivers
785#
786# CONFIG_DAB is not set
787 842
788# 843#
789# Graphics support 844# Graphics support
@@ -817,8 +872,10 @@ CONFIG_FB_ATMEL=y
817# CONFIG_FB_VIRTUAL is not set 872# CONFIG_FB_VIRTUAL is not set
818# CONFIG_FB_METRONOME is not set 873# CONFIG_FB_METRONOME is not set
819# CONFIG_FB_MB862XX is not set 874# CONFIG_FB_MB862XX is not set
875# CONFIG_FB_BROADSHEET is not set
820CONFIG_BACKLIGHT_LCD_SUPPORT=y 876CONFIG_BACKLIGHT_LCD_SUPPORT=y
821CONFIG_LCD_CLASS_DEVICE=y 877CONFIG_LCD_CLASS_DEVICE=y
878# CONFIG_LCD_LMS283GF05 is not set
822CONFIG_LCD_LTV350QV=y 879CONFIG_LCD_LTV350QV=y
823# CONFIG_LCD_ILI9320 is not set 880# CONFIG_LCD_ILI9320 is not set
824# CONFIG_LCD_TDO24M is not set 881# CONFIG_LCD_TDO24M is not set
@@ -833,6 +890,7 @@ CONFIG_LCD_LTV350QV=y
833# CONFIG_LOGO is not set 890# CONFIG_LOGO is not set
834CONFIG_SOUND=m 891CONFIG_SOUND=m
835CONFIG_SOUND_OSS_CORE=y 892CONFIG_SOUND_OSS_CORE=y
893CONFIG_SOUND_OSS_CORE_PRECLAIM=y
836CONFIG_SND=m 894CONFIG_SND=m
837CONFIG_SND_TIMER=m 895CONFIG_SND_TIMER=m
838CONFIG_SND_PCM=m 896CONFIG_SND_PCM=m
@@ -841,16 +899,28 @@ CONFIG_SND_OSSEMUL=y
841CONFIG_SND_MIXER_OSS=m 899CONFIG_SND_MIXER_OSS=m
842CONFIG_SND_PCM_OSS=m 900CONFIG_SND_PCM_OSS=m
843CONFIG_SND_PCM_OSS_PLUGINS=y 901CONFIG_SND_PCM_OSS_PLUGINS=y
902# CONFIG_SND_HRTIMER is not set
844# CONFIG_SND_DYNAMIC_MINORS is not set 903# CONFIG_SND_DYNAMIC_MINORS is not set
845# CONFIG_SND_SUPPORT_OLD_API is not set 904# CONFIG_SND_SUPPORT_OLD_API is not set
846# CONFIG_SND_VERBOSE_PROCFS is not set 905# CONFIG_SND_VERBOSE_PROCFS is not set
847# CONFIG_SND_VERBOSE_PRINTK is not set 906# CONFIG_SND_VERBOSE_PRINTK is not set
848# CONFIG_SND_DEBUG is not set 907# CONFIG_SND_DEBUG is not set
908# CONFIG_SND_RAWMIDI_SEQ is not set
909# CONFIG_SND_OPL3_LIB_SEQ is not set
910# CONFIG_SND_OPL4_LIB_SEQ is not set
911# CONFIG_SND_SBAWE_SEQ is not set
912# CONFIG_SND_EMU10K1_SEQ is not set
849CONFIG_SND_DRIVERS=y 913CONFIG_SND_DRIVERS=y
850# CONFIG_SND_DUMMY is not set 914# CONFIG_SND_DUMMY is not set
851# CONFIG_SND_MTPAV is not set 915# CONFIG_SND_MTPAV is not set
852# CONFIG_SND_SERIAL_U16550 is not set 916# CONFIG_SND_SERIAL_U16550 is not set
853# CONFIG_SND_MPU401 is not set 917# CONFIG_SND_MPU401 is not set
918
919#
920# Atmel devices (AVR32 and AT91)
921#
922# CONFIG_SND_ATMEL_ABDAC is not set
923# CONFIG_SND_ATMEL_AC97C is not set
854CONFIG_SND_SPI=y 924CONFIG_SND_SPI=y
855CONFIG_SND_AT73C213=m 925CONFIG_SND_AT73C213=m
856CONFIG_SND_AT73C213_TARGET_BITRATE=48000 926CONFIG_SND_AT73C213_TARGET_BITRATE=48000
@@ -863,11 +933,10 @@ CONFIG_USB_SUPPORT=y
863# CONFIG_USB_ARCH_HAS_EHCI is not set 933# CONFIG_USB_ARCH_HAS_EHCI is not set
864# CONFIG_USB_OTG_WHITELIST is not set 934# CONFIG_USB_OTG_WHITELIST is not set
865# CONFIG_USB_OTG_BLACKLIST_HUB is not set 935# CONFIG_USB_OTG_BLACKLIST_HUB is not set
866# CONFIG_USB_MUSB_HDRC is not set
867# CONFIG_USB_GADGET_MUSB_HDRC is not set 936# CONFIG_USB_GADGET_MUSB_HDRC is not set
868 937
869# 938#
870# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; 939# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
871# 940#
872CONFIG_USB_GADGET=y 941CONFIG_USB_GADGET=y
873# CONFIG_USB_GADGET_DEBUG is not set 942# CONFIG_USB_GADGET_DEBUG is not set
@@ -882,18 +951,25 @@ CONFIG_USB_ATMEL_USBA=y
882# CONFIG_USB_GADGET_LH7A40X is not set 951# CONFIG_USB_GADGET_LH7A40X is not set
883# CONFIG_USB_GADGET_OMAP is not set 952# CONFIG_USB_GADGET_OMAP is not set
884# CONFIG_USB_GADGET_PXA25X is not set 953# CONFIG_USB_GADGET_PXA25X is not set
954# CONFIG_USB_GADGET_R8A66597 is not set
885# CONFIG_USB_GADGET_PXA27X is not set 955# CONFIG_USB_GADGET_PXA27X is not set
956# CONFIG_USB_GADGET_S3C_HSOTG is not set
957# CONFIG_USB_GADGET_IMX is not set
886# CONFIG_USB_GADGET_S3C2410 is not set 958# CONFIG_USB_GADGET_S3C2410 is not set
887# CONFIG_USB_GADGET_M66592 is not set 959# CONFIG_USB_GADGET_M66592 is not set
888# CONFIG_USB_GADGET_AMD5536UDC is not set 960# CONFIG_USB_GADGET_AMD5536UDC is not set
889# CONFIG_USB_GADGET_FSL_QE is not set 961# CONFIG_USB_GADGET_FSL_QE is not set
962# CONFIG_USB_GADGET_CI13XXX is not set
890# CONFIG_USB_GADGET_NET2280 is not set 963# CONFIG_USB_GADGET_NET2280 is not set
891# CONFIG_USB_GADGET_GOKU is not set 964# CONFIG_USB_GADGET_GOKU is not set
965# CONFIG_USB_GADGET_LANGWELL is not set
892# CONFIG_USB_GADGET_DUMMY_HCD is not set 966# CONFIG_USB_GADGET_DUMMY_HCD is not set
893CONFIG_USB_GADGET_DUALSPEED=y 967CONFIG_USB_GADGET_DUALSPEED=y
894CONFIG_USB_ZERO=m 968CONFIG_USB_ZERO=m
969# CONFIG_USB_AUDIO is not set
895CONFIG_USB_ETH=m 970CONFIG_USB_ETH=m
896CONFIG_USB_ETH_RNDIS=y 971CONFIG_USB_ETH_RNDIS=y
972# CONFIG_USB_ETH_EEM is not set
897CONFIG_USB_GADGETFS=m 973CONFIG_USB_GADGETFS=m
898CONFIG_USB_FILE_STORAGE=m 974CONFIG_USB_FILE_STORAGE=m
899# CONFIG_USB_FILE_STORAGE_TEST is not set 975# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -901,6 +977,12 @@ CONFIG_USB_G_SERIAL=m
901# CONFIG_USB_MIDI_GADGET is not set 977# CONFIG_USB_MIDI_GADGET is not set
902# CONFIG_USB_G_PRINTER is not set 978# CONFIG_USB_G_PRINTER is not set
903# CONFIG_USB_CDC_COMPOSITE is not set 979# CONFIG_USB_CDC_COMPOSITE is not set
980
981#
982# OTG and related infrastructure
983#
984# CONFIG_USB_GPIO_VBUS is not set
985# CONFIG_NOP_USB_XCEIV is not set
904CONFIG_MMC=y 986CONFIG_MMC=y
905# CONFIG_MMC_DEBUG is not set 987# CONFIG_MMC_DEBUG is not set
906# CONFIG_MMC_UNSAFE_RESUME is not set 988# CONFIG_MMC_UNSAFE_RESUME is not set
@@ -917,6 +999,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
917# MMC/SD/SDIO Host Controller Drivers 999# MMC/SD/SDIO Host Controller Drivers
918# 1000#
919# CONFIG_MMC_SDHCI is not set 1001# CONFIG_MMC_SDHCI is not set
1002# CONFIG_MMC_AT91 is not set
920CONFIG_MMC_ATMELMCI=y 1003CONFIG_MMC_ATMELMCI=y
921# CONFIG_MMC_ATMELMCI_DMA is not set 1004# CONFIG_MMC_ATMELMCI_DMA is not set
922CONFIG_MMC_SPI=m 1005CONFIG_MMC_SPI=m
@@ -930,7 +1013,11 @@ CONFIG_LEDS_CLASS=m
930CONFIG_LEDS_ATMEL_PWM=m 1013CONFIG_LEDS_ATMEL_PWM=m
931# CONFIG_LEDS_PCA9532 is not set 1014# CONFIG_LEDS_PCA9532 is not set
932CONFIG_LEDS_GPIO=m 1015CONFIG_LEDS_GPIO=m
1016CONFIG_LEDS_GPIO_PLATFORM=y
1017# CONFIG_LEDS_LP3944 is not set
933# CONFIG_LEDS_PCA955X is not set 1018# CONFIG_LEDS_PCA955X is not set
1019# CONFIG_LEDS_DAC124S085 is not set
1020# CONFIG_LEDS_BD2802 is not set
934 1021
935# 1022#
936# LED Triggers 1023# LED Triggers
@@ -939,7 +1026,12 @@ CONFIG_LEDS_TRIGGERS=y
939CONFIG_LEDS_TRIGGER_TIMER=m 1026CONFIG_LEDS_TRIGGER_TIMER=m
940CONFIG_LEDS_TRIGGER_HEARTBEAT=m 1027CONFIG_LEDS_TRIGGER_HEARTBEAT=m
941# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set 1028# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1029# CONFIG_LEDS_TRIGGER_GPIO is not set
942CONFIG_LEDS_TRIGGER_DEFAULT_ON=m 1030CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
1031
1032#
1033# iptables trigger is under Netfilter config (LED target)
1034#
943# CONFIG_ACCESSIBILITY is not set 1035# CONFIG_ACCESSIBILITY is not set
944CONFIG_RTC_LIB=y 1036CONFIG_RTC_LIB=y
945CONFIG_RTC_CLASS=y 1037CONFIG_RTC_CLASS=y
@@ -972,6 +1064,7 @@ CONFIG_RTC_INTF_DEV=y
972# CONFIG_RTC_DRV_S35390A is not set 1064# CONFIG_RTC_DRV_S35390A is not set
973# CONFIG_RTC_DRV_FM3130 is not set 1065# CONFIG_RTC_DRV_FM3130 is not set
974# CONFIG_RTC_DRV_RX8581 is not set 1066# CONFIG_RTC_DRV_RX8581 is not set
1067# CONFIG_RTC_DRV_RX8025 is not set
975 1068
976# 1069#
977# SPI RTC drivers 1070# SPI RTC drivers
@@ -983,6 +1076,7 @@ CONFIG_RTC_INTF_DEV=y
983# CONFIG_RTC_DRV_R9701 is not set 1076# CONFIG_RTC_DRV_R9701 is not set
984# CONFIG_RTC_DRV_RS5C348 is not set 1077# CONFIG_RTC_DRV_RS5C348 is not set
985# CONFIG_RTC_DRV_DS3234 is not set 1078# CONFIG_RTC_DRV_DS3234 is not set
1079# CONFIG_RTC_DRV_PCF2123 is not set
986 1080
987# 1081#
988# Platform RTC drivers 1082# Platform RTC drivers
@@ -1014,32 +1108,42 @@ CONFIG_DMA_ENGINE=y
1014# DMA Clients 1108# DMA Clients
1015# 1109#
1016# CONFIG_NET_DMA is not set 1110# CONFIG_NET_DMA is not set
1111# CONFIG_ASYNC_TX_DMA is not set
1017# CONFIG_DMATEST is not set 1112# CONFIG_DMATEST is not set
1113# CONFIG_AUXDISPLAY is not set
1018# CONFIG_UIO is not set 1114# CONFIG_UIO is not set
1115
1116#
1117# TI VLYNQ
1118#
1019# CONFIG_STAGING is not set 1119# CONFIG_STAGING is not set
1020CONFIG_STAGING_EXCLUDE_BUILD=y
1021 1120
1022# 1121#
1023# File systems 1122# File systems
1024# 1123#
1025CONFIG_EXT2_FS=m 1124CONFIG_EXT2_FS=y
1026# CONFIG_EXT2_FS_XATTR is not set 1125# CONFIG_EXT2_FS_XATTR is not set
1027# CONFIG_EXT2_FS_XIP is not set 1126# CONFIG_EXT2_FS_XIP is not set
1028CONFIG_EXT3_FS=m 1127CONFIG_EXT3_FS=y
1128# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
1029# CONFIG_EXT3_FS_XATTR is not set 1129# CONFIG_EXT3_FS_XATTR is not set
1030CONFIG_EXT4_FS=m 1130CONFIG_EXT4_FS=y
1031CONFIG_EXT4DEV_COMPAT=y
1032# CONFIG_EXT4_FS_XATTR is not set 1131# CONFIG_EXT4_FS_XATTR is not set
1033CONFIG_JBD=m 1132# CONFIG_EXT4_DEBUG is not set
1133CONFIG_JBD=y
1034# CONFIG_JBD_DEBUG is not set 1134# CONFIG_JBD_DEBUG is not set
1035CONFIG_JBD2=m 1135CONFIG_JBD2=y
1036# CONFIG_JBD2_DEBUG is not set 1136# CONFIG_JBD2_DEBUG is not set
1037# CONFIG_REISERFS_FS is not set 1137# CONFIG_REISERFS_FS is not set
1038# CONFIG_JFS_FS is not set 1138# CONFIG_JFS_FS is not set
1039# CONFIG_FS_POSIX_ACL is not set 1139# CONFIG_FS_POSIX_ACL is not set
1040CONFIG_FILE_LOCKING=y
1041# CONFIG_XFS_FS is not set 1140# CONFIG_XFS_FS is not set
1141# CONFIG_GFS2_FS is not set
1042# CONFIG_OCFS2_FS is not set 1142# CONFIG_OCFS2_FS is not set
1143# CONFIG_BTRFS_FS is not set
1144# CONFIG_NILFS2_FS is not set
1145CONFIG_FILE_LOCKING=y
1146CONFIG_FSNOTIFY=y
1043# CONFIG_DNOTIFY is not set 1147# CONFIG_DNOTIFY is not set
1044CONFIG_INOTIFY=y 1148CONFIG_INOTIFY=y
1045CONFIG_INOTIFY_USER=y 1149CONFIG_INOTIFY_USER=y
@@ -1047,6 +1151,12 @@ CONFIG_INOTIFY_USER=y
1047# CONFIG_AUTOFS_FS is not set 1151# CONFIG_AUTOFS_FS is not set
1048# CONFIG_AUTOFS4_FS is not set 1152# CONFIG_AUTOFS4_FS is not set
1049CONFIG_FUSE_FS=m 1153CONFIG_FUSE_FS=m
1154# CONFIG_CUSE is not set
1155
1156#
1157# Caches
1158#
1159# CONFIG_FSCACHE is not set
1050 1160
1051# 1161#
1052# CD-ROM/DVD Filesystems 1162# CD-ROM/DVD Filesystems
@@ -1076,10 +1186,7 @@ CONFIG_TMPFS=y
1076# CONFIG_TMPFS_POSIX_ACL is not set 1186# CONFIG_TMPFS_POSIX_ACL is not set
1077# CONFIG_HUGETLB_PAGE is not set 1187# CONFIG_HUGETLB_PAGE is not set
1078# CONFIG_CONFIGFS_FS is not set 1188# CONFIG_CONFIGFS_FS is not set
1079 1189CONFIG_MISC_FILESYSTEMS=y
1080#
1081# Miscellaneous filesystems
1082#
1083# CONFIG_ADFS_FS is not set 1190# CONFIG_ADFS_FS is not set
1084# CONFIG_AFFS_FS is not set 1191# CONFIG_AFFS_FS is not set
1085# CONFIG_HFS_FS is not set 1192# CONFIG_HFS_FS is not set
@@ -1099,12 +1206,13 @@ CONFIG_JFFS2_ZLIB=y
1099CONFIG_JFFS2_RTIME=y 1206CONFIG_JFFS2_RTIME=y
1100# CONFIG_JFFS2_RUBIN is not set 1207# CONFIG_JFFS2_RUBIN is not set
1101CONFIG_UBIFS_FS=y 1208CONFIG_UBIFS_FS=y
1102CONFIG_UBIFS_FS_XATTR=y 1209# CONFIG_UBIFS_FS_XATTR is not set
1103# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set 1210# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
1104CONFIG_UBIFS_FS_LZO=y 1211CONFIG_UBIFS_FS_LZO=y
1105CONFIG_UBIFS_FS_ZLIB=y 1212CONFIG_UBIFS_FS_ZLIB=y
1106# CONFIG_UBIFS_FS_DEBUG is not set 1213# CONFIG_UBIFS_FS_DEBUG is not set
1107# CONFIG_CRAMFS is not set 1214# CONFIG_CRAMFS is not set
1215# CONFIG_SQUASHFS is not set
1108# CONFIG_VXFS_FS is not set 1216# CONFIG_VXFS_FS is not set
1109CONFIG_MINIX_FS=m 1217CONFIG_MINIX_FS=m
1110# CONFIG_OMFS_FS is not set 1218# CONFIG_OMFS_FS is not set
@@ -1124,7 +1232,6 @@ CONFIG_LOCKD=y
1124CONFIG_LOCKD_V4=y 1232CONFIG_LOCKD_V4=y
1125CONFIG_NFS_COMMON=y 1233CONFIG_NFS_COMMON=y
1126CONFIG_SUNRPC=y 1234CONFIG_SUNRPC=y
1127# CONFIG_SUNRPC_REGISTER_V4 is not set
1128# CONFIG_RPCSEC_GSS_KRB5 is not set 1235# CONFIG_RPCSEC_GSS_KRB5 is not set
1129# CONFIG_RPCSEC_GSS_SPKM3 is not set 1236# CONFIG_RPCSEC_GSS_SPKM3 is not set
1130# CONFIG_SMB_FS is not set 1237# CONFIG_SMB_FS is not set
@@ -1188,6 +1295,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1188CONFIG_ENABLE_MUST_CHECK=y 1295CONFIG_ENABLE_MUST_CHECK=y
1189CONFIG_FRAME_WARN=1024 1296CONFIG_FRAME_WARN=1024
1190CONFIG_MAGIC_SYSRQ=y 1297CONFIG_MAGIC_SYSRQ=y
1298# CONFIG_STRIP_ASM_SYMS is not set
1191# CONFIG_UNUSED_SYMBOLS is not set 1299# CONFIG_UNUSED_SYMBOLS is not set
1192CONFIG_DEBUG_FS=y 1300CONFIG_DEBUG_FS=y
1193# CONFIG_HEADERS_CHECK is not set 1301# CONFIG_HEADERS_CHECK is not set
@@ -1196,6 +1304,9 @@ CONFIG_DEBUG_KERNEL=y
1196CONFIG_DETECT_SOFTLOCKUP=y 1304CONFIG_DETECT_SOFTLOCKUP=y
1197# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1305# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1198CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1306CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1307CONFIG_DETECT_HUNG_TASK=y
1308# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1309CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1199CONFIG_SCHED_DEBUG=y 1310CONFIG_SCHED_DEBUG=y
1200# CONFIG_SCHEDSTATS is not set 1311# CONFIG_SCHEDSTATS is not set
1201# CONFIG_TIMER_STATS is not set 1312# CONFIG_TIMER_STATS is not set
@@ -1211,6 +1322,7 @@ CONFIG_SCHED_DEBUG=y
1211# CONFIG_LOCK_STAT is not set 1322# CONFIG_LOCK_STAT is not set
1212# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1323# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1213# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1324# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1325CONFIG_STACKTRACE=y
1214# CONFIG_DEBUG_KOBJECT is not set 1326# CONFIG_DEBUG_KOBJECT is not set
1215CONFIG_DEBUG_BUGVERBOSE=y 1327CONFIG_DEBUG_BUGVERBOSE=y
1216# CONFIG_DEBUG_INFO is not set 1328# CONFIG_DEBUG_INFO is not set
@@ -1219,6 +1331,8 @@ CONFIG_DEBUG_BUGVERBOSE=y
1219# CONFIG_DEBUG_MEMORY_INIT is not set 1331# CONFIG_DEBUG_MEMORY_INIT is not set
1220# CONFIG_DEBUG_LIST is not set 1332# CONFIG_DEBUG_LIST is not set
1221# CONFIG_DEBUG_SG is not set 1333# CONFIG_DEBUG_SG is not set
1334# CONFIG_DEBUG_NOTIFIERS is not set
1335# CONFIG_DEBUG_CREDENTIALS is not set
1222CONFIG_FRAME_POINTER=y 1336CONFIG_FRAME_POINTER=y
1223# CONFIG_BOOT_PRINTK_DELAY is not set 1337# CONFIG_BOOT_PRINTK_DELAY is not set
1224# CONFIG_RCU_TORTURE_TEST is not set 1338# CONFIG_RCU_TORTURE_TEST is not set
@@ -1226,17 +1340,30 @@ CONFIG_FRAME_POINTER=y
1226# CONFIG_KPROBES_SANITY_TEST is not set 1340# CONFIG_KPROBES_SANITY_TEST is not set
1227# CONFIG_BACKTRACE_SELF_TEST is not set 1341# CONFIG_BACKTRACE_SELF_TEST is not set
1228# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1342# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1343# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1229# CONFIG_LKDTM is not set 1344# CONFIG_LKDTM is not set
1230# CONFIG_FAULT_INJECTION is not set 1345# CONFIG_FAULT_INJECTION is not set
1231 1346# CONFIG_PAGE_POISONING is not set
1232# 1347CONFIG_NOP_TRACER=y
1233# Tracers 1348CONFIG_RING_BUFFER=y
1234# 1349CONFIG_EVENT_TRACING=y
1350CONFIG_CONTEXT_SWITCH_TRACER=y
1351CONFIG_RING_BUFFER_ALLOW_SWAP=y
1352CONFIG_TRACING=y
1353CONFIG_TRACING_SUPPORT=y
1354CONFIG_FTRACE=y
1235# CONFIG_IRQSOFF_TRACER is not set 1355# CONFIG_IRQSOFF_TRACER is not set
1236# CONFIG_SCHED_TRACER is not set 1356# CONFIG_SCHED_TRACER is not set
1237# CONFIG_CONTEXT_SWITCH_TRACER is not set 1357# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1238# CONFIG_BOOT_TRACER is not set 1358# CONFIG_BOOT_TRACER is not set
1239# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1359CONFIG_BRANCH_PROFILE_NONE=y
1360# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1361# CONFIG_PROFILE_ALL_BRANCHES is not set
1362# CONFIG_KMEMTRACE is not set
1363# CONFIG_WORKQUEUE_TRACER is not set
1364# CONFIG_BLK_DEV_IO_TRACE is not set
1365# CONFIG_RING_BUFFER_BENCHMARK is not set
1366# CONFIG_DYNAMIC_DEBUG is not set
1240# CONFIG_SAMPLES is not set 1367# CONFIG_SAMPLES is not set
1241 1368
1242# 1369#
@@ -1262,10 +1389,12 @@ CONFIG_CRYPTO_HASH=m
1262CONFIG_CRYPTO_HASH2=y 1389CONFIG_CRYPTO_HASH2=y
1263CONFIG_CRYPTO_RNG=m 1390CONFIG_CRYPTO_RNG=m
1264CONFIG_CRYPTO_RNG2=y 1391CONFIG_CRYPTO_RNG2=y
1392CONFIG_CRYPTO_PCOMP=y
1265CONFIG_CRYPTO_MANAGER=m 1393CONFIG_CRYPTO_MANAGER=m
1266CONFIG_CRYPTO_MANAGER2=y 1394CONFIG_CRYPTO_MANAGER2=y
1267# CONFIG_CRYPTO_GF128MUL is not set 1395# CONFIG_CRYPTO_GF128MUL is not set
1268# CONFIG_CRYPTO_NULL is not set 1396# CONFIG_CRYPTO_NULL is not set
1397CONFIG_CRYPTO_WORKQUEUE=y
1269# CONFIG_CRYPTO_CRYPTD is not set 1398# CONFIG_CRYPTO_CRYPTD is not set
1270CONFIG_CRYPTO_AUTHENC=m 1399CONFIG_CRYPTO_AUTHENC=m
1271# CONFIG_CRYPTO_TEST is not set 1400# CONFIG_CRYPTO_TEST is not set
@@ -1293,11 +1422,13 @@ CONFIG_CRYPTO_CBC=m
1293# 1422#
1294CONFIG_CRYPTO_HMAC=m 1423CONFIG_CRYPTO_HMAC=m
1295# CONFIG_CRYPTO_XCBC is not set 1424# CONFIG_CRYPTO_XCBC is not set
1425# CONFIG_CRYPTO_VMAC is not set
1296 1426
1297# 1427#
1298# Digest 1428# Digest
1299# 1429#
1300# CONFIG_CRYPTO_CRC32C is not set 1430# CONFIG_CRYPTO_CRC32C is not set
1431# CONFIG_CRYPTO_GHASH is not set
1301# CONFIG_CRYPTO_MD4 is not set 1432# CONFIG_CRYPTO_MD4 is not set
1302CONFIG_CRYPTO_MD5=m 1433CONFIG_CRYPTO_MD5=m
1303# CONFIG_CRYPTO_MICHAEL_MIC is not set 1434# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1334,6 +1465,7 @@ CONFIG_CRYPTO_DES=m
1334# Compression 1465# Compression
1335# 1466#
1336CONFIG_CRYPTO_DEFLATE=y 1467CONFIG_CRYPTO_DEFLATE=y
1468# CONFIG_CRYPTO_ZLIB is not set
1337CONFIG_CRYPTO_LZO=y 1469CONFIG_CRYPTO_LZO=y
1338 1470
1339# 1471#
@@ -1341,11 +1473,13 @@ CONFIG_CRYPTO_LZO=y
1341# 1473#
1342CONFIG_CRYPTO_ANSI_CPRNG=m 1474CONFIG_CRYPTO_ANSI_CPRNG=m
1343# CONFIG_CRYPTO_HW is not set 1475# CONFIG_CRYPTO_HW is not set
1476CONFIG_BINARY_PRINTF=y
1344 1477
1345# 1478#
1346# Library routines 1479# Library routines
1347# 1480#
1348CONFIG_BITREVERSE=y 1481CONFIG_BITREVERSE=y
1482CONFIG_GENERIC_FIND_LAST_BIT=y
1349CONFIG_CRC_CCITT=m 1483CONFIG_CRC_CCITT=m
1350CONFIG_CRC16=y 1484CONFIG_CRC16=y
1351CONFIG_CRC_T10DIF=m 1485CONFIG_CRC_T10DIF=m
@@ -1357,8 +1491,9 @@ CONFIG_ZLIB_INFLATE=y
1357CONFIG_ZLIB_DEFLATE=y 1491CONFIG_ZLIB_DEFLATE=y
1358CONFIG_LZO_COMPRESS=y 1492CONFIG_LZO_COMPRESS=y
1359CONFIG_LZO_DECOMPRESS=y 1493CONFIG_LZO_DECOMPRESS=y
1494CONFIG_DECOMPRESS_GZIP=y
1360CONFIG_GENERIC_ALLOCATOR=y 1495CONFIG_GENERIC_ALLOCATOR=y
1361CONFIG_PLIST=y
1362CONFIG_HAS_IOMEM=y 1496CONFIG_HAS_IOMEM=y
1363CONFIG_HAS_IOPORT=y 1497CONFIG_HAS_IOPORT=y
1364CONFIG_HAS_DMA=y 1498CONFIG_HAS_DMA=y
1499CONFIG_NLATTR=y
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
index d5d1d41c600a..3b3159b710d4 100644
--- a/arch/avr32/include/asm/elf.h
+++ b/arch/avr32/include/asm/elf.h
@@ -77,7 +77,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
77#endif 77#endif
78#define ELF_ARCH EM_AVR32 78#define ELF_ARCH EM_AVR32
79 79
80#define USE_ELF_CORE_DUMP
81#define ELF_EXEC_PAGESIZE 4096 80#define ELF_EXEC_PAGESIZE 4096
82 81
83/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 82/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/avr32/include/asm/hardirq.h b/arch/avr32/include/asm/hardirq.h
index 015bc75ea798..9e36e3ff77d2 100644
--- a/arch/avr32/include/asm/hardirq.h
+++ b/arch/avr32/include/asm/hardirq.h
@@ -1,23 +1,6 @@
1#ifndef __ASM_AVR32_HARDIRQ_H 1#ifndef __ASM_AVR32_HARDIRQ_H
2#define __ASM_AVR32_HARDIRQ_H 2#define __ASM_AVR32_HARDIRQ_H
3
4#include <linux/threads.h>
5#include <asm/irq.h>
6
7#ifndef __ASSEMBLY__ 3#ifndef __ASSEMBLY__
8 4#include <asm-generic/hardirq.h>
9#include <linux/cache.h>
10
11/* entry.S is sensitive to the offsets of these fields */
12typedef struct {
13 unsigned int __softirq_pending;
14} ____cacheline_aligned irq_cpustat_t;
15
16void ack_bad_irq(unsigned int irq);
17
18/* Standard mappings for irq_cpustat_t above */
19#include <linux/irq_cpustat.h>
20
21#endif /* __ASSEMBLY__ */ 5#endif /* __ASSEMBLY__ */
22
23#endif /* __ASM_AVR32_HARDIRQ_H */ 6#endif /* __ASM_AVR32_HARDIRQ_H */
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index 9f572229d318..9604f7758f9a 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -16,15 +16,6 @@
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/sysdev.h> 17#include <linux/sysdev.h>
18 18
19/*
20 * 'what should we do if we get a hw irq event on an illegal vector'.
21 * each architecture has to answer this themselves.
22 */
23void ack_bad_irq(unsigned int irq)
24{
25 printk("unexpected IRQ %u\n", irq);
26}
27
28/* May be overridden by platform code */ 19/* May be overridden by platform code */
29int __weak nmi_enable(void) 20int __weak nmi_enable(void)
30{ 21{
@@ -51,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v)
51 } 42 }
52 43
53 if (i < NR_IRQS) { 44 if (i < NR_IRQS) {
54 spin_lock_irqsave(&irq_desc[i].lock, flags); 45 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
55 action = irq_desc[i].action; 46 action = irq_desc[i].action;
56 if (!action) 47 if (!action)
57 goto unlock; 48 goto unlock;
@@ -66,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)
66 57
67 seq_putc(p, '\n'); 58 seq_putc(p, '\n');
68 unlock: 59 unlock:
69 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 60 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
70 } 61 }
71 62
72 return 0; 63 return 0;
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index c4b56654349a..9cd2bd91d64a 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -39,30 +39,10 @@ SECTIONS
39 __tagtable_begin = .; 39 __tagtable_begin = .;
40 *(.taglist.init) 40 *(.taglist.init)
41 __tagtable_end = .; 41 __tagtable_end = .;
42 INIT_DATA
43 . = ALIGN(16);
44 __setup_start = .;
45 *(.init.setup)
46 __setup_end = .;
47 . = ALIGN(4);
48 __initcall_start = .;
49 INITCALLS
50 __initcall_end = .;
51 __con_initcall_start = .;
52 *(.con_initcall.init)
53 __con_initcall_end = .;
54 __security_initcall_start = .;
55 *(.security_initcall.init)
56 __security_initcall_end = .;
57#ifdef CONFIG_BLK_DEV_INITRD
58 . = ALIGN(32);
59 __initramfs_start = .;
60 *(.init.ramfs)
61 __initramfs_end = .;
62#endif
63 . = ALIGN(PAGE_SIZE);
64 __init_end = .;
65 } 42 }
43 INIT_DATA_SECTION(16)
44 . = ALIGN(PAGE_SIZE);
45 __init_end = .;
66 46
67 .text : AT(ADDR(.text) - LOAD_OFFSET) { 47 .text : AT(ADDR(.text) - LOAD_OFFSET) {
68 _evba = .; 48 _evba = .;
@@ -78,34 +58,16 @@ SECTIONS
78 _etext = .; 58 _etext = .;
79 } = 0xd703d703 59 } = 0xd703d703
80 60
81 . = ALIGN(4); 61 EXCEPTION_TABLE(4)
82 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
83 __start___ex_table = .;
84 *(__ex_table)
85 __stop___ex_table = .;
86 }
87
88 RODATA 62 RODATA
89 63
90 . = ALIGN(THREAD_SIZE);
91
92 .data : AT(ADDR(.data) - LOAD_OFFSET) { 64 .data : AT(ADDR(.data) - LOAD_OFFSET) {
93 _data = .; 65 _data = .;
94 _sdata = .; 66 _sdata = .;
95 /*
96 * First, the init task union, aligned to an 8K boundary.
97 */
98 *(.data.init_task)
99 67
100 /* Then, the page-aligned data */ 68 INIT_TASK_DATA(THREAD_SIZE)
101 . = ALIGN(PAGE_SIZE); 69 PAGE_ALIGNED_DATA(PAGE_SIZE);
102 *(.data.page_aligned) 70 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
103
104 /* Then, the cacheline aligned data */
105 . = ALIGN(L1_CACHE_BYTES);
106 *(.data.cacheline_aligned)
107
108 /* And the rest... */
109 *(.data.rel*) 71 *(.data.rel*)
110 DATA_DATA 72 DATA_DATA
111 CONSTRUCTORS 73 CONSTRUCTORS
@@ -113,16 +75,8 @@ SECTIONS
113 _edata = .; 75 _edata = .;
114 } 76 }
115 77
116 78 BSS_SECTION(0, 8, 8)
117 . = ALIGN(8); 79 _end = .;
118 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
119 __bss_start = .;
120 *(.bss)
121 *(COMMON)
122 . = ALIGN(8);
123 __bss_stop = .;
124 _end = .;
125 }
126 80
127 DWARF_DEBUG 81 DWARF_DEBUG
128 82
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index eb9d4dc2e86d..1aa1ea5e9212 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -15,6 +15,8 @@
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/usb/atmel_usba_udc.h> 17#include <linux/usb/atmel_usba_udc.h>
18
19#include <mach/atmel-mci.h>
18#include <linux/atmel-mci.h> 20#include <linux/atmel-mci.h>
19 21
20#include <asm/io.h> 22#include <asm/io.h>
@@ -1181,19 +1183,32 @@ static struct resource atmel_spi1_resource[] = {
1181DEFINE_DEV(atmel_spi, 1); 1183DEFINE_DEV(atmel_spi, 1);
1182DEV_CLK(spi_clk, atmel_spi1, pba, 1); 1184DEV_CLK(spi_clk, atmel_spi1, pba, 1);
1183 1185
1184static void __init 1186void __init
1185at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, 1187at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n)
1186 unsigned int n, const u8 *pins)
1187{ 1188{
1189 /*
1190 * Manage the chipselects as GPIOs, normally using the same pins
1191 * the SPI controller expects; but boards can use other pins.
1192 */
1193 static u8 __initdata spi_pins[][4] = {
1194 { GPIO_PIN_PA(3), GPIO_PIN_PA(4),
1195 GPIO_PIN_PA(5), GPIO_PIN_PA(20) },
1196 { GPIO_PIN_PB(2), GPIO_PIN_PB(3),
1197 GPIO_PIN_PB(4), GPIO_PIN_PA(27) },
1198 };
1188 unsigned int pin, mode; 1199 unsigned int pin, mode;
1189 1200
1201 /* There are only 2 SPI controllers */
1202 if (bus_num > 1)
1203 return;
1204
1190 for (; n; n--, b++) { 1205 for (; n; n--, b++) {
1191 b->bus_num = bus_num; 1206 b->bus_num = bus_num;
1192 if (b->chip_select >= 4) 1207 if (b->chip_select >= 4)
1193 continue; 1208 continue;
1194 pin = (unsigned)b->controller_data; 1209 pin = (unsigned)b->controller_data;
1195 if (!pin) { 1210 if (!pin) {
1196 pin = pins[b->chip_select]; 1211 pin = spi_pins[bus_num][b->chip_select];
1197 b->controller_data = (void *)pin; 1212 b->controller_data = (void *)pin;
1198 } 1213 }
1199 mode = AT32_GPIOF_OUTPUT; 1214 mode = AT32_GPIOF_OUTPUT;
@@ -1206,16 +1221,6 @@ at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
1206struct platform_device *__init 1221struct platform_device *__init
1207at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n) 1222at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
1208{ 1223{
1209 /*
1210 * Manage the chipselects as GPIOs, normally using the same pins
1211 * the SPI controller expects; but boards can use other pins.
1212 */
1213 static u8 __initdata spi0_pins[] =
1214 { GPIO_PIN_PA(3), GPIO_PIN_PA(4),
1215 GPIO_PIN_PA(5), GPIO_PIN_PA(20), };
1216 static u8 __initdata spi1_pins[] =
1217 { GPIO_PIN_PB(2), GPIO_PIN_PB(3),
1218 GPIO_PIN_PB(4), GPIO_PIN_PA(27), };
1219 struct platform_device *pdev; 1224 struct platform_device *pdev;
1220 u32 pin_mask; 1225 u32 pin_mask;
1221 1226
@@ -1228,7 +1233,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
1228 select_peripheral(PIOA, (1 << 0), PERIPH_A, AT32_GPIOF_PULLUP); 1233 select_peripheral(PIOA, (1 << 0), PERIPH_A, AT32_GPIOF_PULLUP);
1229 select_peripheral(PIOA, pin_mask, PERIPH_A, 0); 1234 select_peripheral(PIOA, pin_mask, PERIPH_A, 0);
1230 1235
1231 at32_spi_setup_slaves(0, b, n, spi0_pins); 1236 at32_spi_setup_slaves(0, b, n);
1232 break; 1237 break;
1233 1238
1234 case 1: 1239 case 1:
@@ -1239,7 +1244,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
1239 select_peripheral(PIOB, (1 << 0), PERIPH_B, AT32_GPIOF_PULLUP); 1244 select_peripheral(PIOB, (1 << 0), PERIPH_B, AT32_GPIOF_PULLUP);
1240 select_peripheral(PIOB, pin_mask, PERIPH_B, 0); 1245 select_peripheral(PIOB, pin_mask, PERIPH_B, 0);
1241 1246
1242 at32_spi_setup_slaves(1, b, n, spi1_pins); 1247 at32_spi_setup_slaves(1, b, n);
1243 break; 1248 break;
1244 1249
1245 default: 1250 default:
@@ -1320,7 +1325,7 @@ struct platform_device *__init
1320at32_add_device_mci(unsigned int id, struct mci_platform_data *data) 1325at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1321{ 1326{
1322 struct platform_device *pdev; 1327 struct platform_device *pdev;
1323 struct dw_dma_slave *dws = &data->dma_slave; 1328 struct mci_dma_slave *slave;
1324 u32 pioa_mask; 1329 u32 pioa_mask;
1325 u32 piob_mask; 1330 u32 piob_mask;
1326 1331
@@ -1339,13 +1344,17 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1339 ARRAY_SIZE(atmel_mci0_resource))) 1344 ARRAY_SIZE(atmel_mci0_resource)))
1340 goto fail; 1345 goto fail;
1341 1346
1342 dws->dma_dev = &dw_dmac0_device.dev; 1347 slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
1343 dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; 1348
1344 dws->cfg_hi = (DWC_CFGH_SRC_PER(0) 1349 slave->sdata.dma_dev = &dw_dmac0_device.dev;
1350 slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
1351 slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0)
1345 | DWC_CFGH_DST_PER(1)); 1352 | DWC_CFGH_DST_PER(1));
1346 dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL 1353 slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL
1347 | DWC_CFGL_HS_SRC_POL); 1354 | DWC_CFGL_HS_SRC_POL);
1348 1355
1356 data->dma_slave = slave;
1357
1349 if (platform_device_add_data(pdev, data, 1358 if (platform_device_add_data(pdev, data,
1350 sizeof(struct mci_platform_data))) 1359 sizeof(struct mci_platform_data)))
1351 goto fail; 1360 goto fail;
@@ -1411,6 +1420,8 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1411 return pdev; 1420 return pdev;
1412 1421
1413fail: 1422fail:
1423 data->dma_slave = NULL;
1424 kfree(slave);
1414 platform_device_put(pdev); 1425 platform_device_put(pdev);
1415 return NULL; 1426 return NULL;
1416} 1427}
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
new file mode 100644
index 000000000000..a9b38967f703
--- /dev/null
+++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
@@ -0,0 +1,24 @@
1#ifndef __MACH_ATMEL_MCI_H
2#define __MACH_ATMEL_MCI_H
3
4#include <linux/dw_dmac.h>
5
6/**
7 * struct mci_dma_data - DMA data for MCI interface
8 */
9struct mci_dma_data {
10 struct dw_dma_slave sdata;
11};
12
13/* accessor macros */
14#define slave_data_ptr(s) (&(s)->sdata)
15#define find_slave_dev(s) ((s)->sdata.dma_dev)
16
17#define setup_dma_addr(s, t, r) do { \
18 if (s) { \
19 (s)->sdata.tx_reg = (t); \
20 (s)->sdata.rx_reg = (r); \
21 } \
22} while (0)
23
24#endif /* __MACH_ATMEL_MCI_H */
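
The accessor macros introduced in this new header are meant to hide the dw_dmac specifics from the MMC host driver. As a rough, illustrative sketch only (the struct my_mci_host type, its fields, and the function name below are hypothetical and not part of this patch), a driver holding the platform-supplied struct mci_dma_data could use them like this:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <mach/atmel-mci.h>

struct my_mci_host {                          /* hypothetical host state */
	struct mci_dma_data *dma_data;        /* handed in via platform data */
};

static void my_mci_setup_dma(struct my_mci_host *host,
			     dma_addr_t tx_fifo, dma_addr_t rx_fifo)
{
	/* Record the controller FIFO addresses in the dw_dmac slave data;
	 * the macro itself tolerates a NULL slave pointer. */
	setup_dma_addr(host->dma_data, tx_fifo, rx_fifo);

	/* The owning DMA controller device is available for channel filtering. */
	if (find_slave_dev(host->dma_data))
		pr_debug("MCI DMA provided by %s\n",
			 dev_name(find_slave_dev(host->dma_data)));
}
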
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index ddedb471f33e..c7f25bb1d068 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -49,6 +49,7 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
49struct spi_board_info; 49struct spi_board_info;
50struct platform_device * 50struct platform_device *
51at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n); 51at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);
52void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n);
52 53
53struct atmel_lcdfb_info; 54struct atmel_lcdfb_info;
54struct platform_device * 55struct platform_device *
diff --git a/arch/blackfin/include/asm/bfin-lq035q1.h b/arch/blackfin/include/asm/bfin-lq035q1.h
new file mode 100644
index 000000000000..57bc21ac2296
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin-lq035q1.h
@@ -0,0 +1,28 @@
1/*
2 * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
3 *
4 * Copyright 2008-2009 Analog Devices Inc.
5 * Licensed under the GPL-2 or later.
6 */
7
8#ifndef BFIN_LQ035Q1_H
9#define BFIN_LQ035Q1_H
10
11#define LQ035_RL (0 << 8) /* Right -> Left Scan */
12#define LQ035_LR (1 << 8) /* Left -> Right Scan */
13#define LQ035_TB (1 << 9) /* Top -> Bottom Scan */
14#define LQ035_BT (0 << 9) /* Bottom -> Top Scan */
15#define LQ035_BGR (1 << 11) /* Use BGR format */
16#define LQ035_RGB (0 << 11) /* Use RGB format */
17#define LQ035_NORM (1 << 13) /* Reversal */
18#define LQ035_REV (0 << 13) /* Reversal */
19
20struct bfin_lq035q1fb_disp_info {
21
22 unsigned mode;
23 /* GPIOs */
24 int use_bl;
25 unsigned gpio_bl;
26};
27
28#endif /* BFIN_LQ035Q1_H */
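
The mode flags and struct bfin_lq035q1fb_disp_info added here are consumed as framebuffer platform data. A minimal sketch of a board-file definition using them (the variable name and the choice of flags are assumptions for illustration, not taken from this patch) might look like:

#include <asm/bfin-lq035q1.h>

static struct bfin_lq035q1fb_disp_info my_lq035q1_info = {
	/* left-to-right, top-to-bottom scan, RGB ordering, no reversal */
	.mode    = LQ035_LR | LQ035_TB | LQ035_RGB | LQ035_NORM,
	.use_bl  = 0,   /* backlight GPIO not driven by the LCD driver */
	.gpio_bl = 0,   /* ignored while use_bl == 0 */
};

The structure would then be attached to the framebuffer platform device as its platform_data in the board setup code.
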
diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h
index 8e0764c81eaf..5b50f0ecacf8 100644
--- a/arch/blackfin/include/asm/elf.h
+++ b/arch/blackfin/include/asm/elf.h
@@ -55,7 +55,6 @@ do { \
55 _regs->p2 = _dynamic_addr; \ 55 _regs->p2 = _dynamic_addr; \
56} while(0) 56} while(0)
57 57
58#define USE_ELF_CORE_DUMP
59#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC 58#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
60#define ELF_EXEC_PAGESIZE 4096 59#define ELF_EXEC_PAGESIZE 4096
61 60
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index b0c7f0ee4b03..1942ccfedbe0 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
17asmlinkage void __raw_spin_lock_asm(volatile int *ptr); 17asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
18asmlinkage int __raw_spin_trylock_asm(volatile int *ptr); 18asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
19asmlinkage void __raw_spin_unlock_asm(volatile int *ptr); 19asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
20asmlinkage void __raw_read_lock_asm(volatile int *ptr); 20asmlinkage void arch_read_lock_asm(volatile int *ptr);
21asmlinkage int __raw_read_trylock_asm(volatile int *ptr); 21asmlinkage int arch_read_trylock_asm(volatile int *ptr);
22asmlinkage void __raw_read_unlock_asm(volatile int *ptr); 22asmlinkage void arch_read_unlock_asm(volatile int *ptr);
23asmlinkage void __raw_write_lock_asm(volatile int *ptr); 23asmlinkage void arch_write_lock_asm(volatile int *ptr);
24asmlinkage int __raw_write_trylock_asm(volatile int *ptr); 24asmlinkage int arch_write_trylock_asm(volatile int *ptr);
25asmlinkage void __raw_write_unlock_asm(volatile int *ptr); 25asmlinkage void arch_write_unlock_asm(volatile int *ptr);
26 26
27static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 27static inline int arch_spin_is_locked(arch_spinlock_t *lock)
28{ 28{
29 return __raw_spin_is_locked_asm(&lock->lock); 29 return __raw_spin_is_locked_asm(&lock->lock);
30} 30}
31 31
32static inline void __raw_spin_lock(raw_spinlock_t *lock) 32static inline void arch_spin_lock(arch_spinlock_t *lock)
33{ 33{
34 __raw_spin_lock_asm(&lock->lock); 34 __raw_spin_lock_asm(&lock->lock);
35} 35}
36 36
37#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 37#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
38 38
39static inline int __raw_spin_trylock(raw_spinlock_t *lock) 39static inline int arch_spin_trylock(arch_spinlock_t *lock)
40{ 40{
41 return __raw_spin_trylock_asm(&lock->lock); 41 return __raw_spin_trylock_asm(&lock->lock);
42} 42}
43 43
44static inline void __raw_spin_unlock(raw_spinlock_t *lock) 44static inline void arch_spin_unlock(arch_spinlock_t *lock)
45{ 45{
46 __raw_spin_unlock_asm(&lock->lock); 46 __raw_spin_unlock_asm(&lock->lock);
47} 47}
48 48
49static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 49static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
50{ 50{
51 while (__raw_spin_is_locked(lock)) 51 while (arch_spin_is_locked(lock))
52 cpu_relax(); 52 cpu_relax();
53} 53}
54 54
55static inline int __raw_read_can_lock(raw_rwlock_t *rw) 55static inline int arch_read_can_lock(arch_rwlock_t *rw)
56{ 56{
57 return __raw_uncached_fetch_asm(&rw->lock) > 0; 57 return __raw_uncached_fetch_asm(&rw->lock) > 0;
58} 58}
59 59
60static inline int __raw_write_can_lock(raw_rwlock_t *rw) 60static inline int arch_write_can_lock(arch_rwlock_t *rw)
61{ 61{
62 return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; 62 return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
63} 63}
64 64
65static inline void __raw_read_lock(raw_rwlock_t *rw) 65static inline void arch_read_lock(arch_rwlock_t *rw)
66{ 66{
67 __raw_read_lock_asm(&rw->lock); 67 arch_read_lock_asm(&rw->lock);
68} 68}
69 69
70static inline int __raw_read_trylock(raw_rwlock_t *rw) 70static inline int arch_read_trylock(arch_rwlock_t *rw)
71{ 71{
72 return __raw_read_trylock_asm(&rw->lock); 72 return arch_read_trylock_asm(&rw->lock);
73} 73}
74 74
75static inline void __raw_read_unlock(raw_rwlock_t *rw) 75static inline void arch_read_unlock(arch_rwlock_t *rw)
76{ 76{
77 __raw_read_unlock_asm(&rw->lock); 77 arch_read_unlock_asm(&rw->lock);
78} 78}
79 79
80static inline void __raw_write_lock(raw_rwlock_t *rw) 80static inline void arch_write_lock(arch_rwlock_t *rw)
81{ 81{
82 __raw_write_lock_asm(&rw->lock); 82 arch_write_lock_asm(&rw->lock);
83} 83}
84 84
85static inline int __raw_write_trylock(raw_rwlock_t *rw) 85static inline int arch_write_trylock(arch_rwlock_t *rw)
86{ 86{
87 return __raw_write_trylock_asm(&rw->lock); 87 return arch_write_trylock_asm(&rw->lock);
88} 88}
89 89
90static inline void __raw_write_unlock(raw_rwlock_t *rw) 90static inline void arch_write_unlock(arch_rwlock_t *rw)
91{ 91{
92 __raw_write_unlock_asm(&rw->lock); 92 arch_write_unlock_asm(&rw->lock);
93} 93}
94 94
95#define _raw_spin_relax(lock) cpu_relax() 95#define arch_spin_relax(lock) cpu_relax()
96#define _raw_read_relax(lock) cpu_relax() 96#define arch_read_relax(lock) cpu_relax()
97#define _raw_write_relax(lock) cpu_relax() 97#define arch_write_relax(lock) cpu_relax()
98 98
99#endif 99#endif
100 100
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index be75762c0610..1a33608c958b 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -15,14 +15,14 @@
15 15
16typedef struct { 16typedef struct {
17 volatile unsigned int lock; 17 volatile unsigned int lock;
18} raw_spinlock_t; 18} arch_spinlock_t;
19 19
20#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 20#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
21 21
22typedef struct { 22typedef struct {
23 volatile unsigned int lock; 23 volatile unsigned int lock;
24} raw_rwlock_t; 24} arch_rwlock_t;
25 25
26#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 26#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
27 27
28#endif 28#endif
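
The Blackfin hunks above are part of a tree-wide rename: the arch-level lock primitives move from the __raw_*/raw_spinlock_t namespace to arch_*/arch_spinlock_t, freeing the raw_* names for the generic layer. A minimal sketch of the contract an architecture now provides (illustrative only; it uses a plain GCC test-and-set rather than Blackfin's assembly helpers, and cpu_relax() is a stand-in):

typedef struct {
	volatile unsigned int lock;		/* 0 = free, 1 = held */
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
#define cpu_relax()			do { } while (0)	/* stand-in for the real barrier/pause */

static inline int arch_spin_trylock(arch_spinlock_t *l)
{
	/* atomically set the flag; success iff it was previously clear */
	return __sync_lock_test_and_set(&l->lock, 1) == 0;
}

static inline void arch_spin_lock(arch_spinlock_t *l)
{
	while (!arch_spin_trylock(l))
		cpu_relax();
}

static inline void arch_spin_unlock(arch_spinlock_t *l)
{
	__sync_lock_release(&l->lock);		/* clear the flag with release semantics */
}
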
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index db9f9c91f11f..64cff54a8a58 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
23 23
24static struct irq_desc bad_irq_desc = { 24static struct irq_desc bad_irq_desc = {
25 .handle_irq = handle_bad_irq, 25 .handle_irq = handle_bad_irq,
26 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), 26 .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
27}; 27};
28 28
29#ifdef CONFIG_CPUMASK_OFFSTACK 29#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v)
39 unsigned long flags; 39 unsigned long flags;
40 40
41 if (i < NR_IRQS) { 41 if (i < NR_IRQS) {
42 spin_lock_irqsave(&irq_desc[i].lock, flags); 42 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
43 action = irq_desc[i].action; 43 action = irq_desc[i].action;
44 if (!action) 44 if (!action)
45 goto skip; 45 goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
53 53
54 seq_putc(p, '\n'); 54 seq_putc(p, '\n');
55 skip: 55 skip:
56 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 56 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
57 } else if (i == NR_IRQS) { 57 } else if (i == NR_IRQS) {
58 seq_printf(p, "NMI: "); 58 seq_printf(p, "NMI: ");
59 for_each_online_cpu(j) 59 for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 78cb3d38f899..9636bace00e8 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp)
1140 if (fp->ipend & ~0x3F) { 1140 if (fp->ipend & ~0x3F) {
1141 for (i = 0; i < (NR_IRQS - 1); i++) { 1141 for (i = 0; i < (NR_IRQS - 1); i++) {
1142 if (!in_atomic) 1142 if (!in_atomic)
1143 spin_lock_irqsave(&irq_desc[i].lock, flags); 1143 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
1144 1144
1145 action = irq_desc[i].action; 1145 action = irq_desc[i].action;
1146 if (!action) 1146 if (!action)
@@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp)
1155 verbose_printk("\n"); 1155 verbose_printk("\n");
1156unlock: 1156unlock:
1157 if (!in_atomic) 1157 if (!in_atomic)
1158 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 1158 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
1159 } 1159 }
1160 } 1160 }
1161 1161
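
The irqchip.c and traps.c hunks follow the same conversion: irq_desc[].lock has become a raw_spinlock_t, so every walker of the descriptor table must pair raw_spin_lock_irqsave() with raw_spin_unlock_irqrestore(). A hedged sketch of the pattern the per-arch show_interrupts() implementations converge on (print_irq_line() is a made-up stand-in for the seq_printf() calls elided in the hunks):

static void dump_one_irq(struct seq_file *p, int i)
{
	struct irqaction *action;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_desc[i].lock, flags);	/* raw variant now required */
	action = irq_desc[i].action;
	if (action)
		print_irq_line(p, i, action);			/* hypothetical helper */
	raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
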
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c5..f171a6600fbc 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
9extern void cris_spin_lock(void *l); 9extern void cris_spin_lock(void *l);
10extern int cris_spin_trylock(void *l); 10extern int cris_spin_trylock(void *l);
11 11
12static inline int __raw_spin_is_locked(raw_spinlock_t *x) 12static inline int arch_spin_is_locked(arch_spinlock_t *x)
13{ 13{
14 return *(volatile signed char *)(&(x)->slock) <= 0; 14 return *(volatile signed char *)(&(x)->slock) <= 0;
15} 15}
16 16
17static inline void __raw_spin_unlock(raw_spinlock_t *lock) 17static inline void arch_spin_unlock(arch_spinlock_t *lock)
18{ 18{
19 __asm__ volatile ("move.d %1,%0" \ 19 __asm__ volatile ("move.d %1,%0" \
20 : "=m" (lock->slock) \ 20 : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
22 : "memory"); 22 : "memory");
23} 23}
24 24
25static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 25static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
26{ 26{
27 while (__raw_spin_is_locked(lock)) 27 while (arch_spin_is_locked(lock))
28 cpu_relax(); 28 cpu_relax();
29} 29}
30 30
31static inline int __raw_spin_trylock(raw_spinlock_t *lock) 31static inline int arch_spin_trylock(arch_spinlock_t *lock)
32{ 32{
33 return cris_spin_trylock((void *)&lock->slock); 33 return cris_spin_trylock((void *)&lock->slock);
34} 34}
35 35
36static inline void __raw_spin_lock(raw_spinlock_t *lock) 36static inline void arch_spin_lock(arch_spinlock_t *lock)
37{ 37{
38 cris_spin_lock((void *)&lock->slock); 38 cris_spin_lock((void *)&lock->slock);
39} 39}
40 40
41static inline void 41static inline void
42__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 42arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
43{ 43{
44 __raw_spin_lock(lock); 44 arch_spin_lock(lock);
45} 45}
46 46
47/* 47/*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
56 * 56 *
57 */ 57 */
58 58
59static inline int __raw_read_can_lock(raw_rwlock_t *x) 59static inline int arch_read_can_lock(arch_rwlock_t *x)
60{ 60{
61 return (int)(x)->lock > 0; 61 return (int)(x)->lock > 0;
62} 62}
63 63
64static inline int __raw_write_can_lock(raw_rwlock_t *x) 64static inline int arch_write_can_lock(arch_rwlock_t *x)
65{ 65{
66 return (x)->lock == RW_LOCK_BIAS; 66 return (x)->lock == RW_LOCK_BIAS;
67} 67}
68 68
69static inline void __raw_read_lock(raw_rwlock_t *rw) 69static inline void arch_read_lock(arch_rwlock_t *rw)
70{ 70{
71 __raw_spin_lock(&rw->slock); 71 arch_spin_lock(&rw->slock);
72 while (rw->lock == 0); 72 while (rw->lock == 0);
73 rw->lock--; 73 rw->lock--;
74 __raw_spin_unlock(&rw->slock); 74 arch_spin_unlock(&rw->slock);
75} 75}
76 76
77static inline void __raw_write_lock(raw_rwlock_t *rw) 77static inline void arch_write_lock(arch_rwlock_t *rw)
78{ 78{
79 __raw_spin_lock(&rw->slock); 79 arch_spin_lock(&rw->slock);
80 while (rw->lock != RW_LOCK_BIAS); 80 while (rw->lock != RW_LOCK_BIAS);
81 rw->lock = 0; 81 rw->lock = 0;
82 __raw_spin_unlock(&rw->slock); 82 arch_spin_unlock(&rw->slock);
83} 83}
84 84
85static inline void __raw_read_unlock(raw_rwlock_t *rw) 85static inline void arch_read_unlock(arch_rwlock_t *rw)
86{ 86{
87 __raw_spin_lock(&rw->slock); 87 arch_spin_lock(&rw->slock);
88 rw->lock++; 88 rw->lock++;
89 __raw_spin_unlock(&rw->slock); 89 arch_spin_unlock(&rw->slock);
90} 90}
91 91
92static inline void __raw_write_unlock(raw_rwlock_t *rw) 92static inline void arch_write_unlock(arch_rwlock_t *rw)
93{ 93{
94 __raw_spin_lock(&rw->slock); 94 arch_spin_lock(&rw->slock);
95 while (rw->lock != RW_LOCK_BIAS); 95 while (rw->lock != RW_LOCK_BIAS);
96 rw->lock = RW_LOCK_BIAS; 96 rw->lock = RW_LOCK_BIAS;
97 __raw_spin_unlock(&rw->slock); 97 arch_spin_unlock(&rw->slock);
98} 98}
99 99
100static inline int __raw_read_trylock(raw_rwlock_t *rw) 100static inline int arch_read_trylock(arch_rwlock_t *rw)
101{ 101{
102 int ret = 0; 102 int ret = 0;
103 __raw_spin_lock(&rw->slock); 103 arch_spin_lock(&rw->slock);
104 if (rw->lock != 0) { 104 if (rw->lock != 0) {
105 rw->lock--; 105 rw->lock--;
106 ret = 1; 106 ret = 1;
107 } 107 }
108 __raw_spin_unlock(&rw->slock); 108 arch_spin_unlock(&rw->slock);
109 return ret; 109 return ret;
110} 110}
111 111
112static inline int __raw_write_trylock(raw_rwlock_t *rw) 112static inline int arch_write_trylock(arch_rwlock_t *rw)
113{ 113{
114 int ret = 0; 114 int ret = 0;
115 __raw_spin_lock(&rw->slock); 115 arch_spin_lock(&rw->slock);
116 if (rw->lock == RW_LOCK_BIAS) { 116 if (rw->lock == RW_LOCK_BIAS) {
117 rw->lock = 0; 117 rw->lock = 0;
118 ret = 1; 118 ret = 1;
119 } 119 }
120 __raw_spin_unlock(&rw->slock); 120 arch_spin_unlock(&rw->slock);
 121 return ret; 121 return ret;
122} 122}
123 123
124#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) 124#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
125#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) 125#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
126 126
127#define _raw_spin_relax(lock) cpu_relax() 127#define arch_spin_relax(lock) cpu_relax()
128#define _raw_read_relax(lock) cpu_relax() 128#define arch_read_relax(lock) cpu_relax()
129#define _raw_write_relax(lock) cpu_relax() 129#define arch_write_relax(lock) cpu_relax()
130 130
131#endif /* __ASM_ARCH_SPINLOCK_H */ 131#endif /* __ASM_ARCH_SPINLOCK_H */
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h
index 0f51b10b9f4f..8a3d8e2b33c1 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/asm/elf.h
@@ -64,8 +64,6 @@ typedef unsigned long elf_fpregset_t;
64#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004 64#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
65/* End of excerpt from {binutils}/include/elf/cris.h. */ 65/* End of excerpt from {binutils}/include/elf/cris.h. */
66 66
67#define USE_ELF_CORE_DUMP
68
69#define ELF_EXEC_PAGESIZE 8192 67#define ELF_EXEC_PAGESIZE 8192
70 68
71/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 69/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 0ca7d9892cc6..b5ce0724a88f 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
52 } 52 }
53 53
54 if (i < NR_IRQS) { 54 if (i < NR_IRQS) {
55 spin_lock_irqsave(&irq_desc[i].lock, flags); 55 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
56 action = irq_desc[i].action; 56 action = irq_desc[i].action;
57 if (!action) 57 if (!action)
58 goto skip; 58 goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
71 71
72 seq_putc(p, '\n'); 72 seq_putc(p, '\n');
73skip: 73skip:
74 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 74 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
75 } 75 }
76 return 0; 76 return 0;
77} 77}
diff --git a/arch/frv/include/asm/elf.h b/arch/frv/include/asm/elf.h
index 7bbf6e47f8c8..c3819804a74b 100644
--- a/arch/frv/include/asm/elf.h
+++ b/arch/frv/include/asm/elf.h
@@ -115,7 +115,6 @@ do { \
115 __kernel_frame0_ptr->gr29 = 0; \ 115 __kernel_frame0_ptr->gr29 = 0; \
116} while(0) 116} while(0)
117 117
118#define USE_ELF_CORE_DUMP
119#define CORE_DUMP_USE_REGSET 118#define CORE_DUMP_USE_REGSET
120#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC 119#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
121#define ELF_EXEC_PAGESIZE 16384 120#define ELF_EXEC_PAGESIZE 16384
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index af3e824b91b3..62d1aba615dc 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
69 } 69 }
70 70
71 if (i < NR_IRQS) { 71 if (i < NR_IRQS) {
72 spin_lock_irqsave(&irq_desc[i].lock, flags); 72 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
73 action = irq_desc[i].action; 73 action = irq_desc[i].action;
74 if (action) { 74 if (action) {
75 seq_printf(p, "%3d: ", i); 75 seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
85 seq_putc(p, '\n'); 85 seq_putc(p, '\n');
86 } 86 }
87 87
88 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 88 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
89 } else if (i == NR_IRQS) { 89 } else if (i == NR_IRQS) {
90 seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); 90 seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
91 } 91 }
diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h
index 94e2284c8816..c24fa250d653 100644
--- a/arch/h8300/include/asm/elf.h
+++ b/arch/h8300/include/asm/elf.h
@@ -34,7 +34,6 @@ typedef unsigned long elf_fpregset_t;
34 34
35#define ELF_PLAT_INIT(_r) _r->er1 = 0 35#define ELF_PLAT_INIT(_r) _r->er1 = 0
36 36
37#define USE_ELF_CORE_DUMP
38#define ELF_EXEC_PAGESIZE 4096 37#define ELF_EXEC_PAGESIZE 4096
39 38
40/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 39/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 5c913d472119..c25dc2c2b1da 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v)
186 seq_puts(p, " CPU0"); 186 seq_puts(p, " CPU0");
187 187
188 if (i < NR_IRQS) { 188 if (i < NR_IRQS) {
189 spin_lock_irqsave(&irq_desc[i].lock, flags); 189 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
190 action = irq_desc[i].action; 190 action = irq_desc[i].action;
191 if (!action) 191 if (!action)
192 goto unlock; 192 goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v)
200 seq_printf(p, ", %s", action->name); 200 seq_printf(p, ", %s", action->name);
201 seq_putc(p, '\n'); 201 seq_putc(p, '\n');
202unlock: 202unlock:
203 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 203 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
204 } 204 }
205 return 0; 205 return 0;
206} 206}
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index f332e3fe4237..e14c492a8a93 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
677 spin_unlock_irqrestore(&ioc->saved_lock, flags); 677 spin_unlock_irqrestore(&ioc->saved_lock, flags);
678 678
679 pide = sba_search_bitmap(ioc, dev, pages_needed, 0); 679 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
680 if (unlikely(pide >= (ioc->res_size << 3))) 680 if (unlikely(pide >= (ioc->res_size << 3))) {
681 panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", 681 printk(KERN_WARNING "%s: I/O MMU @ %p is"
 682 ioc->ioc_hpa); 682 " out of mapping resources, %u %u %lx\n",
683 __func__, ioc->ioc_hpa, ioc->res_size,
684 pages_needed, dma_get_seg_boundary(dev));
685 return -1;
686 }
683#else 687#else
684 panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", 688 printk(KERN_WARNING "%s: I/O MMU @ %p is"
 685 ioc->ioc_hpa); 689 " out of mapping resources, %u %u %lx\n",
690 __func__, ioc->ioc_hpa, ioc->res_size,
691 pages_needed, dma_get_seg_boundary(dev));
692 return -1;
686#endif 693#endif
687 } 694 }
688 } 695 }
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
965#endif 972#endif
966 973
967 pide = sba_alloc_range(ioc, dev, size); 974 pide = sba_alloc_range(ioc, dev, size);
975 if (pide < 0)
976 return 0;
968 977
969 iovp = (dma_addr_t) pide << iovp_shift; 978 iovp = (dma_addr_t) pide << iovp_shift;
970 979
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1320 unsigned long dma_offset, dma_len; /* start/len of DMA stream */ 1329 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1321 int n_mappings = 0; 1330 int n_mappings = 0;
1322 unsigned int max_seg_size = dma_get_max_seg_size(dev); 1331 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1332 int idx;
1323 1333
1324 while (nents > 0) { 1334 while (nents > 0) {
1325 unsigned long vaddr = (unsigned long) sba_sg_address(startsg); 1335 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1418 vcontig_sg->dma_length = vcontig_len; 1428 vcontig_sg->dma_length = vcontig_len;
1419 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; 1429 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1420 ASSERT(dma_len <= DMA_CHUNK_SIZE); 1430 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1421 dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG 1431 idx = sba_alloc_range(ioc, dev, dma_len);
1422 | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift) 1432 if (idx < 0) {
1423 | dma_offset); 1433 dma_sg->dma_length = 0;
1434 return -1;
1435 }
1436 dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
1437 | dma_offset);
1424 n_mappings++; 1438 n_mappings++;
1425 } 1439 }
1426 1440
1427 return n_mappings; 1441 return n_mappings;
1428} 1442}
1429 1443
1430 1444static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1445 int nents, enum dma_data_direction dir,
1446 struct dma_attrs *attrs);
1431/** 1447/**
1432 * sba_map_sg - map Scatter/Gather list 1448 * sba_map_sg - map Scatter/Gather list
1433 * @dev: instance of PCI owned by the driver that's asking. 1449 * @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1493 ** Access to the virtual address is what forces a two pass algorithm. 1509 ** Access to the virtual address is what forces a two pass algorithm.
1494 */ 1510 */
1495 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); 1511 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1512 if (coalesced < 0) {
1513 sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
1514 return 0;
1515 }
1496 1516
1497 /* 1517 /*
1498 ** Program the I/O Pdir 1518 ** Program the I/O Pdir
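
The sba_iommu.c changes above replace a panic() on IOMMU exhaustion with a warning plus a -1 return, and every caller now checks for it, so the failure propagates out of sba_map_page()/sba_map_sg_attrs() as an ordinary mapping failure. A hedged driver-side sketch of what that buys (not part of the patch; dev, buf and len are assumed to exist in the caller):

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr)) {
		/* IOMMU space exhausted: drop or defer instead of crashing */
		dev_warn(dev, "DMA mapping failed, deferring transmit\n");
		return -ENOMEM;
	}
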
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 9a3abf58cea3..657725742617 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -11,8 +11,6 @@
11#include <asm/intrinsics.h> 11#include <asm/intrinsics.h>
12#include <asm/uaccess.h> 12#include <asm/uaccess.h>
13 13
14#define USE_ELF_CORE_DUMP 1
15
16/* Override elfcore.h */ 14/* Override elfcore.h */
17#define _LINUX_ELFCORE_H 1 15#define _LINUX_ELFCORE_H 1
18typedef unsigned int elf_greg_t; 16typedef unsigned int elf_greg_t;
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
127 * @addr: Address to start counting from 127 * @addr: Address to start counting from
128 * 128 *
129 * Similarly to clear_bit_unlock, the implementation uses a store 129 * Similarly to clear_bit_unlock, the implementation uses a store
130 * with release semantics. See also __raw_spin_unlock(). 130 * with release semantics. See also arch_spin_unlock().
131 */ 131 */
132static __inline__ void 132static __inline__ void
133__clear_bit_unlock(int nr, void *addr) 133__clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e7..7d09a09cdaad 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
73 if (!dev->dma_mask) 73 if (!dev->dma_mask)
74 return 0; 74 return 0;
75 75
76 return addr + size <= *dev->dma_mask; 76 return addr + size - 1 <= *dev->dma_mask;
77} 77}
78 78
79static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 79static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
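
The dma-mapping.h hunk fixes an off-by-one: a buffer whose last byte sits exactly at the mask limit is addressable, but "addr + size <= mask" rejects it. A standalone illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

static int old_check(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size <= mask;		/* pre-patch test */
}

static int new_check(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size - 1 <= mask;		/* patched test */
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;			/* 32-bit DMA mask */
	uint64_t addr = 0xfffff000ULL, size = 0x1000;	/* last byte is 0xffffffff */

	/* prints "old: 0  new: 1" - the old test wrongly rejects this buffer */
	printf("old: %d  new: %d\n",
	       old_check(addr, size, mask), new_check(addr, size, mask));
	return 0;
}
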
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029cb..e14108b19c09 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
25#define ELF_DATA ELFDATA2LSB 25#define ELF_DATA ELFDATA2LSB
26#define ELF_ARCH EM_IA_64 26#define ELF_ARCH EM_IA_64
27 27
28#define USE_ELF_CORE_DUMP
29#define CORE_DUMP_USE_REGSET 28#define CORE_DUMP_USE_REGSET
30 29
31/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are 30/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b31dbf5..bf2e37493e04 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
59extern int ia64_first_device_vector; 59extern int ia64_first_device_vector;
60extern int ia64_last_device_vector; 60extern int ia64_last_device_vector;
61 61
62#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
 63/* Reserve a vector with lower priority than the device vectors for the "move IRQ" IPI */
64#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */
65#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31
66#else
62#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30 67#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
68#endif
63#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7 69#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
64#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector 70#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
65#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector 71#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e2d949..cc8335eb3110 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
424extern void __iomem * ioremap(unsigned long offset, unsigned long size); 424extern void __iomem * ioremap(unsigned long offset, unsigned long size);
425extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); 425extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
426extern void iounmap (volatile void __iomem *addr); 426extern void iounmap (volatile void __iomem *addr);
427extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
428extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
427 429
428/* 430/*
429 * String version of IO memory access ops: 431 * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf0a789..43f96ab18fa0 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
106 unsigned long os_status; /* OS status to SAL, enum below */ 106 unsigned long os_status; /* OS status to SAL, enum below */
107 unsigned long context; /* 0 if return to same context 107 unsigned long context; /* 0 if return to same context
108 1 if return to new context */ 108 1 if return to new context */
109
110 /* I-resources */
111 unsigned long iip;
112 unsigned long ipsr;
113 unsigned long ifs;
109}; 114};
110 115
111enum { 116enum {
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf42..6a8a27cfae3e 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
22 22
23#include <asm/mmzone.h> 23#include <asm/mmzone.h>
24 24
25#define NUMA_NO_NODE -1
26
27extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; 25extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
28extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; 26extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
29extern pg_data_t *pgdat_list[MAX_NUMNODES]; 27extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b15782..e8762688e8e3 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
47#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 47#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
48 48
49#define __RWSEM_INITIALIZER(name) \ 49#define __RWSEM_INITIALIZER(name) \
50 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 50 { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
51 LIST_HEAD_INIT((name).wait_list) } 51 LIST_HEAD_INIT((name).wait_list) }
52 52
53#define DECLARE_RWSEM(name) \ 53#define DECLARE_RWSEM(name) \
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
17#include <asm/intrinsics.h> 17#include <asm/intrinsics.h>
18#include <asm/system.h> 18#include <asm/system.h>
19 19
20#define __raw_spin_lock_init(x) ((x)->lock = 0) 20#define arch_spin_lock_init(x) ((x)->lock = 0)
21 21
22/* 22/*
23 * Ticket locks are conceptually two parts, one indicating the current head of 23 * Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
38#define TICKET_BITS 15 38#define TICKET_BITS 15
39#define TICKET_MASK ((1 << TICKET_BITS) - 1) 39#define TICKET_MASK ((1 << TICKET_BITS) - 1)
40 40
41static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 41static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
42{ 42{
43 int *p = (int *)&lock->lock, ticket, serve; 43 int *p = (int *)&lock->lock, ticket, serve;
44 44
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
58 } 58 }
59} 59}
60 60
61static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) 61static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
62{ 62{
63 int tmp = ACCESS_ONCE(lock->lock); 63 int tmp = ACCESS_ONCE(lock->lock);
64 64
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
67 return 0; 67 return 0;
68} 68}
69 69
70static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) 70static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
71{ 71{
72 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; 72 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
73 73
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
75 ACCESS_ONCE(*p) = (tmp + 2) & ~1; 75 ACCESS_ONCE(*p) = (tmp + 2) & ~1;
76} 76}
77 77
78static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) 78static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
79{ 79{
80 int *p = (int *)&lock->lock, ticket; 80 int *p = (int *)&lock->lock, ticket;
81 81
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
89 } 89 }
90} 90}
91 91
92static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) 92static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
93{ 93{
94 long tmp = ACCESS_ONCE(lock->lock); 94 long tmp = ACCESS_ONCE(lock->lock);
95 95
96 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); 96 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
97} 97}
98 98
99static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) 99static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
100{ 100{
101 long tmp = ACCESS_ONCE(lock->lock); 101 long tmp = ACCESS_ONCE(lock->lock);
102 102
103 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; 103 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
104} 104}
105 105
106static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 106static inline int arch_spin_is_locked(arch_spinlock_t *lock)
107{ 107{
108 return __ticket_spin_is_locked(lock); 108 return __ticket_spin_is_locked(lock);
109} 109}
110 110
111static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 111static inline int arch_spin_is_contended(arch_spinlock_t *lock)
112{ 112{
113 return __ticket_spin_is_contended(lock); 113 return __ticket_spin_is_contended(lock);
114} 114}
115#define __raw_spin_is_contended __raw_spin_is_contended 115#define arch_spin_is_contended arch_spin_is_contended
116 116
117static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 117static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
118{ 118{
119 __ticket_spin_lock(lock); 119 __ticket_spin_lock(lock);
120} 120}
121 121
122static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) 122static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
123{ 123{
124 return __ticket_spin_trylock(lock); 124 return __ticket_spin_trylock(lock);
125} 125}
126 126
127static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 127static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
128{ 128{
129 __ticket_spin_unlock(lock); 129 __ticket_spin_unlock(lock);
130} 130}
131 131
132static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, 132static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
133 unsigned long flags) 133 unsigned long flags)
134{ 134{
135 __raw_spin_lock(lock); 135 arch_spin_lock(lock);
136} 136}
137 137
138static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 138static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
139{ 139{
140 __ticket_spin_unlock_wait(lock); 140 __ticket_spin_unlock_wait(lock);
141} 141}
142 142
143#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) 143#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
144#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) 144#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
145 145
146#ifdef ASM_SUPPORTED 146#ifdef ASM_SUPPORTED
147 147
148static __always_inline void 148static __always_inline void
149__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) 149arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
150{ 150{
151 __asm__ __volatile__ ( 151 __asm__ __volatile__ (
152 "tbit.nz p6, p0 = %1,%2\n" 152 "tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
169 : "p6", "p7", "r2", "memory"); 169 : "p6", "p7", "r2", "memory");
170} 170}
171 171
172#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) 172#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
173 173
174#else /* !ASM_SUPPORTED */ 174#else /* !ASM_SUPPORTED */
175 175
176#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) 176#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
177 177
178#define __raw_read_lock(rw) \ 178#define arch_read_lock(rw) \
179do { \ 179do { \
180 raw_rwlock_t *__read_lock_ptr = (rw); \ 180 arch_rwlock_t *__read_lock_ptr = (rw); \
181 \ 181 \
182 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ 182 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
183 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ 183 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -188,16 +188,16 @@ do { \
188 188
189#endif /* !ASM_SUPPORTED */ 189#endif /* !ASM_SUPPORTED */
190 190
191#define __raw_read_unlock(rw) \ 191#define arch_read_unlock(rw) \
192do { \ 192do { \
193 raw_rwlock_t *__read_lock_ptr = (rw); \ 193 arch_rwlock_t *__read_lock_ptr = (rw); \
194 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ 194 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
195} while (0) 195} while (0)
196 196
197#ifdef ASM_SUPPORTED 197#ifdef ASM_SUPPORTED
198 198
199static __always_inline void 199static __always_inline void
200__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) 200arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
201{ 201{
202 __asm__ __volatile__ ( 202 __asm__ __volatile__ (
203 "tbit.nz p6, p0 = %1, %2\n" 203 "tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
221 : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); 221 : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
222} 222}
223 223
224#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) 224#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
225 225
226#define __raw_write_trylock(rw) \ 226#define arch_write_trylock(rw) \
227({ \ 227({ \
228 register long result; \ 228 register long result; \
229 \ 229 \
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
235 (result == 0); \ 235 (result == 0); \
236}) 236})
237 237
238static inline void __raw_write_unlock(raw_rwlock_t *x) 238static inline void arch_write_unlock(arch_rwlock_t *x)
239{ 239{
240 u8 *y = (u8 *)x; 240 u8 *y = (u8 *)x;
241 barrier(); 241 barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
244 244
245#else /* !ASM_SUPPORTED */ 245#else /* !ASM_SUPPORTED */
246 246
247#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) 247#define arch_write_lock_flags(l, flags) arch_write_lock(l)
248 248
249#define __raw_write_lock(l) \ 249#define arch_write_lock(l) \
250({ \ 250({ \
251 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ 251 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
252 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ 252 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
257 } while (ia64_val); \ 257 } while (ia64_val); \
258}) 258})
259 259
260#define __raw_write_trylock(rw) \ 260#define arch_write_trylock(rw) \
261({ \ 261({ \
262 __u64 ia64_val; \ 262 __u64 ia64_val; \
263 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ 263 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
265 (ia64_val == 0); \ 265 (ia64_val == 0); \
266}) 266})
267 267
268static inline void __raw_write_unlock(raw_rwlock_t *x) 268static inline void arch_write_unlock(arch_rwlock_t *x)
269{ 269{
270 barrier(); 270 barrier();
271 x->write_lock = 0; 271 x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
273 273
274#endif /* !ASM_SUPPORTED */ 274#endif /* !ASM_SUPPORTED */
275 275
276static inline int __raw_read_trylock(raw_rwlock_t *x) 276static inline int arch_read_trylock(arch_rwlock_t *x)
277{ 277{
278 union { 278 union {
279 raw_rwlock_t lock; 279 arch_rwlock_t lock;
280 __u32 word; 280 __u32 word;
281 } old, new; 281 } old, new;
282 old.lock = new.lock = *x; 282 old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; 285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
286} 286}
287 287
288#define _raw_spin_relax(lock) cpu_relax() 288#define arch_spin_relax(lock) cpu_relax()
289#define _raw_read_relax(lock) cpu_relax() 289#define arch_read_relax(lock) cpu_relax()
290#define _raw_write_relax(lock) cpu_relax() 290#define arch_write_relax(lock) cpu_relax()
291 291
292#endif /* _ASM_IA64_SPINLOCK_H */ 292#endif /* _ASM_IA64_SPINLOCK_H */
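
The comment retained in this file describes ia64's ticket lock: the lock word holds a "now serving" head and a "next ticket" tail, and a CPU owns the lock once its ticket equals the head. A portable sketch of the same idea (C11 atomics, not the fetchadd4.acq assembly used above):

#include <stdatomic.h>

struct ticket_lock {
	atomic_uint next;	/* tail: next ticket to hand out */
	atomic_uint serving;	/* head: ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
	unsigned int me = atomic_fetch_add(&l->next, 1);	/* take a ticket */

	while (atomic_load(&l->serving) != me)
		;	/* spin; a real implementation would cpu_relax() here */
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->serving, 1);	/* serve the next waiter */
}
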
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int read_counter : 31; 15 volatile unsigned int read_counter : 31;
16 volatile unsigned int write_lock : 1; 16 volatile unsigned int write_lock : 1;
17} raw_rwlock_t; 17} arch_rwlock_t;
18 18
19#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
20 20
21#endif 21#endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
793 goto unlock_iosapic_lock; 793 goto unlock_iosapic_lock;
794 } 794 }
795 795
796 spin_lock(&irq_desc[irq].lock); 796 raw_spin_lock(&irq_desc[irq].lock);
797 dest = get_target_cpu(gsi, irq); 797 dest = get_target_cpu(gsi, irq);
798 dmode = choose_dmode(); 798 dmode = choose_dmode();
799 err = register_intr(gsi, irq, dmode, polarity, trigger); 799 err = register_intr(gsi, irq, dmode, polarity, trigger);
800 if (err < 0) { 800 if (err < 0) {
801 spin_unlock(&irq_desc[irq].lock); 801 raw_spin_unlock(&irq_desc[irq].lock);
802 irq = err; 802 irq = err;
803 goto unlock_iosapic_lock; 803 goto unlock_iosapic_lock;
804 } 804 }
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
817 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 817 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
818 cpu_logical_id(dest), dest, irq_to_vector(irq)); 818 cpu_logical_id(dest), dest, irq_to_vector(irq));
819 819
820 spin_unlock(&irq_desc[irq].lock); 820 raw_spin_unlock(&irq_desc[irq].lock);
821 unlock_iosapic_lock: 821 unlock_iosapic_lock:
822 spin_unlock_irqrestore(&iosapic_lock, flags); 822 spin_unlock_irqrestore(&iosapic_lock, flags);
823 return irq; 823 return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
71 } 71 }
72 72
73 if (i < NR_IRQS) { 73 if (i < NR_IRQS) {
74 spin_lock_irqsave(&irq_desc[i].lock, flags); 74 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
75 action = irq_desc[i].action; 75 action = irq_desc[i].action;
76 if (!action) 76 if (!action)
77 goto skip; 77 goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
91 91
92 seq_putc(p, '\n'); 92 seq_putc(p, '\n');
93skip: 93skip:
94 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 94 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
95 } else if (i == NR_IRQS) 95 } else if (i == NR_IRQS)
96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
97 return 0; 97 return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..d4093a173a3e 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu)
260} 260}
261 261
262#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) 262#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
263#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
264 263
265static enum vector_domain_type { 264static enum vector_domain_type {
266 VECTOR_DOMAIN_NONE, 265 VECTOR_DOMAIN_NONE,
@@ -345,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
345 344
346 desc = irq_desc + irq; 345 desc = irq_desc + irq;
347 cfg = irq_cfg + irq; 346 cfg = irq_cfg + irq;
348 spin_lock(&desc->lock); 347 raw_spin_lock(&desc->lock);
349 if (!cfg->move_cleanup_count) 348 if (!cfg->move_cleanup_count)
350 goto unlock; 349 goto unlock;
351 350
@@ -358,7 +357,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
358 spin_unlock_irqrestore(&vector_lock, flags); 357 spin_unlock_irqrestore(&vector_lock, flags);
359 cfg->move_cleanup_count--; 358 cfg->move_cleanup_count--;
360 unlock: 359 unlock:
361 spin_unlock(&desc->lock); 360 raw_spin_unlock(&desc->lock);
362 } 361 }
363 return IRQ_HANDLED; 362 return IRQ_HANDLED;
364} 363}
@@ -659,11 +658,8 @@ init_IRQ (void)
659 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 658 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
660#ifdef CONFIG_SMP 659#ifdef CONFIG_SMP
661#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) 660#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
662 if (vector_domain_type != VECTOR_DOMAIN_NONE) { 661 if (vector_domain_type != VECTOR_DOMAIN_NONE)
663 BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
664 IA64_FIRST_DEVICE_VECTOR++;
665 register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); 662 register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
666 }
667#endif 663#endif
668#endif 664#endif
669#ifdef CONFIG_PERFMON 665#ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 496ac7a99488..32f2639e9b0a 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -888,9 +888,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
888} 888}
889 889
890static void 890static void
891finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, 891finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
892 unsigned long *nat) 892 unsigned long *nat)
893{ 893{
894 const pal_min_state_area_t *ms = sos->pal_min_state;
894 const u64 *bank; 895 const u64 *bank;
895 896
896 /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use 897 /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
@@ -904,6 +905,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
904 regs->cr_iip = ms->pmsa_xip; 905 regs->cr_iip = ms->pmsa_xip;
905 regs->cr_ipsr = ms->pmsa_xpsr; 906 regs->cr_ipsr = ms->pmsa_xpsr;
906 regs->cr_ifs = ms->pmsa_xfs; 907 regs->cr_ifs = ms->pmsa_xfs;
908
909 sos->iip = ms->pmsa_iip;
910 sos->ipsr = ms->pmsa_ipsr;
911 sos->ifs = ms->pmsa_ifs;
907 } 912 }
908 regs->pr = ms->pmsa_pr; 913 regs->pr = ms->pmsa_pr;
909 regs->b0 = ms->pmsa_br0; 914 regs->b0 = ms->pmsa_br0;
@@ -1079,7 +1084,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
1079 memcpy(old_regs, regs, sizeof(*regs)); 1084 memcpy(old_regs, regs, sizeof(*regs));
1080 old_regs->loadrs = loadrs; 1085 old_regs->loadrs = loadrs;
1081 old_unat = old_regs->ar_unat; 1086 old_unat = old_regs->ar_unat;
1082 finish_pt_regs(old_regs, ms, &old_unat); 1087 finish_pt_regs(old_regs, sos, &old_unat);
1083 1088
1084 /* Next stack a struct switch_stack. mca_asm.S built a partial 1089 /* Next stack a struct switch_stack. mca_asm.S built a partial
1085 * switch_stack, copy it and fill in the blanks using pt_regs and 1090 * switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1150,7 +1155,7 @@ no_mod:
1150 mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", 1155 mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
1151 smp_processor_id(), type, msg); 1156 smp_processor_id(), type, msg);
1152 old_unat = regs->ar_unat; 1157 old_unat = regs->ar_unat;
1153 finish_pt_regs(regs, ms, &old_unat); 1158 finish_pt_regs(regs, sos, &old_unat);
1154 return previous_current; 1159 return previous_current;
1155} 1160}
1156 1161
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 2a140627dfd6..3dccdd8eb275 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -22,6 +22,12 @@ __ioremap (unsigned long phys_addr)
22} 22}
23 23
24void __iomem * 24void __iomem *
25early_ioremap (unsigned long phys_addr, unsigned long size)
26{
27 return __ioremap(phys_addr);
28}
29
30void __iomem *
25ioremap (unsigned long phys_addr, unsigned long size) 31ioremap (unsigned long phys_addr, unsigned long size)
26{ 32{
27 void __iomem *addr; 33 void __iomem *addr;
@@ -102,6 +108,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
102EXPORT_SYMBOL(ioremap_nocache); 108EXPORT_SYMBOL(ioremap_nocache);
103 109
104void 110void
111early_iounmap (volatile void __iomem *addr, unsigned long size)
112{
113}
114
115void
105iounmap (volatile void __iomem *addr) 116iounmap (volatile void __iomem *addr)
106{ 117{
107 if (REGION_NUMBER(addr) == RGN_GATE) 118 if (REGION_NUMBER(addr) == RGN_GATE)
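
ia64 gains early_ioremap()/early_iounmap() as thin wrappers so generic boot-time code can map firmware data before the full ioremap machinery is up; on ia64 they simply reuse __ioremap() and the unmap is a no-op. A hedged usage sketch (table_phys, table_len and parse_acpi_table() are placeholders, not from the patch):

	void __iomem *p = early_ioremap(table_phys, table_len);

	if (p) {
		parse_acpi_table(p, table_len);		/* hypothetical consumer */
		early_iounmap(p, table_len);
	}
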
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 35b2a27d2e77..efb454534e52 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,6 +9,7 @@
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/bitmap.h>
12#include <asm/sn/sn_sal.h> 13#include <asm/sn/sn_sal.h>
13#include <asm/sn/addrs.h> 14#include <asm/sn/addrs.h>
14#include <asm/sn/io.h> 15#include <asm/sn/io.h>
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
369static dma_addr_t 370static dma_addr_t
370tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) 371tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
371{ 372{
372 int i, ps, ps_shift, entry, entries, mapsize, last_entry; 373 int ps, ps_shift, entry, entries, mapsize;
373 u64 xio_addr, end_xio_addr; 374 u64 xio_addr, end_xio_addr;
374 struct tioca_common *tioca_common; 375 struct tioca_common *tioca_common;
375 struct tioca_kernel *tioca_kern; 376 struct tioca_kernel *tioca_kern;
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
410 map = tioca_kern->ca_pcigart_pagemap; 411 map = tioca_kern->ca_pcigart_pagemap;
411 mapsize = tioca_kern->ca_pcigart_entries; 412 mapsize = tioca_kern->ca_pcigart_entries;
412 413
413 entry = find_first_zero_bit(map, mapsize); 414 entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
414 while (entry < mapsize) { 415 if (entry >= mapsize) {
415 last_entry = find_next_bit(map, mapsize, entry);
416
417 if (last_entry - entry >= entries)
418 break;
419
420 entry = find_next_zero_bit(map, mapsize, last_entry);
421 }
422
423 if (entry > mapsize) {
424 kfree(ca_dmamap); 416 kfree(ca_dmamap);
425 goto map_return; 417 goto map_return;
426 } 418 }
427 419
428 for (i = 0; i < entries; i++) 420 bitmap_set(map, entry, entries);
429 set_bit(entry + i, map);
430 421
431 bus_addr = tioca_kern->ca_pciap_base + (entry * ps); 422 bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
432 423
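
The tioca_provider.c hunk replaces an open-coded search for a run of free GART entries (which also tested "entry > mapsize" rather than ">=") with the new bitmap helpers. The pattern, sketched with hedged error handling (the function itself jumps to map_return rather than returning an errno):

	/* find 'entries' consecutive clear bits, with no alignment constraint */
	entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
	if (entry >= mapsize)
		return -ENOSPC;			/* no contiguous run available */

	bitmap_set(map, entry, entries);	/* claim bits [entry, entry + entries) */
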
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 0cc34c94bf2b..2f85412ef730 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -102,7 +102,6 @@ typedef elf_fpreg_t elf_fpregset_t;
102 */ 102 */
103#define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0 103#define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0
104 104
105#define USE_ELF_CORE_DUMP
106#define ELF_EXEC_PAGESIZE PAGE_SIZE 105#define ELF_EXEC_PAGESIZE PAGE_SIZE
107 106
108/* 107/*
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b2..179a06489b10 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
24 * We make no fairness assumptions. They have a cost. 24 * We make no fairness assumptions. They have a cost.
25 */ 25 */
26 26
27#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) 27#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
28#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 28#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
29#define __raw_spin_unlock_wait(x) \ 29#define arch_spin_unlock_wait(x) \
30 do { cpu_relax(); } while (__raw_spin_is_locked(x)) 30 do { cpu_relax(); } while (arch_spin_is_locked(x))
31 31
32/** 32/**
33 * __raw_spin_trylock - Try spin lock and return a result 33 * arch_spin_trylock - Try spin lock and return a result
34 * @lock: Pointer to the lock variable 34 * @lock: Pointer to the lock variable
35 * 35 *
36 * __raw_spin_trylock() tries to get the lock and returns a result. 36 * arch_spin_trylock() tries to get the lock and returns a result.
37 * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 37 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
38 */ 38 */
39static inline int __raw_spin_trylock(raw_spinlock_t *lock) 39static inline int arch_spin_trylock(arch_spinlock_t *lock)
40{ 40{
41 int oldval; 41 int oldval;
42 unsigned long tmp1, tmp2; 42 unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
50 * } 50 * }
51 */ 51 */
52 __asm__ __volatile__ ( 52 __asm__ __volatile__ (
53 "# __raw_spin_trylock \n\t" 53 "# arch_spin_trylock \n\t"
54 "ldi %1, #0; \n\t" 54 "ldi %1, #0; \n\t"
55 "mvfc %2, psw; \n\t" 55 "mvfc %2, psw; \n\t"
56 "clrpsw #0x40 -> nop; \n\t" 56 "clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
69 return (oldval > 0); 69 return (oldval > 0);
70} 70}
71 71
72static inline void __raw_spin_lock(raw_spinlock_t *lock) 72static inline void arch_spin_lock(arch_spinlock_t *lock)
73{ 73{
74 unsigned long tmp0, tmp1; 74 unsigned long tmp0, tmp1;
75 75
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
84 * } 84 * }
85 */ 85 */
86 __asm__ __volatile__ ( 86 __asm__ __volatile__ (
87 "# __raw_spin_lock \n\t" 87 "# arch_spin_lock \n\t"
88 ".fillinsn \n" 88 ".fillinsn \n"
89 "1: \n\t" 89 "1: \n\t"
90 "mvfc %1, psw; \n\t" 90 "mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
111 ); 111 );
112} 112}
113 113
114static inline void __raw_spin_unlock(raw_spinlock_t *lock) 114static inline void arch_spin_unlock(arch_spinlock_t *lock)
115{ 115{
116 mb(); 116 mb();
117 lock->slock = 1; 117 lock->slock = 1;
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
140 * read_can_lock - would read_trylock() succeed? 140 * read_can_lock - would read_trylock() succeed?
141 * @lock: the rwlock in question. 141 * @lock: the rwlock in question.
142 */ 142 */
143#define __raw_read_can_lock(x) ((int)(x)->lock > 0) 143#define arch_read_can_lock(x) ((int)(x)->lock > 0)
144 144
145/** 145/**
146 * write_can_lock - would write_trylock() succeed? 146 * write_can_lock - would write_trylock() succeed?
147 * @lock: the rwlock in question. 147 * @lock: the rwlock in question.
148 */ 148 */
149#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 149#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
150 150
151static inline void __raw_read_lock(raw_rwlock_t *rw) 151static inline void arch_read_lock(arch_rwlock_t *rw)
152{ 152{
153 unsigned long tmp0, tmp1; 153 unsigned long tmp0, tmp1;
154 154
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
199 ); 199 );
200} 200}
201 201
202static inline void __raw_write_lock(raw_rwlock_t *rw) 202static inline void arch_write_lock(arch_rwlock_t *rw)
203{ 203{
204 unsigned long tmp0, tmp1, tmp2; 204 unsigned long tmp0, tmp1, tmp2;
205 205
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
252 ); 252 );
253} 253}
254 254
255static inline void __raw_read_unlock(raw_rwlock_t *rw) 255static inline void arch_read_unlock(arch_rwlock_t *rw)
256{ 256{
257 unsigned long tmp0, tmp1; 257 unsigned long tmp0, tmp1;
258 258
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
274 ); 274 );
275} 275}
276 276
277static inline void __raw_write_unlock(raw_rwlock_t *rw) 277static inline void arch_write_unlock(arch_rwlock_t *rw)
278{ 278{
279 unsigned long tmp0, tmp1, tmp2; 279 unsigned long tmp0, tmp1, tmp2;
280 280
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
298 ); 298 );
299} 299}
300 300
301static inline int __raw_read_trylock(raw_rwlock_t *lock) 301static inline int arch_read_trylock(arch_rwlock_t *lock)
302{ 302{
303 atomic_t *count = (atomic_t*)lock; 303 atomic_t *count = (atomic_t*)lock;
304 if (atomic_dec_return(count) >= 0) 304 if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
307 return 0; 307 return 0;
308} 308}
309 309
310static inline int __raw_write_trylock(raw_rwlock_t *lock) 310static inline int arch_write_trylock(arch_rwlock_t *lock)
311{ 311{
312 atomic_t *count = (atomic_t *)lock; 312 atomic_t *count = (atomic_t *)lock;
313 if (atomic_sub_and_test(RW_LOCK_BIAS, count)) 313 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
316 return 0; 316 return 0;
317} 317}
318 318
319#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 319#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
320#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 320#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
321 321
322#define _raw_spin_relax(lock) cpu_relax() 322#define arch_spin_relax(lock) cpu_relax()
323#define _raw_read_relax(lock) cpu_relax() 323#define arch_read_relax(lock) cpu_relax()
324#define _raw_write_relax(lock) cpu_relax() 324#define arch_write_relax(lock) cpu_relax()
325 325
326#endif /* _ASM_M32R_SPINLOCK_H */ 326#endif /* _ASM_M32R_SPINLOCK_H */
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 83f52105c0e4..92e27672661f 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -7,17 +7,17 @@
7 7
8typedef struct { 8typedef struct {
9 volatile int slock; 9 volatile int slock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
13 13
14typedef struct { 14typedef struct {
15 volatile int lock; 15 volatile int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define RW_LOCK_BIAS 0x01000000 18#define RW_LOCK_BIAS 0x01000000
19#define RW_LOCK_BIAS_STR "0x01000000" 19#define RW_LOCK_BIAS_STR "0x01000000"
20 20
21#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 21#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
22 22
23#endif /* _ASM_M32R_SPINLOCK_TYPES_H */ 23#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 8dfd31e87c4c..3c71f776872c 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v)
40 } 40 }
41 41
42 if (i < NR_IRQS) { 42 if (i < NR_IRQS) {
43 spin_lock_irqsave(&irq_desc[i].lock, flags); 43 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
44 action = irq_desc[i].action; 44 action = irq_desc[i].action;
45 if (!action) 45 if (!action)
46 goto skip; 46 goto skip;
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v)
59 59
60 seq_putc(p, '\n'); 60 seq_putc(p, '\n');
61skip: 61skip:
62 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 62 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
63 } 63 }
64 return 0; 64 return 0;
65} 65}
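
The show_interrupts() hunks in this and the later microblaze, MIPS, mn10300 and parisc files are mechanical fallout from irq_desc's lock having become a raw_spinlock_t in the generic IRQ code, so callers must use the raw_spin_*() names; the locking pattern itself is unchanged. For readers new to the _irqsave/_irqrestore pair used here, the stand-alone model below shows only that calling convention (the previous interrupt state is captured into a caller-supplied variable and put back on unlock); real interrupt masking and the lock itself are stubbed out, and all names are made up for the illustration.

    #include <stdio.h>

    static int irqs_enabled = 1;    /* stand-in for the CPU interrupt flag */

    #define model_lock_irqsave(flags)      do { (flags) = irqs_enabled; irqs_enabled = 0; } while (0)
    #define model_unlock_irqrestore(flags) do { irqs_enabled = (flags); } while (0)

    int main(void)
    {
        unsigned long flags;

        model_lock_irqsave(flags);
        printf("in critical section, irqs_enabled=%d\n", irqs_enabled);  /* 0 */
        model_unlock_irqrestore(flags);
        printf("after restore,      irqs_enabled=%d\n", irqs_enabled);   /* 1 */
        return 0;
    }
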
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 0b0f49eb876b..01c193d91412 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -59,7 +59,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
59 is actually used on ASV. */ 59 is actually used on ASV. */
60#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0 60#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
61 61
62#define USE_ELF_CORE_DUMP
63#ifndef CONFIG_SUN3 62#ifndef CONFIG_SUN3
64#define ELF_EXEC_PAGESIZE 4096 63#define ELF_EXEC_PAGESIZE 4096
65#else 64#else
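
This USE_ELF_CORE_DUMP deletion, like the matching ones in the other asm/elf.h files further down, reflects that every architecture defined the macro unconditionally, so the generic ELF loader can gate core-dump support on CONFIG_ELF_CORE alone. The stand-alone sketch below only illustrates the before/after preprocessor gate; both macros here are local stand-ins rather than the real kernel/Kconfig symbols.

    #include <stdio.h>

    #define CONFIG_ELF_CORE 1       /* stand-in for the Kconfig option */
    /* #define USE_ELF_CORE_DUMP */ /* the per-arch opt-in that no longer exists */

    int main(void)
    {
    #if defined(CONFIG_ELF_CORE) && defined(USE_ELF_CORE_DUMP)
        puts("old gate: arch opt-in AND config option");
    #elif defined(CONFIG_ELF_CORE)
        puts("new gate: CONFIG_ELF_CORE alone decides");
    #endif
        return 0;
    }
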
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index f92fc0dda006..7d4acf2b278e 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -77,7 +77,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
77#define ELF_DATA ELFDATA2MSB 77#define ELF_DATA ELFDATA2MSB
78#endif 78#endif
79 79
80#define USE_ELF_CORE_DUMP
81#define ELF_EXEC_PAGESIZE 4096 80#define ELF_EXEC_PAGESIZE 4096
82 81
83 82
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 7d5ddd62d4d2..0f06034d1fe0 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v)
68 } 68 }
69 69
70 if (i < nr_irq) { 70 if (i < nr_irq) {
71 spin_lock_irqsave(&irq_desc[i].lock, flags); 71 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
72 action = irq_desc[i].action; 72 action = irq_desc[i].action;
73 if (!action) 73 if (!action)
74 goto skip; 74 goto skip;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v)
89 89
90 seq_putc(p, '\n'); 90 seq_putc(p, '\n');
91skip: 91skip:
92 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 92 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
93 } 93 }
94 return 0; 94 return 0;
95} 95}
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 7990694cda22..7a6a35dbe529 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -326,7 +326,6 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
326#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \ 326#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
327 dump_task_fpu(tsk, elf_fpregs) 327 dump_task_fpu(tsk, elf_fpregs)
328 328
329#define USE_ELF_CORE_DUMP
330#define ELF_EXEC_PAGESIZE PAGE_SIZE 329#define ELF_EXEC_PAGESIZE PAGE_SIZE
331 330
332/* This yields a mask that user programs can use to figure out what 331/* This yields a mask that user programs can use to figure out what
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f08..21ef9efbde43 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
34 * becomes equal to the the initial value of the tail. 34 * becomes equal to the the initial value of the tail.
35 */ 35 */
36 36
37static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 37static inline int arch_spin_is_locked(arch_spinlock_t *lock)
38{ 38{
39 unsigned int counters = ACCESS_ONCE(lock->lock); 39 unsigned int counters = ACCESS_ONCE(lock->lock);
40 40
41 return ((counters >> 14) ^ counters) & 0x1fff; 41 return ((counters >> 14) ^ counters) & 0x1fff;
42} 42}
43 43
44#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 44#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
45#define __raw_spin_unlock_wait(x) \ 45#define arch_spin_unlock_wait(x) \
46 while (__raw_spin_is_locked(x)) { cpu_relax(); } 46 while (arch_spin_is_locked(x)) { cpu_relax(); }
47 47
48static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 48static inline int arch_spin_is_contended(arch_spinlock_t *lock)
49{ 49{
50 unsigned int counters = ACCESS_ONCE(lock->lock); 50 unsigned int counters = ACCESS_ONCE(lock->lock);
51 51
52 return (((counters >> 14) - counters) & 0x1fff) > 1; 52 return (((counters >> 14) - counters) & 0x1fff) > 1;
53} 53}
54#define __raw_spin_is_contended __raw_spin_is_contended 54#define arch_spin_is_contended arch_spin_is_contended
55 55
56static inline void __raw_spin_lock(raw_spinlock_t *lock) 56static inline void arch_spin_lock(arch_spinlock_t *lock)
57{ 57{
58 int my_ticket; 58 int my_ticket;
59 int tmp; 59 int tmp;
60 60
61 if (R10000_LLSC_WAR) { 61 if (R10000_LLSC_WAR) {
62 __asm__ __volatile__ ( 62 __asm__ __volatile__ (
63 " .set push # __raw_spin_lock \n" 63 " .set push # arch_spin_lock \n"
64 " .set noreorder \n" 64 " .set noreorder \n"
65 " \n" 65 " \n"
66 "1: ll %[ticket], %[ticket_ptr] \n" 66 "1: ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
94 [my_ticket] "=&r" (my_ticket)); 94 [my_ticket] "=&r" (my_ticket));
95 } else { 95 } else {
96 __asm__ __volatile__ ( 96 __asm__ __volatile__ (
97 " .set push # __raw_spin_lock \n" 97 " .set push # arch_spin_lock \n"
98 " .set noreorder \n" 98 " .set noreorder \n"
99 " \n" 99 " \n"
100 " ll %[ticket], %[ticket_ptr] \n" 100 " ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
134 smp_llsc_mb(); 134 smp_llsc_mb();
135} 135}
136 136
137static inline void __raw_spin_unlock(raw_spinlock_t *lock) 137static inline void arch_spin_unlock(arch_spinlock_t *lock)
138{ 138{
139 int tmp; 139 int tmp;
140 140
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
142 142
143 if (R10000_LLSC_WAR) { 143 if (R10000_LLSC_WAR) {
144 __asm__ __volatile__ ( 144 __asm__ __volatile__ (
145 " # __raw_spin_unlock \n" 145 " # arch_spin_unlock \n"
146 "1: ll %[ticket], %[ticket_ptr] \n" 146 "1: ll %[ticket], %[ticket_ptr] \n"
147 " addiu %[ticket], %[ticket], 1 \n" 147 " addiu %[ticket], %[ticket], 1 \n"
148 " ori %[ticket], %[ticket], 0x2000 \n" 148 " ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
153 [ticket] "=&r" (tmp)); 153 [ticket] "=&r" (tmp));
154 } else { 154 } else {
155 __asm__ __volatile__ ( 155 __asm__ __volatile__ (
156 " .set push # __raw_spin_unlock \n" 156 " .set push # arch_spin_unlock \n"
157 " .set noreorder \n" 157 " .set noreorder \n"
158 " \n" 158 " \n"
159 " ll %[ticket], %[ticket_ptr] \n" 159 " ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
174 } 174 }
175} 175}
176 176
177static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) 177static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
178{ 178{
179 int tmp, tmp2, tmp3; 179 int tmp, tmp2, tmp3;
180 180
181 if (R10000_LLSC_WAR) { 181 if (R10000_LLSC_WAR) {
182 __asm__ __volatile__ ( 182 __asm__ __volatile__ (
183 " .set push # __raw_spin_trylock \n" 183 " .set push # arch_spin_trylock \n"
184 " .set noreorder \n" 184 " .set noreorder \n"
185 " \n" 185 " \n"
186 "1: ll %[ticket], %[ticket_ptr] \n" 186 "1: ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
204 [now_serving] "=&r" (tmp3)); 204 [now_serving] "=&r" (tmp3));
205 } else { 205 } else {
206 __asm__ __volatile__ ( 206 __asm__ __volatile__ (
207 " .set push # __raw_spin_trylock \n" 207 " .set push # arch_spin_trylock \n"
208 " .set noreorder \n" 208 " .set noreorder \n"
209 " \n" 209 " \n"
210 " ll %[ticket], %[ticket_ptr] \n" 210 " ll %[ticket], %[ticket_ptr] \n"
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
248 * read_can_lock - would read_trylock() succeed? 248 * read_can_lock - would read_trylock() succeed?
249 * @lock: the rwlock in question. 249 * @lock: the rwlock in question.
250 */ 250 */
251#define __raw_read_can_lock(rw) ((rw)->lock >= 0) 251#define arch_read_can_lock(rw) ((rw)->lock >= 0)
252 252
253/* 253/*
254 * write_can_lock - would write_trylock() succeed? 254 * write_can_lock - would write_trylock() succeed?
255 * @lock: the rwlock in question. 255 * @lock: the rwlock in question.
256 */ 256 */
257#define __raw_write_can_lock(rw) (!(rw)->lock) 257#define arch_write_can_lock(rw) (!(rw)->lock)
258 258
259static inline void __raw_read_lock(raw_rwlock_t *rw) 259static inline void arch_read_lock(arch_rwlock_t *rw)
260{ 260{
261 unsigned int tmp; 261 unsigned int tmp;
262 262
263 if (R10000_LLSC_WAR) { 263 if (R10000_LLSC_WAR) {
264 __asm__ __volatile__( 264 __asm__ __volatile__(
265 " .set noreorder # __raw_read_lock \n" 265 " .set noreorder # arch_read_lock \n"
266 "1: ll %1, %2 \n" 266 "1: ll %1, %2 \n"
267 " bltz %1, 1b \n" 267 " bltz %1, 1b \n"
268 " addu %1, 1 \n" 268 " addu %1, 1 \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
275 : "memory"); 275 : "memory");
276 } else { 276 } else {
277 __asm__ __volatile__( 277 __asm__ __volatile__(
278 " .set noreorder # __raw_read_lock \n" 278 " .set noreorder # arch_read_lock \n"
279 "1: ll %1, %2 \n" 279 "1: ll %1, %2 \n"
280 " bltz %1, 2f \n" 280 " bltz %1, 2f \n"
281 " addu %1, 1 \n" 281 " addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
301/* Note the use of sub, not subu which will make the kernel die with an 301/* Note the use of sub, not subu which will make the kernel die with an
302 overflow exception if we ever try to unlock an rwlock that is already 302 overflow exception if we ever try to unlock an rwlock that is already
303 unlocked or is being held by a writer. */ 303 unlocked or is being held by a writer. */
304static inline void __raw_read_unlock(raw_rwlock_t *rw) 304static inline void arch_read_unlock(arch_rwlock_t *rw)
305{ 305{
306 unsigned int tmp; 306 unsigned int tmp;
307 307
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
309 309
310 if (R10000_LLSC_WAR) { 310 if (R10000_LLSC_WAR) {
311 __asm__ __volatile__( 311 __asm__ __volatile__(
312 "1: ll %1, %2 # __raw_read_unlock \n" 312 "1: ll %1, %2 # arch_read_unlock \n"
313 " sub %1, 1 \n" 313 " sub %1, 1 \n"
314 " sc %1, %0 \n" 314 " sc %1, %0 \n"
315 " beqzl %1, 1b \n" 315 " beqzl %1, 1b \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
318 : "memory"); 318 : "memory");
319 } else { 319 } else {
320 __asm__ __volatile__( 320 __asm__ __volatile__(
321 " .set noreorder # __raw_read_unlock \n" 321 " .set noreorder # arch_read_unlock \n"
322 "1: ll %1, %2 \n" 322 "1: ll %1, %2 \n"
323 " sub %1, 1 \n" 323 " sub %1, 1 \n"
324 " sc %1, %0 \n" 324 " sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
335 } 335 }
336} 336}
337 337
338static inline void __raw_write_lock(raw_rwlock_t *rw) 338static inline void arch_write_lock(arch_rwlock_t *rw)
339{ 339{
340 unsigned int tmp; 340 unsigned int tmp;
341 341
342 if (R10000_LLSC_WAR) { 342 if (R10000_LLSC_WAR) {
343 __asm__ __volatile__( 343 __asm__ __volatile__(
344 " .set noreorder # __raw_write_lock \n" 344 " .set noreorder # arch_write_lock \n"
345 "1: ll %1, %2 \n" 345 "1: ll %1, %2 \n"
346 " bnez %1, 1b \n" 346 " bnez %1, 1b \n"
347 " lui %1, 0x8000 \n" 347 " lui %1, 0x8000 \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
354 : "memory"); 354 : "memory");
355 } else { 355 } else {
356 __asm__ __volatile__( 356 __asm__ __volatile__(
357 " .set noreorder # __raw_write_lock \n" 357 " .set noreorder # arch_write_lock \n"
358 "1: ll %1, %2 \n" 358 "1: ll %1, %2 \n"
359 " bnez %1, 2f \n" 359 " bnez %1, 2f \n"
360 " lui %1, 0x8000 \n" 360 " lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
377 smp_llsc_mb(); 377 smp_llsc_mb();
378} 378}
379 379
380static inline void __raw_write_unlock(raw_rwlock_t *rw) 380static inline void arch_write_unlock(arch_rwlock_t *rw)
381{ 381{
382 smp_mb(); 382 smp_mb();
383 383
384 __asm__ __volatile__( 384 __asm__ __volatile__(
385 " # __raw_write_unlock \n" 385 " # arch_write_unlock \n"
386 " sw $0, %0 \n" 386 " sw $0, %0 \n"
387 : "=m" (rw->lock) 387 : "=m" (rw->lock)
388 : "m" (rw->lock) 388 : "m" (rw->lock)
389 : "memory"); 389 : "memory");
390} 390}
391 391
392static inline int __raw_read_trylock(raw_rwlock_t *rw) 392static inline int arch_read_trylock(arch_rwlock_t *rw)
393{ 393{
394 unsigned int tmp; 394 unsigned int tmp;
395 int ret; 395 int ret;
396 396
397 if (R10000_LLSC_WAR) { 397 if (R10000_LLSC_WAR) {
398 __asm__ __volatile__( 398 __asm__ __volatile__(
399 " .set noreorder # __raw_read_trylock \n" 399 " .set noreorder # arch_read_trylock \n"
400 " li %2, 0 \n" 400 " li %2, 0 \n"
401 "1: ll %1, %3 \n" 401 "1: ll %1, %3 \n"
402 " bltz %1, 2f \n" 402 " bltz %1, 2f \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
413 : "memory"); 413 : "memory");
414 } else { 414 } else {
415 __asm__ __volatile__( 415 __asm__ __volatile__(
416 " .set noreorder # __raw_read_trylock \n" 416 " .set noreorder # arch_read_trylock \n"
417 " li %2, 0 \n" 417 " li %2, 0 \n"
418 "1: ll %1, %3 \n" 418 "1: ll %1, %3 \n"
419 " bltz %1, 2f \n" 419 " bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
433 return ret; 433 return ret;
434} 434}
435 435
436static inline int __raw_write_trylock(raw_rwlock_t *rw) 436static inline int arch_write_trylock(arch_rwlock_t *rw)
437{ 437{
438 unsigned int tmp; 438 unsigned int tmp;
439 int ret; 439 int ret;
440 440
441 if (R10000_LLSC_WAR) { 441 if (R10000_LLSC_WAR) {
442 __asm__ __volatile__( 442 __asm__ __volatile__(
443 " .set noreorder # __raw_write_trylock \n" 443 " .set noreorder # arch_write_trylock \n"
444 " li %2, 0 \n" 444 " li %2, 0 \n"
445 "1: ll %1, %3 \n" 445 "1: ll %1, %3 \n"
446 " bnez %1, 2f \n" 446 " bnez %1, 2f \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
457 : "memory"); 457 : "memory");
458 } else { 458 } else {
459 __asm__ __volatile__( 459 __asm__ __volatile__(
460 " .set noreorder # __raw_write_trylock \n" 460 " .set noreorder # arch_write_trylock \n"
461 " li %2, 0 \n" 461 " li %2, 0 \n"
462 "1: ll %1, %3 \n" 462 "1: ll %1, %3 \n"
463 " bnez %1, 2f \n" 463 " bnez %1, 2f \n"
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
480 return ret; 480 return ret;
481} 481}
482 482
483#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 483#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
484#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 484#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
485 485
486#define _raw_spin_relax(lock) cpu_relax() 486#define arch_spin_relax(lock) cpu_relax()
487#define _raw_read_relax(lock) cpu_relax() 487#define arch_read_relax(lock) cpu_relax()
488#define _raw_write_relax(lock) cpu_relax() 488#define arch_write_relax(lock) cpu_relax()
489 489
490#endif /* _ASM_SPINLOCK_H */ 490#endif /* _ASM_SPINLOCK_H */
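
The MIPS spinlock keeps its ticket-lock design under the new arch_spin_*() names: the lock word packs a "ticket" (tail) counter handed to each new locker and a "now serving" (head) counter advanced on unlock, so the lock is held whenever the two disagree and waiters are served in FIFO order. The stand-alone C model below captures that idea with plain C11 atomics and separate 16-bit head/tail fields; it deliberately ignores the kernel's packed bit layout, the ll/sc loops and the R10000 workaround.

    #include <stdatomic.h>
    #include <stdio.h>

    struct ticket_lock {
        atomic_ushort head;     /* "now serving" */
        atomic_ushort tail;     /* next ticket to hand out */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
        unsigned short me = atomic_fetch_add(&l->tail, 1);  /* take a ticket */

        while (atomic_load(&l->head) != me)
            ;                                               /* spin until served */
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
        atomic_fetch_add(&l->head, 1);                      /* serve the next ticket */
    }

    static int ticket_is_locked(struct ticket_lock *l)
    {
        /* Held whenever head and tail differ; arch_spin_is_locked() above
         * makes the same comparison on its packed counters. */
        return atomic_load(&l->head) != atomic_load(&l->tail);
    }

    int main(void)
    {
        struct ticket_lock l = { 0, 0 };

        ticket_lock(&l);
        printf("locked? %d\n", ticket_is_locked(&l));   /* 1 */
        ticket_unlock(&l);
        printf("locked? %d\n", ticket_is_locked(&l));   /* 0 */
        return 0;
    }
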
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index adeedaa116c1..ee197c2f9c98 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -12,14 +12,14 @@ typedef struct {
12 * bits 15..28: ticket 12 * bits 15..28: ticket
13 */ 13 */
14 unsigned int lock; 14 unsigned int lock;
15} raw_spinlock_t; 15} arch_spinlock_t;
16 16
17#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 17#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
18 18
19typedef struct { 19typedef struct {
20 volatile unsigned int lock; 20 volatile unsigned int lock;
21} raw_rwlock_t; 21} arch_rwlock_t;
22 22
23#define __RAW_RW_LOCK_UNLOCKED { 0 } 23#define __ARCH_RW_LOCK_UNLOCKED { 0 }
24 24
25#endif 25#endif
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7b845ba9dff4..8b0b4181219f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v)
99 } 99 }
100 100
101 if (i < NR_IRQS) { 101 if (i < NR_IRQS) {
102 spin_lock_irqsave(&irq_desc[i].lock, flags); 102 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
103 action = irq_desc[i].action; 103 action = irq_desc[i].action;
104 if (!action) 104 if (!action)
105 goto skip; 105 goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v)
118 118
119 seq_putc(p, '\n'); 119 seq_putc(p, '\n');
120skip: 120skip:
121 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 121 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
122 } else if (i == NR_IRQS) { 122 } else if (i == NR_IRQS) {
123 seq_putc(p, '\n'); 123 seq_putc(p, '\n');
124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 6d39e222b170..6153b6a05ccf 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
159 159
160 if (current_cpu_type() == CPU_VR4111 || 160 if (current_cpu_type() == CPU_VR4111 ||
161 current_cpu_type() == CPU_VR4121) { 161 current_cpu_type() == CPU_VR4121) {
162 spin_lock_irqsave(&desc->lock, flags); 162 raw_spin_lock_irqsave(&desc->lock, flags);
163 icu1_set(MPIUINTREG, mask); 163 icu1_set(MPIUINTREG, mask);
164 spin_unlock_irqrestore(&desc->lock, flags); 164 raw_spin_unlock_irqrestore(&desc->lock, flags);
165 } 165 }
166} 166}
167 167
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask)
174 174
175 if (current_cpu_type() == CPU_VR4111 || 175 if (current_cpu_type() == CPU_VR4111 ||
176 current_cpu_type() == CPU_VR4121) { 176 current_cpu_type() == CPU_VR4121) {
177 spin_lock_irqsave(&desc->lock, flags); 177 raw_spin_lock_irqsave(&desc->lock, flags);
178 icu1_clear(MPIUINTREG, mask); 178 icu1_clear(MPIUINTREG, mask);
179 spin_unlock_irqrestore(&desc->lock, flags); 179 raw_spin_unlock_irqrestore(&desc->lock, flags);
180 } 180 }
181} 181}
182 182
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
189 189
190 if (current_cpu_type() == CPU_VR4111 || 190 if (current_cpu_type() == CPU_VR4111 ||
191 current_cpu_type() == CPU_VR4121) { 191 current_cpu_type() == CPU_VR4121) {
192 spin_lock_irqsave(&desc->lock, flags); 192 raw_spin_lock_irqsave(&desc->lock, flags);
193 icu1_set(MAIUINTREG, mask); 193 icu1_set(MAIUINTREG, mask);
194 spin_unlock_irqrestore(&desc->lock, flags); 194 raw_spin_unlock_irqrestore(&desc->lock, flags);
195 } 195 }
196} 196}
197 197
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask)
204 204
205 if (current_cpu_type() == CPU_VR4111 || 205 if (current_cpu_type() == CPU_VR4111 ||
206 current_cpu_type() == CPU_VR4121) { 206 current_cpu_type() == CPU_VR4121) {
207 spin_lock_irqsave(&desc->lock, flags); 207 raw_spin_lock_irqsave(&desc->lock, flags);
208 icu1_clear(MAIUINTREG, mask); 208 icu1_clear(MAIUINTREG, mask);
209 spin_unlock_irqrestore(&desc->lock, flags); 209 raw_spin_unlock_irqrestore(&desc->lock, flags);
210 } 210 }
211} 211}
212 212
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
219 219
220 if (current_cpu_type() == CPU_VR4111 || 220 if (current_cpu_type() == CPU_VR4111 ||
221 current_cpu_type() == CPU_VR4121) { 221 current_cpu_type() == CPU_VR4121) {
222 spin_lock_irqsave(&desc->lock, flags); 222 raw_spin_lock_irqsave(&desc->lock, flags);
223 icu1_set(MKIUINTREG, mask); 223 icu1_set(MKIUINTREG, mask);
224 spin_unlock_irqrestore(&desc->lock, flags); 224 raw_spin_unlock_irqrestore(&desc->lock, flags);
225 } 225 }
226} 226}
227 227
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask)
234 234
235 if (current_cpu_type() == CPU_VR4111 || 235 if (current_cpu_type() == CPU_VR4111 ||
236 current_cpu_type() == CPU_VR4121) { 236 current_cpu_type() == CPU_VR4121) {
237 spin_lock_irqsave(&desc->lock, flags); 237 raw_spin_lock_irqsave(&desc->lock, flags);
238 icu1_clear(MKIUINTREG, mask); 238 icu1_clear(MKIUINTREG, mask);
239 spin_unlock_irqrestore(&desc->lock, flags); 239 raw_spin_unlock_irqrestore(&desc->lock, flags);
240 } 240 }
241} 241}
242 242
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ; 247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
248 unsigned long flags; 248 unsigned long flags;
249 249
250 spin_lock_irqsave(&desc->lock, flags); 250 raw_spin_lock_irqsave(&desc->lock, flags);
251 icu1_set(MMACINTREG, mask); 251 icu1_set(MMACINTREG, mask);
252 spin_unlock_irqrestore(&desc->lock, flags); 252 raw_spin_unlock_irqrestore(&desc->lock, flags);
253} 253}
254 254
255EXPORT_SYMBOL(vr41xx_enable_macint); 255EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask)
259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ; 259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
260 unsigned long flags; 260 unsigned long flags;
261 261
262 spin_lock_irqsave(&desc->lock, flags); 262 raw_spin_lock_irqsave(&desc->lock, flags);
263 icu1_clear(MMACINTREG, mask); 263 icu1_clear(MMACINTREG, mask);
264 spin_unlock_irqrestore(&desc->lock, flags); 264 raw_spin_unlock_irqrestore(&desc->lock, flags);
265} 265}
266 266
267EXPORT_SYMBOL(vr41xx_disable_macint); 267EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask)
271 struct irq_desc *desc = irq_desc + DSIU_IRQ; 271 struct irq_desc *desc = irq_desc + DSIU_IRQ;
272 unsigned long flags; 272 unsigned long flags;
273 273
274 spin_lock_irqsave(&desc->lock, flags); 274 raw_spin_lock_irqsave(&desc->lock, flags);
275 icu1_set(MDSIUINTREG, mask); 275 icu1_set(MDSIUINTREG, mask);
276 spin_unlock_irqrestore(&desc->lock, flags); 276 raw_spin_unlock_irqrestore(&desc->lock, flags);
277} 277}
278 278
279EXPORT_SYMBOL(vr41xx_enable_dsiuint); 279EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask)
283 struct irq_desc *desc = irq_desc + DSIU_IRQ; 283 struct irq_desc *desc = irq_desc + DSIU_IRQ;
284 unsigned long flags; 284 unsigned long flags;
285 285
286 spin_lock_irqsave(&desc->lock, flags); 286 raw_spin_lock_irqsave(&desc->lock, flags);
287 icu1_clear(MDSIUINTREG, mask); 287 icu1_clear(MDSIUINTREG, mask);
288 spin_unlock_irqrestore(&desc->lock, flags); 288 raw_spin_unlock_irqrestore(&desc->lock, flags);
289} 289}
290 290
291EXPORT_SYMBOL(vr41xx_disable_dsiuint); 291EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
295 struct irq_desc *desc = irq_desc + FIR_IRQ; 295 struct irq_desc *desc = irq_desc + FIR_IRQ;
296 unsigned long flags; 296 unsigned long flags;
297 297
298 spin_lock_irqsave(&desc->lock, flags); 298 raw_spin_lock_irqsave(&desc->lock, flags);
299 icu2_set(MFIRINTREG, mask); 299 icu2_set(MFIRINTREG, mask);
300 spin_unlock_irqrestore(&desc->lock, flags); 300 raw_spin_unlock_irqrestore(&desc->lock, flags);
301} 301}
302 302
303EXPORT_SYMBOL(vr41xx_enable_firint); 303EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask)
307 struct irq_desc *desc = irq_desc + FIR_IRQ; 307 struct irq_desc *desc = irq_desc + FIR_IRQ;
308 unsigned long flags; 308 unsigned long flags;
309 309
310 spin_lock_irqsave(&desc->lock, flags); 310 raw_spin_lock_irqsave(&desc->lock, flags);
311 icu2_clear(MFIRINTREG, mask); 311 icu2_clear(MFIRINTREG, mask);
312 spin_unlock_irqrestore(&desc->lock, flags); 312 raw_spin_unlock_irqrestore(&desc->lock, flags);
313} 313}
314 314
315EXPORT_SYMBOL(vr41xx_disable_firint); 315EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
322 if (current_cpu_type() == CPU_VR4122 || 322 if (current_cpu_type() == CPU_VR4122 ||
323 current_cpu_type() == CPU_VR4131 || 323 current_cpu_type() == CPU_VR4131 ||
324 current_cpu_type() == CPU_VR4133) { 324 current_cpu_type() == CPU_VR4133) {
325 spin_lock_irqsave(&desc->lock, flags); 325 raw_spin_lock_irqsave(&desc->lock, flags);
326 icu2_write(MPCIINTREG, PCIINT0); 326 icu2_write(MPCIINTREG, PCIINT0);
327 spin_unlock_irqrestore(&desc->lock, flags); 327 raw_spin_unlock_irqrestore(&desc->lock, flags);
328 } 328 }
329} 329}
330 330
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
338 if (current_cpu_type() == CPU_VR4122 || 338 if (current_cpu_type() == CPU_VR4122 ||
339 current_cpu_type() == CPU_VR4131 || 339 current_cpu_type() == CPU_VR4131 ||
340 current_cpu_type() == CPU_VR4133) { 340 current_cpu_type() == CPU_VR4133) {
341 spin_lock_irqsave(&desc->lock, flags); 341 raw_spin_lock_irqsave(&desc->lock, flags);
342 icu2_write(MPCIINTREG, 0); 342 icu2_write(MPCIINTREG, 0);
343 spin_unlock_irqrestore(&desc->lock, flags); 343 raw_spin_unlock_irqrestore(&desc->lock, flags);
344 } 344 }
345} 345}
346 346
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
354 if (current_cpu_type() == CPU_VR4122 || 354 if (current_cpu_type() == CPU_VR4122 ||
355 current_cpu_type() == CPU_VR4131 || 355 current_cpu_type() == CPU_VR4131 ||
356 current_cpu_type() == CPU_VR4133) { 356 current_cpu_type() == CPU_VR4133) {
357 spin_lock_irqsave(&desc->lock, flags); 357 raw_spin_lock_irqsave(&desc->lock, flags);
358 icu2_write(MSCUINTREG, SCUINT0); 358 icu2_write(MSCUINTREG, SCUINT0);
359 spin_unlock_irqrestore(&desc->lock, flags); 359 raw_spin_unlock_irqrestore(&desc->lock, flags);
360 } 360 }
361} 361}
362 362
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
370 if (current_cpu_type() == CPU_VR4122 || 370 if (current_cpu_type() == CPU_VR4122 ||
371 current_cpu_type() == CPU_VR4131 || 371 current_cpu_type() == CPU_VR4131 ||
372 current_cpu_type() == CPU_VR4133) { 372 current_cpu_type() == CPU_VR4133) {
373 spin_lock_irqsave(&desc->lock, flags); 373 raw_spin_lock_irqsave(&desc->lock, flags);
374 icu2_write(MSCUINTREG, 0); 374 icu2_write(MSCUINTREG, 0);
375 spin_unlock_irqrestore(&desc->lock, flags); 375 raw_spin_unlock_irqrestore(&desc->lock, flags);
376 } 376 }
377} 377}
378 378
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
386 if (current_cpu_type() == CPU_VR4122 || 386 if (current_cpu_type() == CPU_VR4122 ||
387 current_cpu_type() == CPU_VR4131 || 387 current_cpu_type() == CPU_VR4131 ||
388 current_cpu_type() == CPU_VR4133) { 388 current_cpu_type() == CPU_VR4133) {
389 spin_lock_irqsave(&desc->lock, flags); 389 raw_spin_lock_irqsave(&desc->lock, flags);
390 icu2_set(MCSIINTREG, mask); 390 icu2_set(MCSIINTREG, mask);
391 spin_unlock_irqrestore(&desc->lock, flags); 391 raw_spin_unlock_irqrestore(&desc->lock, flags);
392 } 392 }
393} 393}
394 394
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask)
402 if (current_cpu_type() == CPU_VR4122 || 402 if (current_cpu_type() == CPU_VR4122 ||
403 current_cpu_type() == CPU_VR4131 || 403 current_cpu_type() == CPU_VR4131 ||
404 current_cpu_type() == CPU_VR4133) { 404 current_cpu_type() == CPU_VR4133) {
405 spin_lock_irqsave(&desc->lock, flags); 405 raw_spin_lock_irqsave(&desc->lock, flags);
406 icu2_clear(MCSIINTREG, mask); 406 icu2_clear(MCSIINTREG, mask);
407 spin_unlock_irqrestore(&desc->lock, flags); 407 raw_spin_unlock_irqrestore(&desc->lock, flags);
408 } 408 }
409} 409}
410 410
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
418 if (current_cpu_type() == CPU_VR4122 || 418 if (current_cpu_type() == CPU_VR4122 ||
419 current_cpu_type() == CPU_VR4131 || 419 current_cpu_type() == CPU_VR4131 ||
420 current_cpu_type() == CPU_VR4133) { 420 current_cpu_type() == CPU_VR4133) {
421 spin_lock_irqsave(&desc->lock, flags); 421 raw_spin_lock_irqsave(&desc->lock, flags);
422 icu2_write(MBCUINTREG, BCUINTR); 422 icu2_write(MBCUINTREG, BCUINTR);
423 spin_unlock_irqrestore(&desc->lock, flags); 423 raw_spin_unlock_irqrestore(&desc->lock, flags);
424 } 424 }
425} 425}
426 426
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
434 if (current_cpu_type() == CPU_VR4122 || 434 if (current_cpu_type() == CPU_VR4122 ||
435 current_cpu_type() == CPU_VR4131 || 435 current_cpu_type() == CPU_VR4131 ||
436 current_cpu_type() == CPU_VR4133) { 436 current_cpu_type() == CPU_VR4133) {
437 spin_lock_irqsave(&desc->lock, flags); 437 raw_spin_lock_irqsave(&desc->lock, flags);
438 icu2_write(MBCUINTREG, 0); 438 icu2_write(MBCUINTREG, 0);
439 spin_unlock_irqrestore(&desc->lock, flags); 439 raw_spin_unlock_irqrestore(&desc->lock, flags);
440 } 440 }
441} 441}
442 442
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
486 486
487 pin = SYSINT1_IRQ_TO_PIN(irq); 487 pin = SYSINT1_IRQ_TO_PIN(irq);
488 488
489 spin_lock_irq(&desc->lock); 489 raw_spin_lock_irq(&desc->lock);
490 490
491 intassign0 = icu1_read(INTASSIGN0); 491 intassign0 = icu1_read(INTASSIGN0);
492 intassign1 = icu1_read(INTASSIGN1); 492 intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
525 intassign1 |= (uint16_t)assign << 9; 525 intassign1 |= (uint16_t)assign << 9;
526 break; 526 break;
527 default: 527 default:
528 spin_unlock_irq(&desc->lock); 528 raw_spin_unlock_irq(&desc->lock);
529 return -EINVAL; 529 return -EINVAL;
530 } 530 }
531 531
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
533 icu1_write(INTASSIGN0, intassign0); 533 icu1_write(INTASSIGN0, intassign0);
534 icu1_write(INTASSIGN1, intassign1); 534 icu1_write(INTASSIGN1, intassign1);
535 535
536 spin_unlock_irq(&desc->lock); 536 raw_spin_unlock_irq(&desc->lock);
537 537
538 return 0; 538 return 0;
539} 539}
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
546 546
547 pin = SYSINT2_IRQ_TO_PIN(irq); 547 pin = SYSINT2_IRQ_TO_PIN(irq);
548 548
549 spin_lock_irq(&desc->lock); 549 raw_spin_lock_irq(&desc->lock);
550 550
551 intassign2 = icu1_read(INTASSIGN2); 551 intassign2 = icu1_read(INTASSIGN2);
552 intassign3 = icu1_read(INTASSIGN3); 552 intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
593 intassign3 |= (uint16_t)assign << 12; 593 intassign3 |= (uint16_t)assign << 12;
594 break; 594 break;
595 default: 595 default:
596 spin_unlock_irq(&desc->lock); 596 raw_spin_unlock_irq(&desc->lock);
597 return -EINVAL; 597 return -EINVAL;
598 } 598 }
599 599
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
601 icu1_write(INTASSIGN2, intassign2); 601 icu1_write(INTASSIGN2, intassign2);
602 icu1_write(INTASSIGN3, intassign3); 602 icu1_write(INTASSIGN3, intassign3);
603 603
604 spin_unlock_irq(&desc->lock); 604 raw_spin_unlock_irq(&desc->lock);
605 605
606 return 0; 606 return 0;
607} 607}
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index 75a70aa9fd6f..e5fa97cd9a14 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -77,7 +77,6 @@ do { \
77 _ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \ 77 _ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \
78} while (0) 78} while (0)
79 79
80#define USE_ELF_CORE_DUMP
81#define CORE_DUMP_USE_REGSET 80#define CORE_DUMP_USE_REGSET
82#define ELF_EXEC_PAGESIZE 4096 81#define ELF_EXEC_PAGESIZE 4096
83 82
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 4c3c58ef5cda..e2d5ed891f37 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v)
215 215
216 /* display information rows, one per active CPU */ 216 /* display information rows, one per active CPU */
217 case 1 ... NR_IRQS - 1: 217 case 1 ... NR_IRQS - 1:
218 spin_lock_irqsave(&irq_desc[i].lock, flags); 218 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
219 219
220 action = irq_desc[i].action; 220 action = irq_desc[i].action;
221 if (action) { 221 if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v)
235 seq_putc(p, '\n'); 235 seq_putc(p, '\n');
236 } 236 }
237 237
238 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 238 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
239 break; 239 break;
240 240
241 /* polish off with NMI and error counters */ 241 /* polish off with NMI and error counters */
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b2..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,19 +27,19 @@
27# define ATOMIC_HASH_SIZE 4 27# define ATOMIC_HASH_SIZE 4
28# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) 28# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
29 29
30extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; 30extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
31 31
32/* Can't use raw_spin_lock_irq because of #include problems, so 32/* Can't use raw_spin_lock_irq because of #include problems, so
33 * this is the substitute */ 33 * this is the substitute */
34#define _atomic_spin_lock_irqsave(l,f) do { \ 34#define _atomic_spin_lock_irqsave(l,f) do { \
35 raw_spinlock_t *s = ATOMIC_HASH(l); \ 35 arch_spinlock_t *s = ATOMIC_HASH(l); \
36 local_irq_save(f); \ 36 local_irq_save(f); \
37 __raw_spin_lock(s); \ 37 arch_spin_lock(s); \
38} while(0) 38} while(0)
39 39
40#define _atomic_spin_unlock_irqrestore(l,f) do { \ 40#define _atomic_spin_unlock_irqrestore(l,f) do { \
41 raw_spinlock_t *s = ATOMIC_HASH(l); \ 41 arch_spinlock_t *s = ATOMIC_HASH(l); \
42 __raw_spin_unlock(s); \ 42 arch_spin_unlock(s); \
43 local_irq_restore(f); \ 43 local_irq_restore(f); \
44} while(0) 44} while(0)
45 45
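
Context for the rename above: PA-RISC's only atomic primitive is ldcw (load-and-clear word), so atomic_t operations are serialised through a small hash of spinlocks indexed by the target address, and this hunk merely switches the hash's lock type and lock/unlock calls to the arch_* names. The stand-alone model below shows the address hashing only; the cache-line size and table size are made-up stand-ins for the kernel constants.

    #include <stdio.h>

    #define L1_CACHE_BYTES   64     /* stand-in; parisc uses its real line size */
    #define ATOMIC_HASH_SIZE 4

    /* Same shape as the kernel's ATOMIC_HASH(): addresses in one cache line
     * share a lock slot, neighbouring lines spread across the table. */
    static unsigned int atomic_hash(const void *addr)
    {
        return ((unsigned long)addr / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1);
    }

    int main(void)
    {
        static int counters[8];

        for (int i = 0; i < 8; i++)
            printf("&counters[%d] -> lock slot %u\n", i, atomic_hash(&counters[i]));
        return 0;
    }
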
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 8cfc553fc837..75e46c557a16 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -32,14 +32,14 @@
32 "\t.popsection" \ 32 "\t.popsection" \
33 : : "i" (__FILE__), "i" (__LINE__), \ 33 : : "i" (__FILE__), "i" (__LINE__), \
34 "i" (0), "i" (sizeof(struct bug_entry)) ); \ 34 "i" (0), "i" (sizeof(struct bug_entry)) ); \
35 for(;;) ; \ 35 unreachable(); \
36 } while(0) 36 } while(0)
37 37
38#else 38#else
39#define BUG() \ 39#define BUG() \
40 do { \ 40 do { \
41 asm volatile(PARISC_BUG_BREAK_ASM : : ); \ 41 asm volatile(PARISC_BUG_BREAK_ASM : : ); \
42 for(;;) ; \ 42 unreachable(); \
43 } while(0) 43 } while(0)
44#endif 44#endif
45 45
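
The bug.h change swaps the trailing for(;;) loop for unreachable(): both tell the compiler that execution never continues past the trap, but the loop emitted real code while the new helper (which, on compilers that support it, expands to __builtin_unreachable()) costs nothing. The stand-alone sketch below shows the guarantee itself; MY_BUG() is a made-up stand-in for the parisc BUG() and the empty asm stands in for the trapping instruction. Building with -Wall and the __builtin_unreachable() line removed produces the "control reaches end of non-void function" warning the hint exists to prevent.

    #include <stdio.h>

    /* Stand-in for BUG(): a trap the compiler cannot see through, followed by
     * the "never returns" hint that replaced the for(;;) loop. */
    #define MY_BUG()                        \
        do {                                \
            __asm__ volatile("");           \
            __builtin_unreachable();        \
        } while (0)

    static int must_be_positive(int v)
    {
        if (v > 0)
            return v;
        MY_BUG();   /* no return statement needed after this point */
    }

    int main(void)
    {
        printf("%d\n", must_be_positive(5));
        return 0;
    }
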
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 9c802eb4be84..19f6cb1a4a1c 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */
328 such function. */ 328 such function. */
329#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0 329#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
330 330
331#define USE_ELF_CORE_DUMP
332#define ELF_EXEC_PAGESIZE 4096 331#define ELF_EXEC_PAGESIZE 4096
333 332
334/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 333/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 2fa05dd6aeee..72c0fafaa039 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -20,6 +20,20 @@ struct ftrace_ret_stack {
20 * Defined in entry.S 20 * Defined in entry.S
21 */ 21 */
22extern void return_to_handler(void); 22extern void return_to_handler(void);
23
24
25extern unsigned long return_address(unsigned int);
26
27#define HAVE_ARCH_CALLER_ADDR
28
29#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
30#define CALLER_ADDR1 return_address(1)
31#define CALLER_ADDR2 return_address(2)
32#define CALLER_ADDR3 return_address(3)
33#define CALLER_ADDR4 return_address(4)
34#define CALLER_ADDR5 return_address(5)
35#define CALLER_ADDR6 return_address(6)
36
23#endif /* __ASSEMBLY__ */ 37#endif /* __ASSEMBLY__ */
24 38
25#endif /* _ASM_PARISC_FTRACE_H */ 39#endif /* _ASM_PARISC_FTRACE_H */
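
These additions give parisc real CALLER_ADDR1..CALLER_ADDR6 values by routing them through the new return_address() helper (implemented in kernel/unwind.c later in this patch); the latency tracers use those macros to report where, for example, interrupts were disabled. CALLER_ADDR0 still uses the gcc builtin, and the stand-alone userspace sketch below illustrates just that builtin; deeper frames are exactly what the unwinder-based helper provides in the kernel.

    #include <stdio.h>

    static void __attribute__((noinline)) report(void)
    {
        /* CALLER_ADDR0 equivalent: the address this call will return to. */
        printf("called from %p\n", __builtin_return_address(0));
    }

    static void __attribute__((noinline)) outer(void)
    {
        report();
    }

    int main(void)
    {
        outer();
        return 0;
    }
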
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa8..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/spinlock_types.h> 6#include <asm/spinlock_types.h>
7 7
8static inline int __raw_spin_is_locked(raw_spinlock_t *x) 8static inline int arch_spin_is_locked(arch_spinlock_t *x)
9{ 9{
10 volatile unsigned int *a = __ldcw_align(x); 10 volatile unsigned int *a = __ldcw_align(x);
11 return *a == 0; 11 return *a == 0;
12} 12}
13 13
14#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) 14#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
15#define __raw_spin_unlock_wait(x) \ 15#define arch_spin_unlock_wait(x) \
16 do { cpu_relax(); } while (__raw_spin_is_locked(x)) 16 do { cpu_relax(); } while (arch_spin_is_locked(x))
17 17
18static inline void __raw_spin_lock_flags(raw_spinlock_t *x, 18static inline void arch_spin_lock_flags(arch_spinlock_t *x,
19 unsigned long flags) 19 unsigned long flags)
20{ 20{
21 volatile unsigned int *a; 21 volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
33 mb(); 33 mb();
34} 34}
35 35
36static inline void __raw_spin_unlock(raw_spinlock_t *x) 36static inline void arch_spin_unlock(arch_spinlock_t *x)
37{ 37{
38 volatile unsigned int *a; 38 volatile unsigned int *a;
39 mb(); 39 mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
42 mb(); 42 mb();
43} 43}
44 44
45static inline int __raw_spin_trylock(raw_spinlock_t *x) 45static inline int arch_spin_trylock(arch_spinlock_t *x)
46{ 46{
47 volatile unsigned int *a; 47 volatile unsigned int *a;
48 int ret; 48 int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
69 69
70/* Note that we have to ensure interrupts are disabled in case we're 70/* Note that we have to ensure interrupts are disabled in case we're
71 * interrupted by some other code that wants to grab the same read lock */ 71 * interrupted by some other code that wants to grab the same read lock */
72static __inline__ void __raw_read_lock(raw_rwlock_t *rw) 72static __inline__ void arch_read_lock(arch_rwlock_t *rw)
73{ 73{
74 unsigned long flags; 74 unsigned long flags;
75 local_irq_save(flags); 75 local_irq_save(flags);
76 __raw_spin_lock_flags(&rw->lock, flags); 76 arch_spin_lock_flags(&rw->lock, flags);
77 rw->counter++; 77 rw->counter++;
78 __raw_spin_unlock(&rw->lock); 78 arch_spin_unlock(&rw->lock);
79 local_irq_restore(flags); 79 local_irq_restore(flags);
80} 80}
81 81
82/* Note that we have to ensure interrupts are disabled in case we're 82/* Note that we have to ensure interrupts are disabled in case we're
83 * interrupted by some other code that wants to grab the same read lock */ 83 * interrupted by some other code that wants to grab the same read lock */
84static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) 84static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
85{ 85{
86 unsigned long flags; 86 unsigned long flags;
87 local_irq_save(flags); 87 local_irq_save(flags);
88 __raw_spin_lock_flags(&rw->lock, flags); 88 arch_spin_lock_flags(&rw->lock, flags);
89 rw->counter--; 89 rw->counter--;
90 __raw_spin_unlock(&rw->lock); 90 arch_spin_unlock(&rw->lock);
91 local_irq_restore(flags); 91 local_irq_restore(flags);
92} 92}
93 93
94/* Note that we have to ensure interrupts are disabled in case we're 94/* Note that we have to ensure interrupts are disabled in case we're
95 * interrupted by some other code that wants to grab the same read lock */ 95 * interrupted by some other code that wants to grab the same read lock */
96static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) 96static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
97{ 97{
98 unsigned long flags; 98 unsigned long flags;
99 retry: 99 retry:
100 local_irq_save(flags); 100 local_irq_save(flags);
101 if (__raw_spin_trylock(&rw->lock)) { 101 if (arch_spin_trylock(&rw->lock)) {
102 rw->counter++; 102 rw->counter++;
103 __raw_spin_unlock(&rw->lock); 103 arch_spin_unlock(&rw->lock);
104 local_irq_restore(flags); 104 local_irq_restore(flags);
105 return 1; 105 return 1;
106 } 106 }
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
111 return 0; 111 return 0;
112 112
113 /* Wait until we have a realistic chance at the lock */ 113 /* Wait until we have a realistic chance at the lock */
114 while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) 114 while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
115 cpu_relax(); 115 cpu_relax();
116 116
117 goto retry; 117 goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
119 119
120/* Note that we have to ensure interrupts are disabled in case we're 120/* Note that we have to ensure interrupts are disabled in case we're
121 * interrupted by some other code that wants to read_trylock() this lock */ 121 * interrupted by some other code that wants to read_trylock() this lock */
122static __inline__ void __raw_write_lock(raw_rwlock_t *rw) 122static __inline__ void arch_write_lock(arch_rwlock_t *rw)
123{ 123{
124 unsigned long flags; 124 unsigned long flags;
125retry: 125retry:
126 local_irq_save(flags); 126 local_irq_save(flags);
127 __raw_spin_lock_flags(&rw->lock, flags); 127 arch_spin_lock_flags(&rw->lock, flags);
128 128
129 if (rw->counter != 0) { 129 if (rw->counter != 0) {
130 __raw_spin_unlock(&rw->lock); 130 arch_spin_unlock(&rw->lock);
131 local_irq_restore(flags); 131 local_irq_restore(flags);
132 132
133 while (rw->counter != 0) 133 while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
141 local_irq_restore(flags); 141 local_irq_restore(flags);
142} 142}
143 143
144static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) 144static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
145{ 145{
146 rw->counter = 0; 146 rw->counter = 0;
147 __raw_spin_unlock(&rw->lock); 147 arch_spin_unlock(&rw->lock);
148} 148}
149 149
150/* Note that we have to ensure interrupts are disabled in case we're 150/* Note that we have to ensure interrupts are disabled in case we're
151 * interrupted by some other code that wants to read_trylock() this lock */ 151 * interrupted by some other code that wants to read_trylock() this lock */
152static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) 152static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
153{ 153{
154 unsigned long flags; 154 unsigned long flags;
155 int result = 0; 155 int result = 0;
156 156
157 local_irq_save(flags); 157 local_irq_save(flags);
158 if (__raw_spin_trylock(&rw->lock)) { 158 if (arch_spin_trylock(&rw->lock)) {
159 if (rw->counter == 0) { 159 if (rw->counter == 0) {
160 rw->counter = -1; 160 rw->counter = -1;
161 result = 1; 161 result = 1;
162 } else { 162 } else {
163 /* Read-locked. Oh well. */ 163 /* Read-locked. Oh well. */
164 __raw_spin_unlock(&rw->lock); 164 arch_spin_unlock(&rw->lock);
165 } 165 }
166 } 166 }
167 local_irq_restore(flags); 167 local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
173 * read_can_lock - would read_trylock() succeed? 173 * read_can_lock - would read_trylock() succeed?
174 * @lock: the rwlock in question. 174 * @lock: the rwlock in question.
175 */ 175 */
176static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) 176static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
177{ 177{
178 return rw->counter >= 0; 178 return rw->counter >= 0;
179} 179}
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
182 * write_can_lock - would write_trylock() succeed? 182 * write_can_lock - would write_trylock() succeed?
183 * @lock: the rwlock in question. 183 * @lock: the rwlock in question.
184 */ 184 */
185static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) 185static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
186{ 186{
187 return !rw->counter; 187 return !rw->counter;
188} 188}
189 189
190#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 190#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
191#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 191#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
192 192
193#define _raw_spin_relax(lock) cpu_relax() 193#define arch_spin_relax(lock) cpu_relax()
194#define _raw_read_relax(lock) cpu_relax() 194#define arch_read_relax(lock) cpu_relax()
195#define _raw_write_relax(lock) cpu_relax() 195#define arch_write_relax(lock) cpu_relax()
196 196
197#endif /* __ASM_SPINLOCK_H */ 197#endif /* __ASM_SPINLOCK_H */
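
The parisc rwlock hunks are again a pure rename, but the structure they preserve is worth spelling out: arch_rwlock_t is an arch_spinlock_t guarding a counter, where a positive counter means that many readers, zero means free, and -1 means write-locked (the writer keeps the inner lock held for the duration). The stand-alone model below mirrors only that bookkeeping, with a pthread mutex standing in for the inner spinlock and no interrupt masking or retry loops; compile with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    struct model_rwlock {
        pthread_mutex_t lock;   /* stands in for the inner ldcw spinlock */
        int counter;            /* >0 readers, 0 free, -1 write-locked */
    };

    static void model_read_lock(struct model_rwlock *rw)
    {
        pthread_mutex_lock(&rw->lock);
        rw->counter++;                  /* register ourselves as a reader */
        pthread_mutex_unlock(&rw->lock);
    }

    static void model_read_unlock(struct model_rwlock *rw)
    {
        pthread_mutex_lock(&rw->lock);
        rw->counter--;
        pthread_mutex_unlock(&rw->lock);
    }

    static int model_write_trylock(struct model_rwlock *rw)
    {
        int got = 0;

        pthread_mutex_lock(&rw->lock);
        if (rw->counter == 0) {
            rw->counter = -1;           /* mark write-locked */
            got = 1;
        }
        /* The kernel version keeps the inner lock held on success; this
         * model drops it and relies on counter == -1 alone. */
        pthread_mutex_unlock(&rw->lock);
        return got;
    }

    int main(void)
    {
        struct model_rwlock rw;

        pthread_mutex_init(&rw.lock, NULL);
        rw.counter = 0;

        model_read_lock(&rw);
        printf("write_trylock with a reader: %d\n", model_write_trylock(&rw)); /* 0 */
        model_read_unlock(&rw);
        printf("write_trylock when free:     %d\n", model_write_trylock(&rw)); /* 1 */
        return 0;
    }
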
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b2..8c373aa28a86 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,18 +4,18 @@
4typedef struct { 4typedef struct {
5#ifdef CONFIG_PA20 5#ifdef CONFIG_PA20
6 volatile unsigned int slock; 6 volatile unsigned int slock;
7# define __RAW_SPIN_LOCK_UNLOCKED { 1 } 7# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
8#else 8#else
9 volatile unsigned int lock[4]; 9 volatile unsigned int lock[4];
10# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } 10# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
11#endif 11#endif
12} raw_spinlock_t; 12} arch_spinlock_t;
13 13
14typedef struct { 14typedef struct {
15 raw_spinlock_t lock; 15 arch_spinlock_t lock;
16 volatile int counter; 16 volatile int counter;
17} raw_rwlock_t; 17} arch_rwlock_t;
18 18
19#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
20 20
21#endif 21#endif
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index fcd3c707bf12..ec787b411e9a 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -244,9 +244,6 @@ int main(void)
244 DEFINE(THREAD_SZ, sizeof(struct thread_info)); 244 DEFINE(THREAD_SZ, sizeof(struct thread_info));
245 DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64)); 245 DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
246 BLANK(); 246 BLANK();
247 DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
248 DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
249 BLANK();
250 DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base)); 247 DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
251 DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride)); 248 DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
252 DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count)); 249 DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 2e7610cb33d5..efbcee5d2220 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -145,7 +145,7 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
145#endif 145#endif
146 146
147static struct irq_chip cpu_interrupt_type = { 147static struct irq_chip cpu_interrupt_type = {
148 .typename = "CPU", 148 .name = "CPU",
149 .startup = cpu_startup_irq, 149 .startup = cpu_startup_irq,
150 .shutdown = cpu_disable_irq, 150 .shutdown = cpu_disable_irq,
151 .enable = cpu_enable_irq, 151 .enable = cpu_enable_irq,
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
180 if (i < NR_IRQS) { 180 if (i < NR_IRQS) {
181 struct irqaction *action; 181 struct irqaction *action;
182 182
183 spin_lock_irqsave(&irq_desc[i].lock, flags); 183 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
184 action = irq_desc[i].action; 184 action = irq_desc[i].action;
185 if (!action) 185 if (!action)
186 goto skip; 186 goto skip;
@@ -192,7 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
192 seq_printf(p, "%10u ", kstat_irqs(i)); 192 seq_printf(p, "%10u ", kstat_irqs(i));
193#endif 193#endif
194 194
195 seq_printf(p, " %14s", irq_desc[i].chip->typename); 195 seq_printf(p, " %14s", irq_desc[i].chip->name);
196#ifndef PARISC_IRQ_CR16_COUNTS 196#ifndef PARISC_IRQ_CR16_COUNTS
197 seq_printf(p, " %s", action->name); 197 seq_printf(p, " %s", action->name);
198 198
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
224 224
225 seq_putc(p, '\n'); 225 seq_putc(p, '\n');
226 skip: 226 skip:
227 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 227 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
228 } 228 }
229 229
230 return 0; 230 return 0;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index e8467e4aa8d1..fb37ac52e46c 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -26,7 +26,6 @@
26#include <linux/stddef.h> 26#include <linux/stddef.h>
27#include <linux/compat.h> 27#include <linux/compat.h>
28#include <linux/elf.h> 28#include <linux/elf.h>
29#include <linux/tracehook.h>
30#include <asm/ucontext.h> 29#include <asm/ucontext.h>
31#include <asm/rt_sigframe.h> 30#include <asm/rt_sigframe.h>
32#include <asm/uaccess.h> 31#include <asm/uaccess.h>
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 1fd0f0cec037..3f2fce8ce6b6 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -60,8 +60,6 @@ static int smp_debug_lvl = 0;
60#define smp_debug(lvl, ...) do { } while(0) 60#define smp_debug(lvl, ...) do { } while(0)
61#endif /* DEBUG_SMP */ 61#endif /* DEBUG_SMP */
62 62
63DEFINE_SPINLOCK(smp_lock);
64
65volatile struct task_struct *smp_init_current_idle_task; 63volatile struct task_struct *smp_init_current_idle_task;
66 64
67/* track which CPU is booting */ 65/* track which CPU is booting */
@@ -69,7 +67,7 @@ static volatile int cpu_now_booting __cpuinitdata;
69 67
70static int parisc_max_cpus __cpuinitdata = 1; 68static int parisc_max_cpus __cpuinitdata = 1;
71 69
72DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; 70static DEFINE_PER_CPU(spinlock_t, ipi_lock);
73 71
74enum ipi_message_type { 72enum ipi_message_type {
75 IPI_NOP=0, 73 IPI_NOP=0,
@@ -438,6 +436,11 @@ void __init smp_prepare_boot_cpu(void)
438*/ 436*/
439void __init smp_prepare_cpus(unsigned int max_cpus) 437void __init smp_prepare_cpus(unsigned int max_cpus)
440{ 438{
439 int cpu;
440
441 for_each_possible_cpu(cpu)
442 spin_lock_init(&per_cpu(ipi_lock, cpu));
443
441 init_cpu_present(cpumask_of(0)); 444 init_cpu_present(cpumask_of(0));
442 445
443 parisc_max_cpus = max_cpus; 446 parisc_max_cpus = max_cpus;
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 76d23ec8dfaa..9779ece2b070 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -26,13 +26,7 @@
26#include <linux/shm.h> 26#include <linux/shm.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/uio.h> 28#include <linux/uio.h>
29#include <linux/nfs_fs.h>
30#include <linux/ncp_fs.h> 29#include <linux/ncp_fs.h>
31#include <linux/sunrpc/svc.h>
32#include <linux/nfsd/nfsd.h>
33#include <linux/nfsd/cache.h>
34#include <linux/nfsd/xdr.h>
35#include <linux/nfsd/syscall.h>
36#include <linux/poll.h> 30#include <linux/poll.h>
37#include <linux/personality.h> 31#include <linux/personality.h>
38#include <linux/stat.h> 32#include <linux/stat.h>
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index a36799e85693..d58eac1a8288 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,6 +13,7 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/sort.h>
16 17
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18#include <asm/assembly.h> 19#include <asm/assembly.h>
@@ -115,24 +116,18 @@ unwind_table_init(struct unwind_table *table, const char *name,
115 } 116 }
116} 117}
117 118
119static int cmp_unwind_table_entry(const void *a, const void *b)
120{
121 return ((const struct unwind_table_entry *)a)->region_start
122 - ((const struct unwind_table_entry *)b)->region_start;
123}
124
118static void 125static void
119unwind_table_sort(struct unwind_table_entry *start, 126unwind_table_sort(struct unwind_table_entry *start,
120 struct unwind_table_entry *finish) 127 struct unwind_table_entry *finish)
121{ 128{
122 struct unwind_table_entry el, *p, *q; 129 sort(start, finish - start, sizeof(struct unwind_table_entry),
123 130 cmp_unwind_table_entry, NULL);
124 for (p = start + 1; p < finish; ++p) {
125 if (p[0].region_start < p[-1].region_start) {
126 el = *p;
127 q = p;
128 do {
129 q[0] = q[-1];
130 --q;
131 } while (q > start &&
132 el.region_start < q[-1].region_start);
133 *q = el;
134 }
135 }
136} 131}
137 132
138struct unwind_table * 133struct unwind_table *
@@ -417,3 +412,30 @@ int unwind_to_user(struct unwind_frame_info *info)
417 412
418 return ret; 413 return ret;
419} 414}
415
416unsigned long return_address(unsigned int level)
417{
418 struct unwind_frame_info info;
419 struct pt_regs r;
420 unsigned long sp;
421
422 /* initialize unwind info */
423 asm volatile ("copy %%r30, %0" : "=r"(sp));
424 memset(&r, 0, sizeof(struct pt_regs));
425 r.iaoq[0] = (unsigned long) current_text_addr();
426 r.gr[2] = (unsigned long) __builtin_return_address(0);
427 r.gr[30] = sp;
428 unwind_frame_init(&info, current, &r);
429
430 /* unwind stack */
431 ++level;
432 do {
433 if (unwind_once(&info) < 0 || info.ip == 0)
434 return 0;
435 if (!__kernel_text_address(info.ip)) {
436 return 0;
437 }
438 } while (info.ip && level--);
439
440 return info.ip;
441}
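
Replacing the open-coded insertion sort with the library sort() (lib/sort.c, a heapsort) keeps unwind-table setup cheap even when the entries arrive badly ordered; the comparator simply orders entries by region_start. The stand-alone model below shows the same comparator pattern using the C library's qsort() in place of the kernel's sort(); the cut-down struct entry is a stand-in for struct unwind_table_entry, and the comparison avoids the subtraction idiom so it cannot overflow.

    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        unsigned long region_start;
        unsigned long region_end;
    };

    static int cmp_entry(const void *a, const void *b)
    {
        const struct entry *ea = a, *eb = b;

        if (ea->region_start < eb->region_start)
            return -1;
        return ea->region_start > eb->region_start;
    }

    int main(void)
    {
        struct entry table[] = {
            { 0x3000, 0x30ff }, { 0x1000, 0x10ff }, { 0x2000, 0x20ff },
        };
        size_t n = sizeof(table) / sizeof(table[0]);

        /* kernel call above: sort(start, finish - start, sizeof(*start),
         * cmp_unwind_table_entry, NULL); qsort() has the same shape. */
        qsort(table, n, sizeof(table[0]), cmp_entry);

        for (size_t i = 0; i < n; i++)
            printf("0x%lx\n", table[i].region_start);
        return 0;
    }
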
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index e3eb739fab19..353963d42059 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -12,8 +12,8 @@
12#include <asm/atomic.h> 12#include <asm/atomic.h>
13 13
14#ifdef CONFIG_SMP 14#ifdef CONFIG_SMP
15raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { 15arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
16 [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED 16 [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
17}; 17};
18#endif 18#endif
19 19
diff --git a/arch/powerpc/include/asm/async_tx.h b/arch/powerpc/include/asm/async_tx.h
new file mode 100644
index 000000000000..8b2dc55d01ab
--- /dev/null
+++ b/arch/powerpc/include/asm/async_tx.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008-2009 DENX Software Engineering.
3 *
4 * Author: Yuri Tikhonov <yur@emcraft.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59
18 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called COPYING.
22 */
23#ifndef _ASM_POWERPC_ASYNC_TX_H_
24#define _ASM_POWERPC_ASYNC_TX_H_
25
26#if defined(CONFIG_440SPe) || defined(CONFIG_440SP)
27extern struct dma_chan *
28ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
29 struct page **dst_lst, int dst_cnt, struct page **src_lst,
30 int src_cnt, size_t src_sz);
31
32#define async_tx_find_channel(dep, cap, dst_lst, dst_cnt, src_lst, \
33 src_cnt, src_sz) \
34 ppc440spe_async_tx_find_best_channel(cap, dst_lst, dst_cnt, src_lst, \
35 src_cnt, src_sz)
36#else
37
38#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
39 __async_tx_find_channel(dep, type)
40
41struct dma_chan *
42__async_tx_find_channel(struct async_submit_ctl *submit,
43 enum dma_transaction_type tx_type);
44
45#endif
46
47#endif
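
The new header lets the 440SP/440SPe ADMA driver supply the channel-selection policy: when the platform is configured, async_tx_find_channel() becomes a macro forwarding to ppc440spe_async_tx_find_best_channel(), otherwise callers fall back to the generic __async_tx_find_channel() lookup. The stand-alone sketch below shows only that macro-override pattern; every name in it is an illustrative stand-in, not part of the real async_tx API.

    #include <stdio.h>

    static __attribute__((unused)) const char *generic_find_channel(const char *op)
    {
        (void)op;
        return "generic channel";
    }

    #ifdef CONFIG_MY_PLATFORM_ADMA
    static const char *platform_find_channel(const char *op)
    {
        (void)op;
        return "platform ADMA channel";
    }
    #define find_channel(op) platform_find_channel(op)
    #else
    #define find_channel(op) generic_find_channel(op)
    #endif

    int main(void)
    {
        /* Build with -DCONFIG_MY_PLATFORM_ADMA to take the override path. */
        printf("xor -> %s\n", find_channel("xor"));
        return 0;
    }
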
diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h
index 828e3aa1f2fc..380274de429f 100644
--- a/arch/powerpc/include/asm/dcr-regs.h
+++ b/arch/powerpc/include/asm/dcr-regs.h
@@ -157,4 +157,27 @@
157#define L2C_SNP_SSR_32G 0x0000f000 157#define L2C_SNP_SSR_32G 0x0000f000
158#define L2C_SNP_ESR 0x00000800 158#define L2C_SNP_ESR 0x00000800
159 159
160/*
161 * DCR register offsets for 440SP/440SPe I2O/DMA controller.
162 * The base address is configured in the device tree.
163 */
164#define DCRN_I2O0_IBAL 0x006
165#define DCRN_I2O0_IBAH 0x007
166#define I2O_REG_ENABLE 0x00000001 /* Enable I2O/DMA access */
167
168/* 440SP/440SPe Software Reset DCR */
169#define DCRN_SDR0_SRST 0x0200
170#define DCRN_SDR0_SRST_I2ODMA (0x80000000 >> 15) /* Reset I2O/DMA */
171
172/* 440SP/440SPe Memory Queue DCR offsets */
173#define DCRN_MQ0_XORBA 0x04
174#define DCRN_MQ0_CF2H 0x06
175#define DCRN_MQ0_CFBHL 0x0f
176#define DCRN_MQ0_BAUH 0x10
177
178/* HB/LL Paths Configuration Register */
179#define MQ0_CFBHL_TPLM 28
180#define MQ0_CFBHL_HBCL 23
181#define MQ0_CFBHL_POLY 15
182
160#endif /* __DCR_REGS_H__ */ 183#endif /* __DCR_REGS_H__ */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e281daebddca..80a973bb9e71 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -197,7 +197,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
197 if (!dev->dma_mask) 197 if (!dev->dma_mask)
198 return 0; 198 return 0;
199 199
200 return addr + size <= *dev->dma_mask; 200 return addr + size - 1 <= *dev->dma_mask;
201} 201}
202 202
203static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 203static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
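
The dma_capable() change above is an off-by-one correction: the old test rejected a buffer whose last byte sits exactly at the DMA mask, because addr + size points one past the end of the range. Comparing the address of the last byte (addr + size - 1) accepts that case. A standalone sketch of the boundary case (simplified types, not kernel code):

	/* Standalone sketch of the boundary case; types simplified. */
	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool capable_old(uint64_t addr, uint64_t size, uint64_t mask)
	{
		return addr + size <= mask;     /* rejects a range ending at the mask */
	}

	static bool capable_new(uint64_t addr, uint64_t size, uint64_t mask)
	{
		return addr + size - 1 <= mask; /* compares the last byte instead */
	}

	int main(void)
	{
		uint64_t mask = 0xffffffffULL;              /* 32-bit DMA mask */
		uint64_t addr = 0xfffff000ULL, size = 0x1000; /* ends exactly at the mask */

		printf("old: %d, new: %d\n",
		       capable_old(addr, size, mask),
		       capable_new(addr, size, mask));      /* old: 0, new: 1 */
		return 0;
	}
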
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 014a624f4c8e..17828ad411eb 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -170,7 +170,6 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
170#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) 170#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
171#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC) 171#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
172 172
173#define USE_ELF_CORE_DUMP
174#define CORE_DUMP_USE_REGSET 173#define CORE_DUMP_USE_REGSET
175#define ELF_EXEC_PAGESIZE PAGE_SIZE 174#define ELF_EXEC_PAGESIZE PAGE_SIZE
176 175
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 8c341490cfc5..cbd759e3cd78 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -140,6 +140,8 @@ extern void user_enable_single_step(struct task_struct *);
140extern void user_enable_block_step(struct task_struct *); 140extern void user_enable_block_step(struct task_struct *);
141extern void user_disable_single_step(struct task_struct *); 141extern void user_disable_single_step(struct task_struct *);
142 142
143#define ARCH_HAS_USER_SINGLE_STEP_INFO
144
143#endif /* __ASSEMBLY__ */ 145#endif /* __ASSEMBLY__ */
144 146
145#endif /* __KERNEL__ */ 147#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 168fce726201..20de73c36682 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
58 unsigned long entry; /* physical address pointer */ 58 unsigned long entry; /* physical address pointer */
59 unsigned long base; /* physical address pointer */ 59 unsigned long base; /* physical address pointer */
60 unsigned long size; 60 unsigned long size;
61 raw_spinlock_t lock; 61 arch_spinlock_t lock;
62 struct rtas_args args; 62 struct rtas_args args;
63 struct device_node *dev; /* virtual address pointer */ 63 struct device_node *dev; /* virtual address pointer */
64}; 64};
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 198266cf9e2d..764094cff681 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
28#include <asm/asm-compat.h> 28#include <asm/asm-compat.h>
29#include <asm/synch.h> 29#include <asm/synch.h>
30 30
31#define __raw_spin_is_locked(x) ((x)->slock != 0) 31#define arch_spin_is_locked(x) ((x)->slock != 0)
32 32
33#ifdef CONFIG_PPC64 33#ifdef CONFIG_PPC64
34/* use 0x800000yy when locked, where yy == CPU number */ 34/* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
54 * This returns the old value in the lock, so we succeeded 54 * This returns the old value in the lock, so we succeeded
55 * in getting the lock if the return value is 0. 55 * in getting the lock if the return value is 0.
56 */ 56 */
57static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) 57static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
58{ 58{
59 unsigned long tmp, token; 59 unsigned long tmp, token;
60 60
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
73 return tmp; 73 return tmp;
74} 74}
75 75
76static inline int __raw_spin_trylock(raw_spinlock_t *lock) 76static inline int arch_spin_trylock(arch_spinlock_t *lock)
77{ 77{
78 CLEAR_IO_SYNC; 78 CLEAR_IO_SYNC;
79 return arch_spin_trylock(lock) == 0; 79 return __arch_spin_trylock(lock) == 0;
80} 80}
81 81
82/* 82/*
@@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
96#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 96#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
97/* We only yield to the hypervisor if we are in shared processor mode */ 97/* We only yield to the hypervisor if we are in shared processor mode */
98#define SHARED_PROCESSOR (get_lppaca()->shared_proc) 98#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
99extern void __spin_yield(raw_spinlock_t *lock); 99extern void __spin_yield(arch_spinlock_t *lock);
100extern void __rw_yield(raw_rwlock_t *lock); 100extern void __rw_yield(arch_rwlock_t *lock);
101#else /* SPLPAR || ISERIES */ 101#else /* SPLPAR || ISERIES */
102#define __spin_yield(x) barrier() 102#define __spin_yield(x) barrier()
103#define __rw_yield(x) barrier() 103#define __rw_yield(x) barrier()
104#define SHARED_PROCESSOR 0 104#define SHARED_PROCESSOR 0
105#endif 105#endif
106 106
107static inline void __raw_spin_lock(raw_spinlock_t *lock) 107static inline void arch_spin_lock(arch_spinlock_t *lock)
108{ 108{
109 CLEAR_IO_SYNC; 109 CLEAR_IO_SYNC;
110 while (1) { 110 while (1) {
111 if (likely(arch_spin_trylock(lock) == 0)) 111 if (likely(__arch_spin_trylock(lock) == 0))
112 break; 112 break;
113 do { 113 do {
114 HMT_low(); 114 HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
120} 120}
121 121
122static inline 122static inline
123void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 123void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
124{ 124{
125 unsigned long flags_dis; 125 unsigned long flags_dis;
126 126
127 CLEAR_IO_SYNC; 127 CLEAR_IO_SYNC;
128 while (1) { 128 while (1) {
129 if (likely(arch_spin_trylock(lock) == 0)) 129 if (likely(__arch_spin_trylock(lock) == 0))
130 break; 130 break;
131 local_save_flags(flags_dis); 131 local_save_flags(flags_dis);
132 local_irq_restore(flags); 132 local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
140 } 140 }
141} 141}
142 142
143static inline void __raw_spin_unlock(raw_spinlock_t *lock) 143static inline void arch_spin_unlock(arch_spinlock_t *lock)
144{ 144{
145 SYNC_IO; 145 SYNC_IO;
146 __asm__ __volatile__("# __raw_spin_unlock\n\t" 146 __asm__ __volatile__("# arch_spin_unlock\n\t"
147 LWSYNC_ON_SMP: : :"memory"); 147 LWSYNC_ON_SMP: : :"memory");
148 lock->slock = 0; 148 lock->slock = 0;
149} 149}
150 150
151#ifdef CONFIG_PPC64 151#ifdef CONFIG_PPC64
152extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); 152extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
153#else 153#else
154#define __raw_spin_unlock_wait(lock) \ 154#define arch_spin_unlock_wait(lock) \
155 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 155 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
156#endif 156#endif
157 157
158/* 158/*
@@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
166 * read-locks. 166 * read-locks.
167 */ 167 */
168 168
169#define __raw_read_can_lock(rw) ((rw)->lock >= 0) 169#define arch_read_can_lock(rw) ((rw)->lock >= 0)
170#define __raw_write_can_lock(rw) (!(rw)->lock) 170#define arch_write_can_lock(rw) (!(rw)->lock)
171 171
172#ifdef CONFIG_PPC64 172#ifdef CONFIG_PPC64
173#define __DO_SIGN_EXTEND "extsw %0,%0\n" 173#define __DO_SIGN_EXTEND "extsw %0,%0\n"
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
181 * This returns the old value in the lock + 1, 181 * This returns the old value in the lock + 1,
182 * so we got a read lock if the return value is > 0. 182 * so we got a read lock if the return value is > 0.
183 */ 183 */
184static inline long arch_read_trylock(raw_rwlock_t *rw) 184static inline long __arch_read_trylock(arch_rwlock_t *rw)
185{ 185{
186 long tmp; 186 long tmp;
187 187
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
205 * This returns the old value in the lock, 205 * This returns the old value in the lock,
206 * so we got the write lock if the return value is 0. 206 * so we got the write lock if the return value is 0.
207 */ 207 */
208static inline long arch_write_trylock(raw_rwlock_t *rw) 208static inline long __arch_write_trylock(arch_rwlock_t *rw)
209{ 209{
210 long tmp, token; 210 long tmp, token;
211 211
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
225 return tmp; 225 return tmp;
226} 226}
227 227
228static inline void __raw_read_lock(raw_rwlock_t *rw) 228static inline void arch_read_lock(arch_rwlock_t *rw)
229{ 229{
230 while (1) { 230 while (1) {
231 if (likely(arch_read_trylock(rw) > 0)) 231 if (likely(__arch_read_trylock(rw) > 0))
232 break; 232 break;
233 do { 233 do {
234 HMT_low(); 234 HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
239 } 239 }
240} 240}
241 241
242static inline void __raw_write_lock(raw_rwlock_t *rw) 242static inline void arch_write_lock(arch_rwlock_t *rw)
243{ 243{
244 while (1) { 244 while (1) {
245 if (likely(arch_write_trylock(rw) == 0)) 245 if (likely(__arch_write_trylock(rw) == 0))
246 break; 246 break;
247 do { 247 do {
248 HMT_low(); 248 HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
253 } 253 }
254} 254}
255 255
256static inline int __raw_read_trylock(raw_rwlock_t *rw) 256static inline int arch_read_trylock(arch_rwlock_t *rw)
257{ 257{
258 return arch_read_trylock(rw) > 0; 258 return __arch_read_trylock(rw) > 0;
259} 259}
260 260
261static inline int __raw_write_trylock(raw_rwlock_t *rw) 261static inline int arch_write_trylock(arch_rwlock_t *rw)
262{ 262{
263 return arch_write_trylock(rw) == 0; 263 return __arch_write_trylock(rw) == 0;
264} 264}
265 265
266static inline void __raw_read_unlock(raw_rwlock_t *rw) 266static inline void arch_read_unlock(arch_rwlock_t *rw)
267{ 267{
268 long tmp; 268 long tmp;
269 269
@@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
280 : "cr0", "xer", "memory"); 280 : "cr0", "xer", "memory");
281} 281}
282 282
283static inline void __raw_write_unlock(raw_rwlock_t *rw) 283static inline void arch_write_unlock(arch_rwlock_t *rw)
284{ 284{
285 __asm__ __volatile__("# write_unlock\n\t" 285 __asm__ __volatile__("# write_unlock\n\t"
286 LWSYNC_ON_SMP: : :"memory"); 286 LWSYNC_ON_SMP: : :"memory");
287 rw->lock = 0; 287 rw->lock = 0;
288} 288}
289 289
290#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 290#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
291#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 291#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
292 292
293#define _raw_spin_relax(lock) __spin_yield(lock) 293#define arch_spin_relax(lock) __spin_yield(lock)
294#define _raw_read_relax(lock) __rw_yield(lock) 294#define arch_read_relax(lock) __rw_yield(lock)
295#define _raw_write_relax(lock) __rw_yield(lock) 295#define arch_write_relax(lock) __rw_yield(lock)
296 296
297#endif /* __KERNEL__ */ 297#endif /* __KERNEL__ */
298#endif /* __ASM_SPINLOCK_H */ 298#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 74236c9f05b1..2351adc4fdc4 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int slock; 9 volatile unsigned int slock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile signed int lock; 15 volatile signed int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define __RAW_RW_LOCK_UNLOCKED { 0 } 18#define __ARCH_RW_LOCK_UNLOCKED { 0 }
19 19
20#endif 20#endif
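
Throughout this series the arch-level lock types are renamed from raw_spinlock_t/raw_rwlock_t to arch_spinlock_t/arch_rwlock_t (and __RAW_*_UNLOCKED to __ARCH_*_UNLOCKED), freeing the raw_* names for the generic locking layer. A simplified sketch of how the layers nest after the rename (field names are illustrative, not the exact kernel layout):

	/* Simplified sketch of the lock layering after the rename;
	 * field names are illustrative, not the exact kernel layout. */
	typedef struct {
		volatile unsigned int slock;   /* arch-specific word, as above */
	} arch_spinlock_t;

	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;      /* always a spinning lock */
	} raw_spinlock_t;

	typedef struct spinlock {
		raw_spinlock_t rlock;          /* may become a sleeping lock on -rt */
	} spinlock_t;
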
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index fd51578e29dd..5547ae6e6b0b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -30,7 +30,7 @@
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33#include <linux/bitops.h> 33#include <linux/bitmap.h>
34#include <linux/iommu-helper.h> 34#include <linux/iommu-helper.h>
35#include <linux/crash_dump.h> 35#include <linux/crash_dump.h>
36#include <asm/io.h> 36#include <asm/io.h>
@@ -251,7 +251,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
251 } 251 }
252 252
253 ppc_md.tce_free(tbl, entry, npages); 253 ppc_md.tce_free(tbl, entry, npages);
254 iommu_area_free(tbl->it_map, free_entry, npages); 254 bitmap_clear(tbl->it_map, free_entry, npages);
255} 255}
256 256
257static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 257static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
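
iommu_area_free() is replaced by the generic bitmap_clear(), which clears npages consecutive bits starting at free_entry in the table's allocation bitmap; hence the switch from linux/bitops.h to linux/bitmap.h. A simplified, bit-at-a-time sketch of what bitmap_clear() does (the kernel version works a word at a time):

	/* Simplified bit-at-a-time sketch of bitmap_clear();
	 * the kernel implementation works a word at a time. */
	#include <limits.h>

	#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

	static void bitmap_clear_sketch(unsigned long *map, unsigned int start,
					unsigned int nbits)
	{
		unsigned int bit;

		for (bit = start; bit < start + nbits; bit++)
			map[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
	}
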
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f6dca4f4b295..9040330b0530 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v)
210 if (!desc) 210 if (!desc)
211 return 0; 211 return 0;
212 212
213 spin_lock_irqsave(&desc->lock, flags); 213 raw_spin_lock_irqsave(&desc->lock, flags);
214 214
215 action = desc->action; 215 action = desc->action;
216 if (!action || !action->handler) 216 if (!action || !action->handler)
@@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v)
237 seq_putc(p, '\n'); 237 seq_putc(p, '\n');
238 238
239skip: 239skip:
240 spin_unlock_irqrestore(&desc->lock, flags); 240 raw_spin_unlock_irqrestore(&desc->lock, flags);
241 241
242 return 0; 242 return 0;
243} 243}
@@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
1112 if (!desc) 1112 if (!desc)
1113 continue; 1113 continue;
1114 1114
1115 spin_lock_irqsave(&desc->lock, flags); 1115 raw_spin_lock_irqsave(&desc->lock, flags);
1116 1116
1117 if (desc->action && desc->action->handler) { 1117 if (desc->action && desc->action->handler) {
1118 seq_printf(m, "%5d ", i); 1118 seq_printf(m, "%5d ", i);
@@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
1131 seq_printf(m, "%s\n", p); 1131 seq_printf(m, "%s\n", p);
1132 } 1132 }
1133 1133
1134 spin_unlock_irqrestore(&desc->lock, flags); 1134 raw_spin_unlock_irqrestore(&desc->lock, flags);
1135 } 1135 }
1136 1136
1137 return 0; 1137 return 0;
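
irq_desc->lock is now a raw spinlock, so its users are converted from spin_lock_*() to raw_spin_lock_*(); the raw variants always busy-wait, even on configurations where ordinary spinlocks can become sleeping locks. The same conversion repeats in the platform interrupt code further down. A minimal usage sketch of the raw lock API, assuming kernel context and that DEFINE_RAW_SPINLOCK is available:

	/* Minimal usage sketch of the raw lock API used above (kernel context). */
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);
		/* critical section: never sleeps, safe in hard-irq context */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}
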
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index bf90361bb70f..fd0d29493fd6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,7 @@
42#include <asm/mmu.h> 42#include <asm/mmu.h>
43 43
44struct rtas_t rtas = { 44struct rtas_t rtas = {
45 .lock = __RAW_SPIN_LOCK_UNLOCKED 45 .lock = __ARCH_SPIN_LOCK_UNLOCKED
46}; 46};
47EXPORT_SYMBOL(rtas); 47EXPORT_SYMBOL(rtas);
48 48
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
80 80
81 local_irq_save(flags); 81 local_irq_save(flags);
82 preempt_disable(); 82 preempt_disable();
83 __raw_spin_lock_flags(&rtas.lock, flags); 83 arch_spin_lock_flags(&rtas.lock, flags);
84 return flags; 84 return flags;
85} 85}
86 86
87static void unlock_rtas(unsigned long flags) 87static void unlock_rtas(unsigned long flags)
88{ 88{
89 __raw_spin_unlock(&rtas.lock); 89 arch_spin_unlock(&rtas.lock);
90 local_irq_restore(flags); 90 local_irq_restore(flags);
91 preempt_enable(); 91 preempt_enable();
92} 92}
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
978 return 1; 978 return 1;
979} 979}
980 980
981static raw_spinlock_t timebase_lock; 981static arch_spinlock_t timebase_lock;
982static u64 timebase = 0; 982static u64 timebase = 0;
983 983
984void __cpuinit rtas_give_timebase(void) 984void __cpuinit rtas_give_timebase(void)
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
987 987
988 local_irq_save(flags); 988 local_irq_save(flags);
989 hard_irq_disable(); 989 hard_irq_disable();
990 __raw_spin_lock(&timebase_lock); 990 arch_spin_lock(&timebase_lock);
991 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); 991 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
992 timebase = get_tb(); 992 timebase = get_tb();
993 __raw_spin_unlock(&timebase_lock); 993 arch_spin_unlock(&timebase_lock);
994 994
995 while (timebase) 995 while (timebase)
996 barrier(); 996 barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
1002{ 1002{
1003 while (!timebase) 1003 while (!timebase)
1004 barrier(); 1004 barrier();
1005 __raw_spin_lock(&timebase_lock); 1005 arch_spin_lock(&timebase_lock);
1006 set_tb(timebase >> 32, timebase & 0xffffffff); 1006 set_tb(timebase >> 32, timebase & 0xffffffff);
1007 timebase = 0; 1007 timebase = 0;
1008 __raw_spin_unlock(&timebase_lock); 1008 arch_spin_unlock(&timebase_lock);
1009} 1009}
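
rtas_give_timebase()/rtas_take_timebase() hand the timebase from one CPU to another through the shared variable: the giver publishes the value and spins until the taker zeroes it, while the taker spins until the value appears, copies it, and acknowledges by clearing it. A standalone sketch of the same handoff pattern using C11 atomics (illustrative only; the kernel code relies on the arch spinlock and barriers instead):

	/* Standalone sketch of the give/take handoff using C11 atomics. */
	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	static atomic_ulong timebase;          /* 0 means "not published yet" */

	static void *give(void *arg)
	{
		atomic_store(&timebase, 12345);    /* publish the value */
		while (atomic_load(&timebase))     /* wait until it is consumed */
			;
		return NULL;
	}

	static void *take(void *arg)
	{
		unsigned long tb;

		while (!(tb = atomic_load(&timebase)))
			;                              /* wait for the value */
		printf("got timebase %lu\n", tb);
		atomic_store(&timebase, 0);        /* acknowledge */
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, give, NULL);
		pthread_create(&b, NULL, take, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}
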
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 804f0f30f227..d069ff8a7e03 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -174,6 +174,15 @@ int die(const char *str, struct pt_regs *regs, long err)
174 return 0; 174 return 0;
175} 175}
176 176
177void user_single_step_siginfo(struct task_struct *tsk,
178 struct pt_regs *regs, siginfo_t *info)
179{
180 memset(info, 0, sizeof(*info));
181 info->si_signo = SIGTRAP;
182 info->si_code = TRAP_TRACE;
183 info->si_addr = (void __user *)regs->nip;
184}
185
177void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 186void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
178{ 187{
179 siginfo_t info; 188 siginfo_t info;
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 79d0fa3a470d..58e14fba11b1 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -25,7 +25,7 @@
25#include <asm/smp.h> 25#include <asm/smp.h>
26#include <asm/firmware.h> 26#include <asm/firmware.h>
27 27
28void __spin_yield(raw_spinlock_t *lock) 28void __spin_yield(arch_spinlock_t *lock)
29{ 29{
30 unsigned int lock_value, holder_cpu, yield_count; 30 unsigned int lock_value, holder_cpu, yield_count;
31 31
@@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock)
55 * This turns out to be the same for read and write locks, since 55 * This turns out to be the same for read and write locks, since
56 * we only know the holder if it is write-locked. 56 * we only know the holder if it is write-locked.
57 */ 57 */
58void __rw_yield(raw_rwlock_t *rw) 58void __rw_yield(arch_rwlock_t *rw)
59{ 59{
60 int lock_value; 60 int lock_value;
61 unsigned int holder_cpu, yield_count; 61 unsigned int holder_cpu, yield_count;
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
82} 82}
83#endif 83#endif
84 84
85void __raw_spin_unlock_wait(raw_spinlock_t *lock) 85void arch_spin_unlock_wait(arch_spinlock_t *lock)
86{ 86{
87 while (lock->slock) { 87 while (lock->slock) {
88 HMT_low(); 88 HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
92 HMT_medium(); 92 HMT_medium();
93} 93}
94 94
95EXPORT_SYMBOL(__raw_spin_unlock_wait); 95EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index cc0c854291d7..0bac3a3dbecf 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
86 u32 status, enable; 86 u32 status, enable;
87 87
88 /* Mask off the cascaded IRQ */ 88 /* Mask off the cascaded IRQ */
89 spin_lock(&desc->lock); 89 raw_spin_lock(&desc->lock);
90 desc->chip->mask(virq); 90 desc->chip->mask(virq);
91 spin_unlock(&desc->lock); 91 raw_spin_unlock(&desc->lock);
92 92
93 /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs 93 /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
94 * are pending. 'ffs()' is 1 based */ 94 * are pending. 'ffs()' is 1 based */
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
104 } 104 }
105 105
106 /* Processing done; can reenable the cascade now */ 106 /* Processing done; can reenable the cascade now */
107 spin_lock(&desc->lock); 107 raw_spin_lock(&desc->lock);
108 desc->chip->ack(virq); 108 desc->chip->ack(virq);
109 if (!(desc->status & IRQ_DISABLED)) 109 if (!(desc->status & IRQ_DISABLED))
110 desc->chip->unmask(virq); 110 desc->chip->unmask(virq);
111 spin_unlock(&desc->lock); 111 raw_spin_unlock(&desc->lock);
112} 112}
113 113
114static int media5200_irq_map(struct irq_host *h, unsigned int virq, 114static int media5200_irq_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 7267effc8078..6829cf7e2bda 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -237,7 +237,7 @@ extern int noirqdebug;
237 237
238static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) 238static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
239{ 239{
240 spin_lock(&desc->lock); 240 raw_spin_lock(&desc->lock);
241 241
242 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 242 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
243 243
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
265 goto out_eoi; 265 goto out_eoi;
266 266
267 desc->status &= ~IRQ_PENDING; 267 desc->status &= ~IRQ_PENDING;
268 spin_unlock(&desc->lock); 268 raw_spin_unlock(&desc->lock);
269 action_ret = handle_IRQ_event(irq, action); 269 action_ret = handle_IRQ_event(irq, action);
270 if (!noirqdebug) 270 if (!noirqdebug)
271 note_interrupt(irq, desc, action_ret); 271 note_interrupt(irq, desc, action_ret);
272 spin_lock(&desc->lock); 272 raw_spin_lock(&desc->lock);
273 273
274 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); 274 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
275 275
276 desc->status &= ~IRQ_INPROGRESS; 276 desc->status &= ~IRQ_INPROGRESS;
277out_eoi: 277out_eoi:
278 desc->chip->eoi(irq); 278 desc->chip->eoi(irq);
279 spin_unlock(&desc->lock); 279 raw_spin_unlock(&desc->lock);
280} 280}
281 281
282static int iic_host_map(struct irq_host *h, unsigned int virq, 282static int iic_host_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 07762259c60a..86c4b29eea89 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs()
217 struct irq_desc *desc = irq_to_desc(irq); 217 struct irq_desc *desc = irq_to_desc(irq);
218 218
219 if (desc && desc->chip && desc->chip->startup) { 219 if (desc && desc->chip && desc->chip->startup) {
220 spin_lock_irqsave(&desc->lock, flags); 220 raw_spin_lock_irqsave(&desc->lock, flags);
221 desc->chip->startup(irq); 221 desc->chip->startup(irq);
222 spin_unlock_irqrestore(&desc->lock, flags); 222 raw_spin_unlock_irqrestore(&desc->lock, flags);
223 } 223 }
224 } 224 }
225} 225}
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index a4619347aa7e..242f8095c2df 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
71} 71}
72 72
73#ifdef CONFIG_SMP 73#ifdef CONFIG_SMP
74static raw_spinlock_t timebase_lock; 74static arch_spinlock_t timebase_lock;
75static unsigned long timebase; 75static unsigned long timebase;
76 76
77static void __devinit pas_give_timebase(void) 77static void __devinit pas_give_timebase(void)
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
80 80
81 local_irq_save(flags); 81 local_irq_save(flags);
82 hard_irq_disable(); 82 hard_irq_disable();
83 __raw_spin_lock(&timebase_lock); 83 arch_spin_lock(&timebase_lock);
84 mtspr(SPRN_TBCTL, TBCTL_FREEZE); 84 mtspr(SPRN_TBCTL, TBCTL_FREEZE);
85 isync(); 85 isync();
86 timebase = get_tb(); 86 timebase = get_tb();
87 __raw_spin_unlock(&timebase_lock); 87 arch_spin_unlock(&timebase_lock);
88 88
89 while (timebase) 89 while (timebase)
90 barrier(); 90 barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
97 while (!timebase) 97 while (!timebase)
98 smp_rmb(); 98 smp_rmb();
99 99
100 __raw_spin_lock(&timebase_lock); 100 arch_spin_lock(&timebase_lock);
101 set_tb(timebase >> 32, timebase & 0xffffffff); 101 set_tb(timebase >> 32, timebase & 0xffffffff);
102 timebase = 0; 102 timebase = 0;
103 __raw_spin_unlock(&timebase_lock); 103 arch_spin_unlock(&timebase_lock);
104} 104}
105 105
106struct smp_ops_t pas_smp_ops = { 106struct smp_ops_t pas_smp_ops = {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 7d01b58f3989..b9b9e11609ec 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void)
906 || desc->chip->set_affinity == NULL) 906 || desc->chip->set_affinity == NULL)
907 continue; 907 continue;
908 908
909 spin_lock_irqsave(&desc->lock, flags); 909 raw_spin_lock_irqsave(&desc->lock, flags);
910 910
911 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); 911 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
912 if (status) { 912 if (status) {
@@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void)
930 cpumask_setall(irq_to_desc(virq)->affinity); 930 cpumask_setall(irq_to_desc(virq)->affinity);
931 desc->chip->set_affinity(virq, cpu_all_mask); 931 desc->chip->set_affinity(virq, cpu_all_mask);
932unlock: 932unlock:
933 spin_unlock_irqrestore(&desc->lock, flags); 933 raw_spin_unlock_irqrestore(&desc->lock, flags);
934 } 934 }
935} 935}
936#endif 936#endif
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 62e50258cdef..c6e11b077108 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
173 u32 intr_index; 173 u32 intr_index;
174 u32 have_shift = 0; 174 u32 have_shift = 0;
175 175
176 spin_lock(&desc->lock); 176 raw_spin_lock(&desc->lock);
177 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { 177 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
178 if (desc->chip->mask_ack) 178 if (desc->chip->mask_ack)
179 desc->chip->mask_ack(irq); 179 desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
225 break; 225 break;
226 } 226 }
227unlock: 227unlock:
228 spin_unlock(&desc->lock); 228 raw_spin_unlock(&desc->lock);
229} 229}
230 230
231static int __devinit fsl_of_msi_probe(struct of_device *dev, 231static int __devinit fsl_of_msi_probe(struct of_device *dev,
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 7d10074b3304..6f220a913e42 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
225 int src; 225 int src;
226 int subvirq; 226 int subvirq;
227 227
228 spin_lock(&desc->lock); 228 raw_spin_lock(&desc->lock);
229 if (desc->status & IRQ_LEVEL) 229 if (desc->status & IRQ_LEVEL)
230 desc->chip->mask(virq); 230 desc->chip->mask(virq);
231 else 231 else
232 desc->chip->mask_ack(virq); 232 desc->chip->mask_ack(virq);
233 spin_unlock(&desc->lock); 233 raw_spin_unlock(&desc->lock);
234 234
235 msr = mfdcr(uic->dcrbase + UIC_MSR); 235 msr = mfdcr(uic->dcrbase + UIC_MSR);
236 if (!msr) /* spurious interrupt */ 236 if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
242 generic_handle_irq(subvirq); 242 generic_handle_irq(subvirq);
243 243
244uic_irq_ret: 244uic_irq_ret:
245 spin_lock(&desc->lock); 245 raw_spin_lock(&desc->lock);
246 if (desc->status & IRQ_LEVEL) 246 if (desc->status & IRQ_LEVEL)
247 desc->chip->ack(virq); 247 desc->chip->ack(virq);
248 if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) 248 if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
249 desc->chip->unmask(virq); 249 desc->chip->unmask(virq);
250 spin_unlock(&desc->lock); 250 raw_spin_unlock(&desc->lock);
251} 251}
252 252
253static struct uic * __init uic_init_one(struct device_node *node) 253static struct uic * __init uic_init_one(struct device_node *node)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 495589950dc7..5c91995b74e4 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -551,7 +551,7 @@ static int appldata_thaw(struct device *dev)
551 return appldata_restore(dev); 551 return appldata_restore(dev);
552} 552}
553 553
554static struct dev_pm_ops appldata_pm_ops = { 554static const struct dev_pm_ops appldata_pm_ops = {
555 .freeze = appldata_freeze, 555 .freeze = appldata_freeze,
556 .thaw = appldata_thaw, 556 .thaw = appldata_thaw,
557 .restore = appldata_restore, 557 .restore = appldata_restore,
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index e885442c1dfe..354d42616c7e 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -155,7 +155,6 @@ extern unsigned int vdso_enabled;
155 } while (0) 155 } while (0)
156 156
157#define CORE_DUMP_USE_REGSET 157#define CORE_DUMP_USE_REGSET
158#define USE_ELF_CORE_DUMP
159#define ELF_EXEC_PAGESIZE 4096 158#define ELF_EXEC_PAGESIZE 4096
160 159
161/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 160/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..a587907d77f3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
52 * (the type definitions are in asm/spinlock_types.h) 52 * (the type definitions are in asm/spinlock_types.h)
53 */ 53 */
54 54
55#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) 55#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
56#define __raw_spin_unlock_wait(lock) \ 56#define arch_spin_unlock_wait(lock) \
57 do { while (__raw_spin_is_locked(lock)) \ 57 do { while (arch_spin_is_locked(lock)) \
58 _raw_spin_relax(lock); } while (0) 58 arch_spin_relax(lock); } while (0)
59 59
60extern void _raw_spin_lock_wait(raw_spinlock_t *); 60extern void arch_spin_lock_wait(arch_spinlock_t *);
61extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); 61extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
62extern int _raw_spin_trylock_retry(raw_spinlock_t *); 62extern int arch_spin_trylock_retry(arch_spinlock_t *);
63extern void _raw_spin_relax(raw_spinlock_t *lock); 63extern void arch_spin_relax(arch_spinlock_t *lock);
64 64
65static inline void __raw_spin_lock(raw_spinlock_t *lp) 65static inline void arch_spin_lock(arch_spinlock_t *lp)
66{ 66{
67 int old; 67 int old;
68 68
69 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 69 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
70 if (likely(old == 0)) 70 if (likely(old == 0))
71 return; 71 return;
72 _raw_spin_lock_wait(lp); 72 arch_spin_lock_wait(lp);
73} 73}
74 74
75static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, 75static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
76 unsigned long flags) 76 unsigned long flags)
77{ 77{
78 int old; 78 int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
80 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 80 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
81 if (likely(old == 0)) 81 if (likely(old == 0))
82 return; 82 return;
83 _raw_spin_lock_wait_flags(lp, flags); 83 arch_spin_lock_wait_flags(lp, flags);
84} 84}
85 85
86static inline int __raw_spin_trylock(raw_spinlock_t *lp) 86static inline int arch_spin_trylock(arch_spinlock_t *lp)
87{ 87{
88 int old; 88 int old;
89 89
90 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 90 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
91 if (likely(old == 0)) 91 if (likely(old == 0))
92 return 1; 92 return 1;
93 return _raw_spin_trylock_retry(lp); 93 return arch_spin_trylock_retry(lp);
94} 94}
95 95
96static inline void __raw_spin_unlock(raw_spinlock_t *lp) 96static inline void arch_spin_unlock(arch_spinlock_t *lp)
97{ 97{
98 _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); 98 _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
99} 99}
@@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
113 * read_can_lock - would read_trylock() succeed? 113 * read_can_lock - would read_trylock() succeed?
114 * @lock: the rwlock in question. 114 * @lock: the rwlock in question.
115 */ 115 */
116#define __raw_read_can_lock(x) ((int)(x)->lock >= 0) 116#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
117 117
118/** 118/**
119 * write_can_lock - would write_trylock() succeed? 119 * write_can_lock - would write_trylock() succeed?
120 * @lock: the rwlock in question. 120 * @lock: the rwlock in question.
121 */ 121 */
122#define __raw_write_can_lock(x) ((x)->lock == 0) 122#define arch_write_can_lock(x) ((x)->lock == 0)
123 123
124extern void _raw_read_lock_wait(raw_rwlock_t *lp); 124extern void _raw_read_lock_wait(arch_rwlock_t *lp);
125extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); 125extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
126extern int _raw_read_trylock_retry(raw_rwlock_t *lp); 126extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
127extern void _raw_write_lock_wait(raw_rwlock_t *lp); 127extern void _raw_write_lock_wait(arch_rwlock_t *lp);
128extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); 128extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
129extern int _raw_write_trylock_retry(raw_rwlock_t *lp); 129extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
130 130
131static inline void __raw_read_lock(raw_rwlock_t *rw) 131static inline void arch_read_lock(arch_rwlock_t *rw)
132{ 132{
133 unsigned int old; 133 unsigned int old;
134 old = rw->lock & 0x7fffffffU; 134 old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
136 _raw_read_lock_wait(rw); 136 _raw_read_lock_wait(rw);
137} 137}
138 138
139static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) 139static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
140{ 140{
141 unsigned int old; 141 unsigned int old;
142 old = rw->lock & 0x7fffffffU; 142 old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
144 _raw_read_lock_wait_flags(rw, flags); 144 _raw_read_lock_wait_flags(rw, flags);
145} 145}
146 146
147static inline void __raw_read_unlock(raw_rwlock_t *rw) 147static inline void arch_read_unlock(arch_rwlock_t *rw)
148{ 148{
149 unsigned int old, cmp; 149 unsigned int old, cmp;
150 150
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
155 } while (cmp != old); 155 } while (cmp != old);
156} 156}
157 157
158static inline void __raw_write_lock(raw_rwlock_t *rw) 158static inline void arch_write_lock(arch_rwlock_t *rw)
159{ 159{
160 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) 160 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
161 _raw_write_lock_wait(rw); 161 _raw_write_lock_wait(rw);
162} 162}
163 163
164static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) 164static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
165{ 165{
166 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) 166 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
167 _raw_write_lock_wait_flags(rw, flags); 167 _raw_write_lock_wait_flags(rw, flags);
168} 168}
169 169
170static inline void __raw_write_unlock(raw_rwlock_t *rw) 170static inline void arch_write_unlock(arch_rwlock_t *rw)
171{ 171{
172 _raw_compare_and_swap(&rw->lock, 0x80000000, 0); 172 _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
173} 173}
174 174
175static inline int __raw_read_trylock(raw_rwlock_t *rw) 175static inline int arch_read_trylock(arch_rwlock_t *rw)
176{ 176{
177 unsigned int old; 177 unsigned int old;
178 old = rw->lock & 0x7fffffffU; 178 old = rw->lock & 0x7fffffffU;
@@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
181 return _raw_read_trylock_retry(rw); 181 return _raw_read_trylock_retry(rw);
182} 182}
183 183
184static inline int __raw_write_trylock(raw_rwlock_t *rw) 184static inline int arch_write_trylock(arch_rwlock_t *rw)
185{ 185{
186 if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) 186 if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
187 return 1; 187 return 1;
188 return _raw_write_trylock_retry(rw); 188 return _raw_write_trylock_retry(rw);
189} 189}
190 190
191#define _raw_read_relax(lock) cpu_relax() 191#define arch_read_relax(lock) cpu_relax()
192#define _raw_write_relax(lock) cpu_relax() 192#define arch_write_relax(lock) cpu_relax()
193 193
194#endif /* __ASM_SPINLOCK_H */ 194#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 654abc40de04..9c76656a0af0 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int owner_cpu; 9 volatile unsigned int owner_cpu;
10} __attribute__ ((aligned (4))) raw_spinlock_t; 10} __attribute__ ((aligned (4))) arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int lock; 15 volatile unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define __RAW_RW_LOCK_UNLOCKED { 0 } 18#define __ARCH_RW_LOCK_UNLOCKED { 0 }
19 19
20#endif 20#endif
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 071c81f179ef..0168472b2fdf 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -18,6 +18,7 @@
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/ctype.h> 20#include <linux/ctype.h>
21#include <linux/string.h>
21#include <linux/sysctl.h> 22#include <linux/sysctl.h>
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
23#include <linux/module.h> 24#include <linux/module.h>
@@ -1178,7 +1179,7 @@ debug_get_uint(char *buf)
1178{ 1179{
1179 int rc; 1180 int rc;
1180 1181
1181 for(; isspace(*buf); buf++); 1182 buf = skip_spaces(buf);
1182 rc = simple_strtoul(buf, &buf, 10); 1183 rc = simple_strtoul(buf, &buf, 10);
1183 if(*buf){ 1184 if(*buf){
1184 rc = -EINVAL; 1185 rc = -EINVAL;
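
The open-coded isspace() loop is replaced with the skip_spaces() library helper, which returns a pointer to the first non-whitespace character. A standalone equivalent, assuming the usual semantics of the helper:

	/* Standalone equivalent of the skip_spaces() helper used above. */
	#include <ctype.h>

	static char *skip_spaces_sketch(const char *str)
	{
		while (isspace((unsigned char)*str))
			str++;
		return (char *)str;
	}
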
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b7..10754a375668 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
39 _raw_yield(); 39 _raw_yield();
40} 40}
41 41
42void _raw_spin_lock_wait(raw_spinlock_t *lp) 42void arch_spin_lock_wait(arch_spinlock_t *lp)
43{ 43{
44 int count = spin_retry; 44 int count = spin_retry;
45 unsigned int cpu = ~smp_processor_id(); 45 unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
51 _raw_yield_cpu(~owner); 51 _raw_yield_cpu(~owner);
52 count = spin_retry; 52 count = spin_retry;
53 } 53 }
54 if (__raw_spin_is_locked(lp)) 54 if (arch_spin_is_locked(lp))
55 continue; 55 continue;
56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
57 return; 57 return;
58 } 58 }
59} 59}
60EXPORT_SYMBOL(_raw_spin_lock_wait); 60EXPORT_SYMBOL(arch_spin_lock_wait);
61 61
62void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) 62void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
63{ 63{
64 int count = spin_retry; 64 int count = spin_retry;
65 unsigned int cpu = ~smp_processor_id(); 65 unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
72 _raw_yield_cpu(~owner); 72 _raw_yield_cpu(~owner);
73 count = spin_retry; 73 count = spin_retry;
74 } 74 }
75 if (__raw_spin_is_locked(lp)) 75 if (arch_spin_is_locked(lp))
76 continue; 76 continue;
77 local_irq_disable(); 77 local_irq_disable();
78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
80 local_irq_restore(flags); 80 local_irq_restore(flags);
81 } 81 }
82} 82}
83EXPORT_SYMBOL(_raw_spin_lock_wait_flags); 83EXPORT_SYMBOL(arch_spin_lock_wait_flags);
84 84
85int _raw_spin_trylock_retry(raw_spinlock_t *lp) 85int arch_spin_trylock_retry(arch_spinlock_t *lp)
86{ 86{
87 unsigned int cpu = ~smp_processor_id(); 87 unsigned int cpu = ~smp_processor_id();
88 int count; 88 int count;
89 89
90 for (count = spin_retry; count > 0; count--) { 90 for (count = spin_retry; count > 0; count--) {
91 if (__raw_spin_is_locked(lp)) 91 if (arch_spin_is_locked(lp))
92 continue; 92 continue;
93 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 93 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
94 return 1; 94 return 1;
95 } 95 }
96 return 0; 96 return 0;
97} 97}
98EXPORT_SYMBOL(_raw_spin_trylock_retry); 98EXPORT_SYMBOL(arch_spin_trylock_retry);
99 99
100void _raw_spin_relax(raw_spinlock_t *lock) 100void arch_spin_relax(arch_spinlock_t *lock)
101{ 101{
102 unsigned int cpu = lock->owner_cpu; 102 unsigned int cpu = lock->owner_cpu;
103 if (cpu != 0) 103 if (cpu != 0)
104 _raw_yield_cpu(~cpu); 104 _raw_yield_cpu(~cpu);
105} 105}
106EXPORT_SYMBOL(_raw_spin_relax); 106EXPORT_SYMBOL(arch_spin_relax);
107 107
108void _raw_read_lock_wait(raw_rwlock_t *rw) 108void _raw_read_lock_wait(arch_rwlock_t *rw)
109{ 109{
110 unsigned int old; 110 unsigned int old;
111 int count = spin_retry; 111 int count = spin_retry;
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
115 _raw_yield(); 115 _raw_yield();
116 count = spin_retry; 116 count = spin_retry;
117 } 117 }
118 if (!__raw_read_can_lock(rw)) 118 if (!arch_read_can_lock(rw))
119 continue; 119 continue;
120 old = rw->lock & 0x7fffffffU; 120 old = rw->lock & 0x7fffffffU;
121 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 121 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
124} 124}
125EXPORT_SYMBOL(_raw_read_lock_wait); 125EXPORT_SYMBOL(_raw_read_lock_wait);
126 126
127void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) 127void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
128{ 128{
129 unsigned int old; 129 unsigned int old;
130 int count = spin_retry; 130 int count = spin_retry;
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
135 _raw_yield(); 135 _raw_yield();
136 count = spin_retry; 136 count = spin_retry;
137 } 137 }
138 if (!__raw_read_can_lock(rw)) 138 if (!arch_read_can_lock(rw))
139 continue; 139 continue;
140 old = rw->lock & 0x7fffffffU; 140 old = rw->lock & 0x7fffffffU;
141 local_irq_disable(); 141 local_irq_disable();
@@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
145} 145}
146EXPORT_SYMBOL(_raw_read_lock_wait_flags); 146EXPORT_SYMBOL(_raw_read_lock_wait_flags);
147 147
148int _raw_read_trylock_retry(raw_rwlock_t *rw) 148int _raw_read_trylock_retry(arch_rwlock_t *rw)
149{ 149{
150 unsigned int old; 150 unsigned int old;
151 int count = spin_retry; 151 int count = spin_retry;
152 152
153 while (count-- > 0) { 153 while (count-- > 0) {
154 if (!__raw_read_can_lock(rw)) 154 if (!arch_read_can_lock(rw))
155 continue; 155 continue;
156 old = rw->lock & 0x7fffffffU; 156 old = rw->lock & 0x7fffffffU;
157 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 157 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
161} 161}
162EXPORT_SYMBOL(_raw_read_trylock_retry); 162EXPORT_SYMBOL(_raw_read_trylock_retry);
163 163
164void _raw_write_lock_wait(raw_rwlock_t *rw) 164void _raw_write_lock_wait(arch_rwlock_t *rw)
165{ 165{
166 int count = spin_retry; 166 int count = spin_retry;
167 167
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
170 _raw_yield(); 170 _raw_yield();
171 count = spin_retry; 171 count = spin_retry;
172 } 172 }
173 if (!__raw_write_can_lock(rw)) 173 if (!arch_write_can_lock(rw))
174 continue; 174 continue;
175 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 175 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
176 return; 176 return;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
178} 178}
179EXPORT_SYMBOL(_raw_write_lock_wait); 179EXPORT_SYMBOL(_raw_write_lock_wait);
180 180
181void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) 181void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
182{ 182{
183 int count = spin_retry; 183 int count = spin_retry;
184 184
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
188 _raw_yield(); 188 _raw_yield();
189 count = spin_retry; 189 count = spin_retry;
190 } 190 }
191 if (!__raw_write_can_lock(rw)) 191 if (!arch_write_can_lock(rw))
192 continue; 192 continue;
193 local_irq_disable(); 193 local_irq_disable();
194 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 194 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
197} 197}
198EXPORT_SYMBOL(_raw_write_lock_wait_flags); 198EXPORT_SYMBOL(_raw_write_lock_wait_flags);
199 199
200int _raw_write_trylock_retry(raw_rwlock_t *rw) 200int _raw_write_trylock_retry(arch_rwlock_t *rw)
201{ 201{
202 int count = spin_retry; 202 int count = spin_retry;
203 203
204 while (count-- > 0) { 204 while (count-- > 0) {
205 if (!__raw_write_can_lock(rw)) 205 if (!arch_write_can_lock(rw))
206 continue; 206 continue;
207 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 207 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
208 return 1; 208 return 1;
diff --git a/arch/score/include/asm/elf.h b/arch/score/include/asm/elf.h
index 43526d9fda93..f478ce94181f 100644
--- a/arch/score/include/asm/elf.h
+++ b/arch/score/include/asm/elf.h
@@ -61,7 +61,6 @@ struct task_struct;
61struct pt_regs; 61struct pt_regs;
62 62
63#define CORE_DUMP_USE_REGSET 63#define CORE_DUMP_USE_REGSET
64#define USE_ELF_CORE_DUMP
65#define ELF_EXEC_PAGESIZE PAGE_SIZE 64#define ELF_EXEC_PAGESIZE PAGE_SIZE
66 65
67/* This yields a mask that user programs can use to figure out what 66/* This yields a mask that user programs can use to figure out what
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 55907af1dc25..12fec72fec5f 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -19,50 +19,6 @@ config SH_STANDARD_BIOS
19 mask ROM and no flash (WindowsCE machines fall in this category). 19 mask ROM and no flash (WindowsCE machines fall in this category).
20 If unsure, say N. 20 If unsure, say N.
21 21
22config EARLY_SCIF_CONSOLE
23 bool "Use early SCIF console"
24 help
25 This enables an early console using a fixed SCIF port. This can
26 be used by platforms that are either not running the SH
27 standard BIOS, or do not wish to use the BIOS callbacks for the
28 serial I/O.
29
30config EARLY_SCIF_CONSOLE_PORT
31 hex
32 depends on EARLY_SCIF_CONSOLE
33 default "0xa4400000" if CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7705
34 default "0xa4430000" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721
35 default "0xf8420000" if CPU_SUBTYPE_SH7619
36 default "0xff804000" if CPU_SUBTYPE_MXG
37 default "0xffc30000" if CPU_SUBTYPE_SHX3
38 default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763 || \
39 CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366 || \
40 CPU_SUBTYPE_SH7343
41 default "0xfe4c0000" if CPU_SUBTYPE_SH7757
42 default "0xffeb0000" if CPU_SUBTYPE_SH7785
43 default "0xffeb0000" if CPU_SUBTYPE_SH7786
44 default "0xfffe8000" if CPU_SUBTYPE_SH7203
45 default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263
46 default "0xffe80000" if CPU_SH4
47 default "0xa4000150" if CPU_SH3
48 default "0x00000000"
49
50config EARLY_PRINTK
51 bool "Early printk support"
52 depends on SH_STANDARD_BIOS || EARLY_SCIF_CONSOLE
53 help
54 Say Y here to redirect kernel printk messages to the serial port
55 used by the SH-IPL bootloader, starting very early in the boot
56 process and ending when the kernel's serial console is initialised.
 57 This option is only useful when porting the kernel to a new machine,
58 when the kernel may crash or hang before the serial console is
59 initialised. If unsure, say N.
60
61 On devices that are running SH-IPL and want to keep the port
62 initialization consistent while not using the BIOS callbacks,
63 select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using
64 the kernel command line option to toggle back and forth.
65
66config STACK_DEBUG 22config STACK_DEBUG
67 bool "Check for stack overflows" 23 bool "Check for stack overflows"
68 depends on DEBUG_KERNEL && SUPERH32 24 depends on DEBUG_KERNEL && SUPERH32
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index cf9dc12dfeb1..1f5fa5c44f6d 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -316,20 +316,24 @@ static struct soc_camera_platform_info camera_info = {
316 .format_name = "UYVY", 316 .format_name = "UYVY",
317 .format_depth = 16, 317 .format_depth = 16,
318 .format = { 318 .format = {
319 .pixelformat = V4L2_PIX_FMT_UYVY, 319 .code = V4L2_MBUS_FMT_YUYV8_2X8_BE,
320 .colorspace = V4L2_COLORSPACE_SMPTE170M, 320 .colorspace = V4L2_COLORSPACE_SMPTE170M,
321 .field = V4L2_FIELD_NONE,
321 .width = 640, 322 .width = 640,
322 .height = 480, 323 .height = 480,
323 }, 324 },
324 .bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | 325 .bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
325 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8, 326 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
327 SOCAM_DATA_ACTIVE_HIGH,
326 .set_capture = camera_set_capture, 328 .set_capture = camera_set_capture,
327 .link = { 329};
328 .bus_id = 0, 330
329 .add_device = ap325rxa_camera_add, 331struct soc_camera_link camera_link = {
330 .del_device = ap325rxa_camera_del, 332 .bus_id = 0,
331 .module_name = "soc_camera_platform", 333 .add_device = ap325rxa_camera_add,
332 }, 334 .del_device = ap325rxa_camera_del,
335 .module_name = "soc_camera_platform",
336 .priv = &camera_info,
333}; 337};
334 338
335static void dummy_release(struct device *dev) 339static void dummy_release(struct device *dev)
@@ -347,7 +351,7 @@ static struct platform_device camera_device = {
347static int ap325rxa_camera_add(struct soc_camera_link *icl, 351static int ap325rxa_camera_add(struct soc_camera_link *icl,
348 struct device *dev) 352 struct device *dev)
349{ 353{
350 if (icl != &camera_info.link || camera_probe() <= 0) 354 if (icl != &camera_link || camera_probe() <= 0)
351 return -ENODEV; 355 return -ENODEV;
352 356
353 camera_info.dev = dev; 357 camera_info.dev = dev;
@@ -357,7 +361,7 @@ static int ap325rxa_camera_add(struct soc_camera_link *icl,
357 361
358static void ap325rxa_camera_del(struct soc_camera_link *icl) 362static void ap325rxa_camera_del(struct soc_camera_link *icl)
359{ 363{
360 if (icl != &camera_info.link) 364 if (icl != &camera_link)
361 return; 365 return;
362 366
363 platform_device_unregister(&camera_device); 367 platform_device_unregister(&camera_device);
@@ -470,13 +474,15 @@ static struct ov772x_camera_info ov7725_info = {
470 .buswidth = SOCAM_DATAWIDTH_8, 474 .buswidth = SOCAM_DATAWIDTH_8,
471 .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP, 475 .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
472 .edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0), 476 .edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0),
473 .link = { 477};
474 .bus_id = 0, 478
475 .power = ov7725_power, 479static struct soc_camera_link ov7725_link = {
476 .board_info = &ap325rxa_i2c_camera[0], 480 .bus_id = 0,
477 .i2c_adapter_id = 0, 481 .power = ov7725_power,
478 .module_name = "ov772x", 482 .board_info = &ap325rxa_i2c_camera[0],
479 }, 483 .i2c_adapter_id = 0,
484 .module_name = "ov772x",
485 .priv = &ov7725_info,
480}; 486};
481 487
482static struct platform_device ap325rxa_camera[] = { 488static struct platform_device ap325rxa_camera[] = {
@@ -484,13 +490,13 @@ static struct platform_device ap325rxa_camera[] = {
484 .name = "soc-camera-pdrv", 490 .name = "soc-camera-pdrv",
485 .id = 0, 491 .id = 0,
486 .dev = { 492 .dev = {
487 .platform_data = &ov7725_info.link, 493 .platform_data = &ov7725_link,
488 }, 494 },
489 }, { 495 }, {
490 .name = "soc-camera-pdrv", 496 .name = "soc-camera-pdrv",
491 .id = 1, 497 .id = 1,
492 .dev = { 498 .dev = {
493 .platform_data = &camera_info.link, 499 .platform_data = &camera_link,
494 }, 500 },
495 }, 501 },
496}; 502};
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 826e62326d51..194aaca22d47 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -19,11 +19,18 @@
19#include <linux/usb/r8a66597.h> 19#include <linux/usb/r8a66597.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/i2c/tsc2007.h> 21#include <linux/i2c/tsc2007.h>
22#include <linux/spi/spi.h>
23#include <linux/spi/sh_msiof.h>
24#include <linux/spi/mmc_spi.h>
25#include <linux/mmc/host.h>
22#include <linux/input.h> 26#include <linux/input.h>
23#include <linux/input/sh_keysc.h> 27#include <linux/input/sh_keysc.h>
24#include <linux/mfd/sh_mobile_sdhi.h> 28#include <linux/mfd/sh_mobile_sdhi.h>
25#include <video/sh_mobile_lcdc.h> 29#include <video/sh_mobile_lcdc.h>
30#include <sound/sh_fsi.h>
26#include <media/sh_mobile_ceu.h> 31#include <media/sh_mobile_ceu.h>
32#include <media/tw9910.h>
33#include <media/mt9t112.h>
27#include <asm/heartbeat.h> 34#include <asm/heartbeat.h>
28#include <asm/sh_eth.h> 35#include <asm/sh_eth.h>
29#include <asm/clock.h> 36#include <asm/clock.h>
@@ -338,6 +345,12 @@ static struct platform_device ceu1_device = {
338}; 345};
339 346
340/* I2C device */ 347/* I2C device */
348static struct i2c_board_info i2c0_devices[] = {
349 {
350 I2C_BOARD_INFO("da7210", 0x1a),
351 },
352};
353
341static struct i2c_board_info i2c1_devices[] = { 354static struct i2c_board_info i2c1_devices[] = {
342 { 355 {
343 I2C_BOARD_INFO("r2025sd", 0x32), 356 I2C_BOARD_INFO("r2025sd", 0x32),
@@ -421,6 +434,7 @@ static struct i2c_board_info ts_i2c_clients = {
421 .irq = IRQ0, 434 .irq = IRQ0,
422}; 435};
423 436
437#ifdef CONFIG_MFD_SH_MOBILE_SDHI
 424/* SDHI0 */ 438/* SDHI0 */
425static void sdhi0_set_pwr(struct platform_device *pdev, int state) 439static void sdhi0_set_pwr(struct platform_device *pdev, int state)
426{ 440{
@@ -493,6 +507,248 @@ static struct platform_device sdhi1_device = {
493 }, 507 },
494}; 508};
495 509
510#else
511
512static int mmc_spi_get_ro(struct device *dev)
513{
514 return gpio_get_value(GPIO_PTY6);
515}
516
517static int mmc_spi_get_cd(struct device *dev)
518{
519 return !gpio_get_value(GPIO_PTY7);
520}
521
522static void mmc_spi_setpower(struct device *dev, unsigned int maskval)
523{
524 gpio_set_value(GPIO_PTB6, maskval ? 1 : 0);
525}
526
527static struct mmc_spi_platform_data mmc_spi_info = {
528 .get_ro = mmc_spi_get_ro,
529 .get_cd = mmc_spi_get_cd,
530 .caps = MMC_CAP_NEEDS_POLL,
531 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3.3V only */
532 .setpower = mmc_spi_setpower,
533};
534
535static struct spi_board_info spi_bus[] = {
536 {
537 .modalias = "mmc_spi",
538 .platform_data = &mmc_spi_info,
539 .max_speed_hz = 5000000,
540 .mode = SPI_MODE_0,
541 .controller_data = (void *) GPIO_PTM4,
542 },
543};
544
545static struct sh_msiof_spi_info msiof0_data = {
546 .num_chipselect = 1,
547};
548
549static struct resource msiof0_resources[] = {
550 [0] = {
551 .name = "MSIOF0",
552 .start = 0xa4c40000,
553 .end = 0xa4c40063,
554 .flags = IORESOURCE_MEM,
555 },
556 [1] = {
557 .start = 84,
558 .flags = IORESOURCE_IRQ,
559 },
560};
561
562static struct platform_device msiof0_device = {
563 .name = "spi_sh_msiof",
564 .id = 0, /* MSIOF0 */
565 .dev = {
566 .platform_data = &msiof0_data,
567 },
568 .num_resources = ARRAY_SIZE(msiof0_resources),
569 .resource = msiof0_resources,
570 .archdata = {
571 .hwblk_id = HWBLK_MSIOF0,
572 },
573};
574
575#endif
576
577/* I2C Video/Camera */
578static struct i2c_board_info i2c_camera[] = {
579 {
580 I2C_BOARD_INFO("tw9910", 0x45),
581 },
582 {
583 /* 1st camera */
584 I2C_BOARD_INFO("mt9t112", 0x3c),
585 },
586 {
587 /* 2nd camera */
588 I2C_BOARD_INFO("mt9t112", 0x3c),
589 },
590};
591
592/* tw9910 */
593static int tw9910_power(struct device *dev, int mode)
594{
595 int val = mode ? 0 : 1;
596
597 gpio_set_value(GPIO_PTU2, val);
598 if (mode)
599 mdelay(100);
600
601 return 0;
602}
603
604static struct tw9910_video_info tw9910_info = {
605 .buswidth = SOCAM_DATAWIDTH_8,
606 .mpout = TW9910_MPO_FIELD,
607};
608
609static struct soc_camera_link tw9910_link = {
610 .i2c_adapter_id = 0,
611 .bus_id = 1,
612 .power = tw9910_power,
613 .board_info = &i2c_camera[0],
614 .module_name = "tw9910",
615 .priv = &tw9910_info,
616};
617
618/* mt9t112 */
619static int mt9t112_power1(struct device *dev, int mode)
620{
621 gpio_set_value(GPIO_PTA3, mode);
622 if (mode)
623 mdelay(100);
624
625 return 0;
626}
627
628static struct mt9t112_camera_info mt9t112_info1 = {
629 .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8,
630 .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */
631};
632
633static struct soc_camera_link mt9t112_link1 = {
634 .i2c_adapter_id = 0,
635 .power = mt9t112_power1,
636 .bus_id = 0,
637 .board_info = &i2c_camera[1],
638 .module_name = "mt9t112",
639 .priv = &mt9t112_info1,
640};
641
642static int mt9t112_power2(struct device *dev, int mode)
643{
644 gpio_set_value(GPIO_PTA4, mode);
645 if (mode)
646 mdelay(100);
647
648 return 0;
649}
650
651static struct mt9t112_camera_info mt9t112_info2 = {
652 .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8,
653 .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */
654};
655
656static struct soc_camera_link mt9t112_link2 = {
657 .i2c_adapter_id = 1,
658 .power = mt9t112_power2,
659 .bus_id = 1,
660 .board_info = &i2c_camera[2],
661 .module_name = "mt9t112",
662 .priv = &mt9t112_info2,
663};
664
665static struct platform_device camera_devices[] = {
666 {
667 .name = "soc-camera-pdrv",
668 .id = 0,
669 .dev = {
670 .platform_data = &tw9910_link,
671 },
672 },
673 {
674 .name = "soc-camera-pdrv",
675 .id = 1,
676 .dev = {
677 .platform_data = &mt9t112_link1,
678 },
679 },
680 {
681 .name = "soc-camera-pdrv",
682 .id = 2,
683 .dev = {
684 .platform_data = &mt9t112_link2,
685 },
686 },
687};
688
689/* FSI */
690/*
691 * FSI-B use external clock which came from da7210.
692 * So, we should change parent of fsi
693 */
694#define FCLKBCR 0xa415000c
695static void fsimck_init(struct clk *clk)
696{
697 u32 status = ctrl_inl(clk->enable_reg);
698
699 /* use external clock */
700 status &= ~0x000000ff;
701 status |= 0x00000080;
702
703 ctrl_outl(status, clk->enable_reg);
704}
705
706static struct clk_ops fsimck_clk_ops = {
707 .init = fsimck_init,
708};
709
710static struct clk fsimckb_clk = {
711 .name = "fsimckb_clk",
712 .id = -1,
713 .ops = &fsimck_clk_ops,
714 .enable_reg = (void __iomem *)FCLKBCR,
715 .rate = 0, /* unknown */
716};
717
718struct sh_fsi_platform_info fsi_info = {
719 .portb_flags = SH_FSI_BRS_INV |
720 SH_FSI_OUT_SLAVE_MODE |
721 SH_FSI_IN_SLAVE_MODE |
722 SH_FSI_OFMT(I2S) |
723 SH_FSI_IFMT(I2S),
724};
725
726static struct resource fsi_resources[] = {
727 [0] = {
728 .name = "FSI",
729 .start = 0xFE3C0000,
730 .end = 0xFE3C021d,
731 .flags = IORESOURCE_MEM,
732 },
733 [1] = {
734 .start = 108,
735 .flags = IORESOURCE_IRQ,
736 },
737};
738
739static struct platform_device fsi_device = {
740 .name = "sh_fsi",
741 .id = 0,
742 .num_resources = ARRAY_SIZE(fsi_resources),
743 .resource = fsi_resources,
744 .dev = {
745 .platform_data = &fsi_info,
746 },
747 .archdata = {
748 .hwblk_id = HWBLK_SPU, /* FSI needs SPU hwblk */
749 },
750};
751
496static struct platform_device *ecovec_devices[] __initdata = { 752static struct platform_device *ecovec_devices[] __initdata = {
497 &heartbeat_device, 753 &heartbeat_device,
498 &nor_flash_device, 754 &nor_flash_device,
@@ -503,8 +759,16 @@ static struct platform_device *ecovec_devices[] __initdata = {
503 &ceu0_device, 759 &ceu0_device,
504 &ceu1_device, 760 &ceu1_device,
505 &keysc_device, 761 &keysc_device,
762#ifdef CONFIG_MFD_SH_MOBILE_SDHI
506 &sdhi0_device, 763 &sdhi0_device,
507 &sdhi1_device, 764 &sdhi1_device,
765#else
766 &msiof0_device,
767#endif
768 &camera_devices[0],
769 &camera_devices[1],
770 &camera_devices[2],
771 &fsi_device,
508}; 772};
509 773
510#define EEPROM_ADDR 0x50 774#define EEPROM_ADDR 0x50
@@ -560,6 +824,8 @@ extern char ecovec24_sdram_leave_end;
560 824
561static int __init arch_setup(void) 825static int __init arch_setup(void)
562{ 826{
827 struct clk *clk;
828
563 /* register board specific self-refresh code */ 829 /* register board specific self-refresh code */
564 sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF, 830 sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
565 &ecovec24_sdram_enter_start, 831 &ecovec24_sdram_enter_start,
@@ -773,7 +1039,8 @@ static int __init arch_setup(void)
773 gpio_direction_input(GPIO_PTR5); 1039 gpio_direction_input(GPIO_PTR5);
774 gpio_direction_input(GPIO_PTR6); 1040 gpio_direction_input(GPIO_PTR6);
775 1041
776 /* enable SDHI0 (needs DS2.4 set to ON) */ 1042#ifdef CONFIG_MFD_SH_MOBILE_SDHI
1043 /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
777 gpio_request(GPIO_FN_SDHI0CD, NULL); 1044 gpio_request(GPIO_FN_SDHI0CD, NULL);
778 gpio_request(GPIO_FN_SDHI0WP, NULL); 1045 gpio_request(GPIO_FN_SDHI0WP, NULL);
779 gpio_request(GPIO_FN_SDHI0CMD, NULL); 1046 gpio_request(GPIO_FN_SDHI0CMD, NULL);
@@ -785,7 +1052,7 @@ static int __init arch_setup(void)
785 gpio_request(GPIO_PTB6, NULL); 1052 gpio_request(GPIO_PTB6, NULL);
786 gpio_direction_output(GPIO_PTB6, 0); 1053 gpio_direction_output(GPIO_PTB6, 0);
787 1054
788 /* enable SDHI1 (needs DS2.6,7 set to ON,OFF) */ 1055 /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
789 gpio_request(GPIO_FN_SDHI1CD, NULL); 1056 gpio_request(GPIO_FN_SDHI1CD, NULL);
790 gpio_request(GPIO_FN_SDHI1WP, NULL); 1057 gpio_request(GPIO_FN_SDHI1WP, NULL);
791 gpio_request(GPIO_FN_SDHI1CMD, NULL); 1058 gpio_request(GPIO_FN_SDHI1CMD, NULL);
@@ -799,8 +1066,59 @@ static int __init arch_setup(void)
799 1066
800 /* I/O buffer drive ability is high for SDHI1 */ 1067 /* I/O buffer drive ability is high for SDHI1 */
801 ctrl_outw((ctrl_inw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA); 1068 ctrl_outw((ctrl_inw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
1069#else
1070 /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */
1071 gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
1072 gpio_request(GPIO_FN_MSIOF0_RXD, NULL);
1073 gpio_request(GPIO_FN_MSIOF0_TSCK, NULL);
1074 gpio_request(GPIO_PTM4, NULL); /* software CS control of TSYNC pin */
1075 gpio_direction_output(GPIO_PTM4, 1); /* active low CS */
1076 gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */
1077 gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
1078 gpio_request(GPIO_PTY6, NULL); /* write protect */
1079 gpio_direction_input(GPIO_PTY6);
1080 gpio_request(GPIO_PTY7, NULL); /* card detect */
1081 gpio_direction_input(GPIO_PTY7);
1082
1083 spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
1084#endif
1085
1086 /* enable Video */
1087 gpio_request(GPIO_PTU2, NULL);
1088 gpio_direction_output(GPIO_PTU2, 1);
1089
1090 /* enable Camera */
1091 gpio_request(GPIO_PTA3, NULL);
1092 gpio_request(GPIO_PTA4, NULL);
1093 gpio_direction_output(GPIO_PTA3, 0);
1094 gpio_direction_output(GPIO_PTA4, 0);
1095
1096 /* enable FSI */
1097 gpio_request(GPIO_FN_FSIMCKB, NULL);
1098 gpio_request(GPIO_FN_FSIIBSD, NULL);
1099 gpio_request(GPIO_FN_FSIOBSD, NULL);
1100 gpio_request(GPIO_FN_FSIIBBCK, NULL);
1101 gpio_request(GPIO_FN_FSIIBLRCK, NULL);
1102 gpio_request(GPIO_FN_FSIOBBCK, NULL);
1103 gpio_request(GPIO_FN_FSIOBLRCK, NULL);
1104 gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
1105
1106 /* change parent of FSI B */
1107 clk = clk_get(NULL, "fsib_clk");
1108 clk_register(&fsimckb_clk);
1109 clk_set_parent(clk, &fsimckb_clk);
1110 clk_set_rate(clk, 11000);
1111 clk_set_rate(&fsimckb_clk, 11000);
1112 clk_put(clk);
1113
1114 gpio_request(GPIO_PTU0, NULL);
1115 gpio_direction_output(GPIO_PTU0, 0);
1116 mdelay(20);
802 1117
803 /* enable I2C device */ 1118 /* enable I2C device */
1119 i2c_register_board_info(0, i2c0_devices,
1120 ARRAY_SIZE(i2c0_devices));
1121
804 i2c_register_board_info(1, i2c1_devices, 1122 i2c_register_board_info(1, i2c1_devices,
805 ARRAY_SIZE(i2c1_devices)); 1123 ARRAY_SIZE(i2c1_devices));
806 1124
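The MMC-over-SPI fallback added above wires write protect and card detect to plain GPIOs, with card detect active low (the switch shorts the line to ground when a card is inserted). A minimal standalone sketch of that callback convention follows; gpio_get_value(), the pin numbers, and the fixed return value are stubs for illustration, not the kernel GPIO API.

#include <stdio.h>

/* Stubbed pins and GPIO read; on the board these come from the SoC GPIO layer. */
enum { PIN_WP = 6, PIN_CD = 7 };

static int gpio_get_value(int pin)
{
	/* Pretend: write-protect tab not set (0), card inserted so the
	 * card-detect line is pulled low (0). */
	(void)pin;
	return 0;
}

/* Same shape as the mmc_spi callbacks: WP is read straight through,
 * CD is inverted because the line is active low. */
static int mmc_spi_get_ro(void) { return gpio_get_value(PIN_WP); }
static int mmc_spi_get_cd(void) { return !gpio_get_value(PIN_CD); }

int main(void)
{
	printf("read-only: %d, card present: %d\n",
	       mmc_spi_get_ro(), mmc_spi_get_cd());
	return 0;
}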
diff --git a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
index 8ccb1cc8b589..e9b970846c41 100644
--- a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
+++ b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
@@ -273,6 +273,12 @@ int kfr2r09_lcd_setup(void *board_data, void *sohandle,
273 return 0; 273 return 0;
274} 274}
275 275
276void kfr2r09_lcd_start(void *board_data, void *sohandle,
277 struct sh_mobile_lcdc_sys_bus_ops *so)
278{
279 write_memory_start(sohandle, so);
280}
281
276#define CTRL_CKSW 0x10 282#define CTRL_CKSW 0x10
277#define CTRL_C10 0x20 283#define CTRL_C10 0x20
278#define CTRL_CPSW 0x80 284#define CTRL_CPSW 0x80
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 87438d6603d6..5d7b5d92475e 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -19,6 +19,7 @@
19#include <linux/input/sh_keysc.h> 19#include <linux/input/sh_keysc.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/usb/r8a66597.h> 21#include <linux/usb/r8a66597.h>
22#include <media/rj54n1cb0c.h>
22#include <media/soc_camera.h> 23#include <media/soc_camera.h>
23#include <media/sh_mobile_ceu.h> 24#include <media/sh_mobile_ceu.h>
24#include <video/sh_mobile_lcdc.h> 25#include <video/sh_mobile_lcdc.h>
@@ -149,6 +150,7 @@ static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
149 }, 150 },
150 .board_cfg = { 151 .board_cfg = {
151 .setup_sys = kfr2r09_lcd_setup, 152 .setup_sys = kfr2r09_lcd_setup,
153 .start_transfer = kfr2r09_lcd_start,
152 .display_on = kfr2r09_lcd_on, 154 .display_on = kfr2r09_lcd_on,
153 .display_off = kfr2r09_lcd_off, 155 .display_off = kfr2r09_lcd_off,
154 }, 156 },
@@ -255,6 +257,9 @@ static struct i2c_board_info kfr2r09_i2c_camera = {
255 257
256static struct clk *camera_clk; 258static struct clk *camera_clk;
257 259
260/* set VIO_CKO clock to 25MHz */
261#define CEU_MCLK_FREQ 25000000
262
258#define DRVCRB 0xA405018C 263#define DRVCRB 0xA405018C
259static int camera_power(struct device *dev, int mode) 264static int camera_power(struct device *dev, int mode)
260{ 265{
@@ -267,8 +272,7 @@ static int camera_power(struct device *dev, int mode)
267 if (IS_ERR(camera_clk)) 272 if (IS_ERR(camera_clk))
268 return PTR_ERR(camera_clk); 273 return PTR_ERR(camera_clk);
269 274
270 /* set VIO_CKO clock to 25MHz */ 275 rate = clk_round_rate(camera_clk, CEU_MCLK_FREQ);
271 rate = clk_round_rate(camera_clk, 25000000);
272 ret = clk_set_rate(camera_clk, rate); 276 ret = clk_set_rate(camera_clk, rate);
273 if (ret < 0) 277 if (ret < 0)
274 goto eclkrate; 278 goto eclkrate;
@@ -318,11 +322,17 @@ eclkrate:
318 return ret; 322 return ret;
319} 323}
320 324
325static struct rj54n1_pdata rj54n1_priv = {
326 .mclk_freq = CEU_MCLK_FREQ,
327 .ioctl_high = false,
328};
329
321static struct soc_camera_link rj54n1_link = { 330static struct soc_camera_link rj54n1_link = {
322 .power = camera_power, 331 .power = camera_power,
323 .board_info = &kfr2r09_i2c_camera, 332 .board_info = &kfr2r09_i2c_camera,
324 .i2c_adapter_id = 1, 333 .i2c_adapter_id = 1,
325 .module_name = "rj54n1cb0c", 334 .module_name = "rj54n1cb0c",
335 .priv = &rj54n1_priv,
326}; 336};
327 337
328static struct platform_device kfr2r09_camera = { 338static struct platform_device kfr2r09_camera = {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 9099b6da9957..507c77be476d 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -432,23 +432,27 @@ static struct i2c_board_info migor_i2c_camera[] = {
432 432
433static struct ov772x_camera_info ov7725_info = { 433static struct ov772x_camera_info ov7725_info = {
434 .buswidth = SOCAM_DATAWIDTH_8, 434 .buswidth = SOCAM_DATAWIDTH_8,
435 .link = { 435};
436 .power = ov7725_power, 436
437 .board_info = &migor_i2c_camera[0], 437static struct soc_camera_link ov7725_link = {
438 .i2c_adapter_id = 0, 438 .power = ov7725_power,
439 .module_name = "ov772x", 439 .board_info = &migor_i2c_camera[0],
440 }, 440 .i2c_adapter_id = 0,
441 .module_name = "ov772x",
442 .priv = &ov7725_info,
441}; 443};
442 444
443static struct tw9910_video_info tw9910_info = { 445static struct tw9910_video_info tw9910_info = {
444 .buswidth = SOCAM_DATAWIDTH_8, 446 .buswidth = SOCAM_DATAWIDTH_8,
445 .mpout = TW9910_MPO_FIELD, 447 .mpout = TW9910_MPO_FIELD,
446 .link = { 448};
447 .power = tw9910_power, 449
448 .board_info = &migor_i2c_camera[1], 450static struct soc_camera_link tw9910_link = {
449 .i2c_adapter_id = 0, 451 .power = tw9910_power,
450 .module_name = "tw9910", 452 .board_info = &migor_i2c_camera[1],
451 } 453 .i2c_adapter_id = 0,
454 .module_name = "tw9910",
455 .priv = &tw9910_info,
452}; 456};
453 457
454static struct platform_device migor_camera[] = { 458static struct platform_device migor_camera[] = {
@@ -456,13 +460,13 @@ static struct platform_device migor_camera[] = {
456 .name = "soc-camera-pdrv", 460 .name = "soc-camera-pdrv",
457 .id = 0, 461 .id = 0,
458 .dev = { 462 .dev = {
459 .platform_data = &ov7725_info.link, 463 .platform_data = &ov7725_link,
460 }, 464 },
461 }, { 465 }, {
462 .name = "soc-camera-pdrv", 466 .name = "soc-camera-pdrv",
463 .id = 1, 467 .id = 1,
464 .dev = { 468 .dev = {
465 .platform_data = &tw9910_info.link, 469 .platform_data = &tw9910_link,
466 }, 470 },
467 }, 471 },
468}; 472};
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 4eb31acfafef..b221b6842b0d 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -57,15 +57,16 @@ static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
57 */ 57 */
58void __init init_se7722_IRQ(void) 58void __init init_se7722_IRQ(void)
59{ 59{
60 int i; 60 int i, irq;
61 61
62 ctrl_outw(0, IRQ01_MASK); /* disable all irqs */ 62 ctrl_outw(0, IRQ01_MASK); /* disable all irqs */
63 ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */ 63 ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */
64 64
65 for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) { 65 for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
66 se7722_fpga_irq[i] = create_irq(); 66 irq = create_irq();
67 if (se7722_fpga_irq[i] < 0) 67 if (irq < 0)
68 return; 68 return;
69 se7722_fpga_irq[i] = irq;
69 70
70 set_irq_chip_and_handler_name(se7722_fpga_irq[i], 71 set_irq_chip_and_handler_name(se7722_fpga_irq[i],
71 &se7722_irq_chip, 72 &se7722_irq_chip,
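The init_se7722_IRQ() hunk above is a small ordering fix: the value returned by create_irq() is now checked in a local variable before it is written into se7722_fpga_irq[], so a failed allocation can no longer leave a negative number in the table. A standalone sketch of the same check-then-store pattern, with create_irq() replaced by a stub that fails after two allocations (stub behaviour and array size are illustrative only):

#include <stdio.h>

#define FPGA_IRQ_NR 4

/* Stand-in for create_irq(): hands out 100, 101, then fails with -1. */
static int create_irq(void)
{
	static int next = 100, left = 2;
	return left-- > 0 ? next++ : -1;
}

int main(void)
{
	int fpga_irq[FPGA_IRQ_NR] = { 0 };
	int i, irq;

	for (i = 0; i < FPGA_IRQ_NR; i++) {
		irq = create_irq();
		if (irq < 0)		/* check first ... */
			break;
		fpga_irq[i] = irq;	/* ... then store, as in the fixed loop */
	}

	for (i = 0; i < FPGA_IRQ_NR; i++)
		printf("fpga_irq[%d] = %d\n", i, fpga_irq[i]);
	return 0;
}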
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 4b0f0c0dc2b8..5d0f70b46c97 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -514,6 +514,13 @@ static struct platform_device *ms7724se_devices[] __initdata = {
514 &sdhi1_cn8_device, 514 &sdhi1_cn8_device,
515}; 515};
516 516
517/* I2C device */
518static struct i2c_board_info i2c0_devices[] = {
519 {
520 I2C_BOARD_INFO("ak4642", 0x12),
521 },
522};
523
517#define EEPROM_OP 0xBA206000 524#define EEPROM_OP 0xBA206000
518#define EEPROM_ADR 0xBA206004 525#define EEPROM_ADR 0xBA206004
519#define EEPROM_DATA 0xBA20600C 526#define EEPROM_DATA 0xBA20600C
@@ -575,6 +582,16 @@ extern char ms7724se_sdram_enter_end;
575extern char ms7724se_sdram_leave_start; 582extern char ms7724se_sdram_leave_start;
576extern char ms7724se_sdram_leave_end; 583extern char ms7724se_sdram_leave_end;
577 584
585
586static int __init arch_setup(void)
587{
588 /* enable I2C device */
589 i2c_register_board_info(0, i2c0_devices,
590 ARRAY_SIZE(i2c0_devices));
591 return 0;
592}
593arch_initcall(arch_setup);
594
578static int __init devices_setup(void) 595static int __init devices_setup(void)
579{ 596{
580 u16 sw = ctrl_inw(SW4140); /* select camera, monitor */ 597 u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig
index 0774924623cc..46874704e4e7 100644
--- a/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/arch/sh/configs/ecovec24-romimage_defconfig
@@ -203,7 +203,7 @@ CONFIG_MMU=y
203CONFIG_PAGE_OFFSET=0x80000000 203CONFIG_PAGE_OFFSET=0x80000000
204CONFIG_FORCE_MAX_ZONEORDER=11 204CONFIG_FORCE_MAX_ZONEORDER=11
205CONFIG_MEMORY_START=0x08000000 205CONFIG_MEMORY_START=0x08000000
206CONFIG_MEMORY_SIZE=0x08000000 206CONFIG_MEMORY_SIZE=0x10000000
207CONFIG_29BIT=y 207CONFIG_29BIT=y
208# CONFIG_X2TLB is not set 208# CONFIG_X2TLB is not set
209CONFIG_VSYSCALL=y 209CONFIG_VSYSCALL=y
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index ac6469718a2c..cad918437ca7 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -204,7 +204,7 @@ CONFIG_MMU=y
204CONFIG_PAGE_OFFSET=0x80000000 204CONFIG_PAGE_OFFSET=0x80000000
205CONFIG_FORCE_MAX_ZONEORDER=11 205CONFIG_FORCE_MAX_ZONEORDER=11
206CONFIG_MEMORY_START=0x08000000 206CONFIG_MEMORY_START=0x08000000
207CONFIG_MEMORY_SIZE=0x08000000 207CONFIG_MEMORY_SIZE=0x10000000
208CONFIG_29BIT=y 208CONFIG_29BIT=y
209# CONFIG_X2TLB is not set 209# CONFIG_X2TLB is not set
210CONFIG_VSYSCALL=y 210CONFIG_VSYSCALL=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index f521e82cc19e..6f1126b3e487 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -324,7 +324,7 @@ CONFIG_ENTRY_OFFSET=0x00001000
324# CONFIG_UBC_WAKEUP is not set 324# CONFIG_UBC_WAKEUP is not set
325CONFIG_CMDLINE_OVERWRITE=y 325CONFIG_CMDLINE_OVERWRITE=y
326# CONFIG_CMDLINE_EXTEND is not set 326# CONFIG_CMDLINE_EXTEND is not set
327CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial" 327CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
328 328
329# 329#
330# Bus options 330# Bus options
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index a156cd1e0617..9215bbb13d6f 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -324,7 +324,7 @@ CONFIG_ENTRY_OFFSET=0x00001000
324# CONFIG_UBC_WAKEUP is not set 324# CONFIG_UBC_WAKEUP is not set
325CONFIG_CMDLINE_OVERWRITE=y 325CONFIG_CMDLINE_OVERWRITE=y
326# CONFIG_CMDLINE_EXTEND is not set 326# CONFIG_CMDLINE_EXTEND is not set
327CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial" 327CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
328 328
329# 329#
330# Bus options 330# Bus options
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ccb1d93bb043..ac04255022b6 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
114 */ 114 */
115#define CORE_DUMP_USE_REGSET 115#define CORE_DUMP_USE_REGSET
116 116
117#define USE_ELF_CORE_DUMP
118#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC 117#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
119#define ELF_EXEC_PAGESIZE PAGE_SIZE 118#define ELF_EXEC_PAGESIZE PAGE_SIZE
120 119
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 512cd3e9d0ca..026dd659a640 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -233,11 +233,17 @@ unsigned long long poke_real_address_q(unsigned long long addr,
233 * doesn't exist, so everything must go through page tables. 233 * doesn't exist, so everything must go through page tables.
234 */ 234 */
235#ifdef CONFIG_MMU 235#ifdef CONFIG_MMU
236void __iomem *__ioremap(unsigned long offset, unsigned long size, 236void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
237 unsigned long flags); 237 unsigned long flags, void *caller);
238void __iounmap(void __iomem *addr); 238void __iounmap(void __iomem *addr);
239 239
240static inline void __iomem * 240static inline void __iomem *
241__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
242{
243 return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
244}
245
246static inline void __iomem *
241__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) 247__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
242{ 248{
243#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) 249#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
@@ -271,6 +277,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
271 return __ioremap(offset, size, flags); 277 return __ioremap(offset, size, flags);
272} 278}
273#else 279#else
280#define __ioremap(offset, size, flags) ((void __iomem *)(offset))
274#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset)) 281#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset))
275#define __iounmap(addr) do { } while (0) 282#define __iounmap(addr) do { } while (0)
276#endif /* CONFIG_MMU */ 283#endif /* CONFIG_MMU */
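The io.h change above turns __ioremap() into a thin inline wrapper that forwards __builtin_return_address(0) to __ioremap_caller(), so every mapping carries a cookie identifying the code that asked for it. A minimal userspace analogue of the wrapper pattern, compilable with GCC or Clang; map()/map_caller() and the dropped flags argument are simplifications, not the kernel interface:

#include <stdio.h>

/* The *_caller variant receives an explicit cookie identifying the caller. */
static void *map_caller(unsigned long offset, unsigned long size, void *caller)
{
	printf("mapping 0x%lx (+%lu bytes) for caller %p\n", offset, size, caller);
	return (void *)offset;	/* placeholder "mapping" */
}

/* Thin wrapper in the style of the new __ioremap(): callers keep the short
 * form and the caller cookie is filled in automatically at the call site. */
static inline void *map(unsigned long offset, unsigned long size)
{
	return map_caller(offset, size, __builtin_return_address(0));
}

int main(void)
{
	map(0xa4c40000UL, 0x64UL);
	return 0;
}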
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index b35435516203..5003ee86f67b 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -344,7 +344,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
344#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) 344#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
345 345
346#ifdef CONFIG_X2TLB 346#ifdef CONFIG_X2TLB
347#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) 347#define pte_write(pte) \
348 ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
348#else 349#else
349#define pte_write(pte) ((pte).pte_low & _PAGE_RW) 350#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
350#endif 351#endif
@@ -358,7 +359,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
358 * individually toggled (and user permissions are entirely decoupled from 359 * individually toggled (and user permissions are entirely decoupled from
359 * kernel permissions), we attempt to couple them a bit more sanely here. 360 * kernel permissions), we attempt to couple them a bit more sanely here.
360 */ 361 */
361PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); 362PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
362PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); 363PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
363PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); 364PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
364#else 365#else
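The pgtable_32.h hunk widens the X2TLB write test and write-protect mask to cover the kernel write bit as well as the user one, so kernel-only writable PTEs are no longer reported as read-only and wrprotect() matches mkwrite(), which sets both bits. A tiny sketch of the bit logic; the bit values here are made up for illustration, not the SH definitions:

#include <stdio.h>

#define EXT_USER_WRITE 0x1u
#define EXT_KERN_WRITE 0x2u	/* illustrative values only */

static int pte_write(unsigned int pte_high)
{
	/* After the fix: writable if either write bit is set. */
	return pte_high & (EXT_USER_WRITE | EXT_KERN_WRITE);
}

static unsigned int pte_wrprotect(unsigned int pte_high)
{
	/* wrprotect now clears both bits, mirroring mkwrite. */
	return pte_high & ~(EXT_USER_WRITE | EXT_KERN_WRITE);
}

int main(void)
{
	unsigned int kernel_only = EXT_KERN_WRITE;

	printf("old user-only test:     %d\n", !!(kernel_only & EXT_USER_WRITE));
	printf("combined test:          %d\n", !!pte_write(kernel_only));
	printf("after wrprotect:        %d\n", !!pte_write(pte_wrprotect(kernel_only)));
	return 0;
}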
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053fd..bdc0f3b6c56a 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
23 * Your basic SMP spinlocks, allowing only a single CPU anywhere 23 * Your basic SMP spinlocks, allowing only a single CPU anywhere
24 */ 24 */
25 25
26#define __raw_spin_is_locked(x) ((x)->lock <= 0) 26#define arch_spin_is_locked(x) ((x)->lock <= 0)
27#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 27#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
28#define __raw_spin_unlock_wait(x) \ 28#define arch_spin_unlock_wait(x) \
29 do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) 29 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
30 30
31/* 31/*
32 * Simple spin lock operations. There are two variants, one clears IRQ's 32 * Simple spin lock operations. There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
34 * 34 *
35 * We make no fairness assumptions. They have a cost. 35 * We make no fairness assumptions. They have a cost.
36 */ 36 */
37static inline void __raw_spin_lock(raw_spinlock_t *lock) 37static inline void arch_spin_lock(arch_spinlock_t *lock)
38{ 38{
39 unsigned long tmp; 39 unsigned long tmp;
40 unsigned long oldval; 40 unsigned long oldval;
41 41
42 __asm__ __volatile__ ( 42 __asm__ __volatile__ (
43 "1: \n\t" 43 "1: \n\t"
44 "movli.l @%2, %0 ! __raw_spin_lock \n\t" 44 "movli.l @%2, %0 ! arch_spin_lock \n\t"
45 "mov %0, %1 \n\t" 45 "mov %0, %1 \n\t"
46 "mov #0, %0 \n\t" 46 "mov #0, %0 \n\t"
47 "movco.l %0, @%2 \n\t" 47 "movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
54 ); 54 );
55} 55}
56 56
57static inline void __raw_spin_unlock(raw_spinlock_t *lock) 57static inline void arch_spin_unlock(arch_spinlock_t *lock)
58{ 58{
59 unsigned long tmp; 59 unsigned long tmp;
60 60
61 __asm__ __volatile__ ( 61 __asm__ __volatile__ (
62 "mov #1, %0 ! __raw_spin_unlock \n\t" 62 "mov #1, %0 ! arch_spin_unlock \n\t"
63 "mov.l %0, @%1 \n\t" 63 "mov.l %0, @%1 \n\t"
64 : "=&z" (tmp) 64 : "=&z" (tmp)
65 : "r" (&lock->lock) 65 : "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
67 ); 67 );
68} 68}
69 69
70static inline int __raw_spin_trylock(raw_spinlock_t *lock) 70static inline int arch_spin_trylock(arch_spinlock_t *lock)
71{ 71{
72 unsigned long tmp, oldval; 72 unsigned long tmp, oldval;
73 73
74 __asm__ __volatile__ ( 74 __asm__ __volatile__ (
75 "1: \n\t" 75 "1: \n\t"
76 "movli.l @%2, %0 ! __raw_spin_trylock \n\t" 76 "movli.l @%2, %0 ! arch_spin_trylock \n\t"
77 "mov %0, %1 \n\t" 77 "mov %0, %1 \n\t"
78 "mov #0, %0 \n\t" 78 "mov #0, %0 \n\t"
79 "movco.l %0, @%2 \n\t" 79 "movco.l %0, @%2 \n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
100 * read_can_lock - would read_trylock() succeed? 100 * read_can_lock - would read_trylock() succeed?
101 * @lock: the rwlock in question. 101 * @lock: the rwlock in question.
102 */ 102 */
103#define __raw_read_can_lock(x) ((x)->lock > 0) 103#define arch_read_can_lock(x) ((x)->lock > 0)
104 104
105/** 105/**
106 * write_can_lock - would write_trylock() succeed? 106 * write_can_lock - would write_trylock() succeed?
107 * @lock: the rwlock in question. 107 * @lock: the rwlock in question.
108 */ 108 */
109#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 109#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
110 110
111static inline void __raw_read_lock(raw_rwlock_t *rw) 111static inline void arch_read_lock(arch_rwlock_t *rw)
112{ 112{
113 unsigned long tmp; 113 unsigned long tmp;
114 114
115 __asm__ __volatile__ ( 115 __asm__ __volatile__ (
116 "1: \n\t" 116 "1: \n\t"
117 "movli.l @%1, %0 ! __raw_read_lock \n\t" 117 "movli.l @%1, %0 ! arch_read_lock \n\t"
118 "cmp/pl %0 \n\t" 118 "cmp/pl %0 \n\t"
119 "bf 1b \n\t" 119 "bf 1b \n\t"
120 "add #-1, %0 \n\t" 120 "add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
126 ); 126 );
127} 127}
128 128
129static inline void __raw_read_unlock(raw_rwlock_t *rw) 129static inline void arch_read_unlock(arch_rwlock_t *rw)
130{ 130{
131 unsigned long tmp; 131 unsigned long tmp;
132 132
133 __asm__ __volatile__ ( 133 __asm__ __volatile__ (
134 "1: \n\t" 134 "1: \n\t"
135 "movli.l @%1, %0 ! __raw_read_unlock \n\t" 135 "movli.l @%1, %0 ! arch_read_unlock \n\t"
136 "add #1, %0 \n\t" 136 "add #1, %0 \n\t"
137 "movco.l %0, @%1 \n\t" 137 "movco.l %0, @%1 \n\t"
138 "bf 1b \n\t" 138 "bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
142 ); 142 );
143} 143}
144 144
145static inline void __raw_write_lock(raw_rwlock_t *rw) 145static inline void arch_write_lock(arch_rwlock_t *rw)
146{ 146{
147 unsigned long tmp; 147 unsigned long tmp;
148 148
149 __asm__ __volatile__ ( 149 __asm__ __volatile__ (
150 "1: \n\t" 150 "1: \n\t"
151 "movli.l @%1, %0 ! __raw_write_lock \n\t" 151 "movli.l @%1, %0 ! arch_write_lock \n\t"
152 "cmp/hs %2, %0 \n\t" 152 "cmp/hs %2, %0 \n\t"
153 "bf 1b \n\t" 153 "bf 1b \n\t"
154 "sub %2, %0 \n\t" 154 "sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
160 ); 160 );
161} 161}
162 162
163static inline void __raw_write_unlock(raw_rwlock_t *rw) 163static inline void arch_write_unlock(arch_rwlock_t *rw)
164{ 164{
165 __asm__ __volatile__ ( 165 __asm__ __volatile__ (
166 "mov.l %1, @%0 ! __raw_write_unlock \n\t" 166 "mov.l %1, @%0 ! arch_write_unlock \n\t"
167 : 167 :
168 : "r" (&rw->lock), "r" (RW_LOCK_BIAS) 168 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
169 : "t", "memory" 169 : "t", "memory"
170 ); 170 );
171} 171}
172 172
173static inline int __raw_read_trylock(raw_rwlock_t *rw) 173static inline int arch_read_trylock(arch_rwlock_t *rw)
174{ 174{
175 unsigned long tmp, oldval; 175 unsigned long tmp, oldval;
176 176
177 __asm__ __volatile__ ( 177 __asm__ __volatile__ (
178 "1: \n\t" 178 "1: \n\t"
179 "movli.l @%2, %0 ! __raw_read_trylock \n\t" 179 "movli.l @%2, %0 ! arch_read_trylock \n\t"
180 "mov %0, %1 \n\t" 180 "mov %0, %1 \n\t"
181 "cmp/pl %0 \n\t" 181 "cmp/pl %0 \n\t"
182 "bf 2f \n\t" 182 "bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
193 return (oldval > 0); 193 return (oldval > 0);
194} 194}
195 195
196static inline int __raw_write_trylock(raw_rwlock_t *rw) 196static inline int arch_write_trylock(arch_rwlock_t *rw)
197{ 197{
198 unsigned long tmp, oldval; 198 unsigned long tmp, oldval;
199 199
200 __asm__ __volatile__ ( 200 __asm__ __volatile__ (
201 "1: \n\t" 201 "1: \n\t"
202 "movli.l @%2, %0 ! __raw_write_trylock \n\t" 202 "movli.l @%2, %0 ! arch_write_trylock \n\t"
203 "mov %0, %1 \n\t" 203 "mov %0, %1 \n\t"
204 "cmp/hs %3, %0 \n\t" 204 "cmp/hs %3, %0 \n\t"
205 "bf 2f \n\t" 205 "bf 2f \n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
216 return (oldval > (RW_LOCK_BIAS - 1)); 216 return (oldval > (RW_LOCK_BIAS - 1));
217} 217}
218 218
219#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 219#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
220#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 220#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
221 221
222#define _raw_spin_relax(lock) cpu_relax() 222#define arch_spin_relax(lock) cpu_relax()
223#define _raw_read_relax(lock) cpu_relax() 223#define arch_read_relax(lock) cpu_relax()
224#define _raw_write_relax(lock) cpu_relax() 224#define arch_write_relax(lock) cpu_relax()
225 225
226#endif /* __ASM_SH_SPINLOCK_H */ 226#endif /* __ASM_SH_SPINLOCK_H */
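The spinlock.h changes above are a mechanical rename from the __raw_* / raw_* namespace to arch_* / arch_*; the movli.l/movco.l load-linked/store-conditional sequences are unchanged. For readers without SH assembly, a portable analogue of the trylock path using GCC __atomic builtins, keeping the same lock-word convention (1 = free, 0 = held); this is a stand-in sketch, not the kernel implementation:

#include <stdio.h>

typedef struct { volatile unsigned int lock; } my_spinlock_t;
#define MY_SPIN_LOCK_UNLOCKED { 1 }

static int my_spin_trylock(my_spinlock_t *l)
{
	/* Portable stand-in for the movli.l/movco.l pair: atomically swap in 0
	 * and succeed only if the previous value was non-zero (lock was free). */
	unsigned int old = __atomic_exchange_n(&l->lock, 0, __ATOMIC_ACQUIRE);
	return old > 0;
}

static void my_spin_unlock(my_spinlock_t *l)
{
	__atomic_store_n(&l->lock, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	my_spinlock_t l = MY_SPIN_LOCK_UNLOCKED;

	printf("first trylock:  %d\n", my_spin_trylock(&l));	/* acquired */
	printf("second trylock: %d\n", my_spin_trylock(&l));	/* already held */
	my_spin_unlock(&l);
	printf("after unlock:   %d\n", my_spin_trylock(&l));	/* acquired again */
	return 0;
}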
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index b4d244e7b60c..9b7560db06ca 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int lock; 15 volatile unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define RW_LOCK_BIAS 0x01000000 18#define RW_LOCK_BIAS 0x01000000
19#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 19#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
20 20
21#endif 21#endif
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index f3fd1b9eb6b1..f18c4f9baf27 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -345,8 +345,9 @@
345#define __NR_pwritev 334 345#define __NR_pwritev 334
346#define __NR_rt_tgsigqueueinfo 335 346#define __NR_rt_tgsigqueueinfo 335
347#define __NR_perf_event_open 336 347#define __NR_perf_event_open 336
348#define __NR_recvmmsg 337
348 349
349#define NR_syscalls 337 350#define NR_syscalls 338
350 351
351#ifdef __KERNEL__ 352#ifdef __KERNEL__
352 353
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 343ce8f073ea..3e7645d11130 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -385,10 +385,11 @@
385#define __NR_pwritev 362 385#define __NR_pwritev 362
386#define __NR_rt_tgsigqueueinfo 363 386#define __NR_rt_tgsigqueueinfo 363
387#define __NR_perf_event_open 364 387#define __NR_perf_event_open 364
388#define __NR_recvmmsg 365
388 389
389#ifdef __KERNEL__ 390#ifdef __KERNEL__
390 391
391#define NR_syscalls 365 392#define NR_syscalls 366
392 393
393#define __ARCH_WANT_IPC_PARSE_VERSION 394#define __ARCH_WANT_IPC_PARSE_VERSION
394#define __ARCH_WANT_OLD_READDIR 395#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h b/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
index 174374e19547..484ef42c2fb5 100644
--- a/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
+++ b/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
@@ -8,6 +8,8 @@ void kfr2r09_lcd_on(void *board_data);
8void kfr2r09_lcd_off(void *board_data); 8void kfr2r09_lcd_off(void *board_data);
9int kfr2r09_lcd_setup(void *board_data, void *sys_ops_handle, 9int kfr2r09_lcd_setup(void *board_data, void *sys_ops_handle,
10 struct sh_mobile_lcdc_sys_bus_ops *sys_ops); 10 struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
11void kfr2r09_lcd_start(void *board_data, void *sys_ops_handle,
12 struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
11#else 13#else
12static inline void kfr2r09_lcd_on(void *board_data) {} 14static inline void kfr2r09_lcd_on(void *board_data) {}
13static inline void kfr2r09_lcd_off(void *board_data) {} 15static inline void kfr2r09_lcd_off(void *board_data) {}
@@ -16,6 +18,10 @@ static inline int kfr2r09_lcd_setup(void *board_data, void *sys_ops_handle,
16{ 18{
17 return -ENODEV; 19 return -ENODEV;
18} 20}
21static inline void kfr2r09_lcd_start(void *board_data, void *sys_ops_handle,
22 struct sh_mobile_lcdc_sys_bus_ops *sys_ops)
23{
24}
19#endif 25#endif
20 26
21#endif /* __ASM_SH_KFR2R09_H */ 27#endif /* __ASM_SH_KFR2R09_H */
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 0471a3eb25ed..0d587da1ef12 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -22,11 +22,10 @@ obj-y := debugtraps.o dma-nommu.o dumpstack.o \
22obj-y += cpu/ 22obj-y += cpu/
23obj-$(CONFIG_VSYSCALL) += vsyscall/ 23obj-$(CONFIG_VSYSCALL) += vsyscall/
24obj-$(CONFIG_SMP) += smp.o 24obj-$(CONFIG_SMP) += smp.o
25obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o 25obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o early_printk.o
26obj-$(CONFIG_KGDB) += kgdb.o 26obj-$(CONFIG_KGDB) += kgdb.o
27obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o 27obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
28obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o 28obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
29obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
30obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 29obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
31obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 30obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
32obj-$(CONFIG_STACKTRACE) += stacktrace.o 31obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index c1508a90fc6a..9282d965a1b6 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -17,16 +17,17 @@
17 * for more details. 17 * for more details.
18 */ 18 */
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
20#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/kernel.h>
21#include <linux/module.h> 24#include <linux/module.h>
22#include <linux/io.h>
23#include <linux/interrupt.h>
24#include <linux/topology.h> 25#include <linux/topology.h>
25 26
26static inline struct ipr_desc *get_ipr_desc(unsigned int irq) 27static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
27{ 28{
28 struct irq_chip *chip = get_irq_chip(irq); 29 struct irq_chip *chip = get_irq_chip(irq);
29 return (void *)((char *)chip - offsetof(struct ipr_desc, chip)); 30 return container_of(chip, struct ipr_desc, chip);
30} 31}
31 32
32static void disable_ipr_irq(unsigned int irq) 33static void disable_ipr_irq(unsigned int irq)
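The ipr.c hunk replaces open-coded offsetof() pointer arithmetic with container_of(), which recovers the enclosing ipr_desc from a pointer to its embedded irq_chip. A minimal userspace sketch of the idiom; the structs are trimmed stand-ins, and the kernel macro additionally carries a typeof-based type check that is omitted here:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_chip { const char *name; };

struct ipr_desc {
	int nr_irqs;
	struct irq_chip chip;	/* embedded, deliberately not the first member */
};

int main(void)
{
	struct ipr_desc desc = { .nr_irqs = 4, .chip = { .name = "ipr" } };
	struct irq_chip *chip = &desc.chip;

	/* Recover the enclosing ipr_desc from the embedded chip pointer,
	 * as get_ipr_desc() now does with container_of(). */
	struct ipr_desc *d = container_of(chip, struct ipr_desc, chip);

	printf("desc %p, recovered %p, nr_irqs %d\n",
	       (void *)&desc, (void *)d, d->nr_irqs);
	return 0;
}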
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 8555c05e8667..114c7cee7184 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -59,32 +59,48 @@ static struct intc_prio_reg prio_registers[] __initdata = {
59static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL, 59static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL,
60 NULL, prio_registers, NULL); 60 NULL, prio_registers, NULL);
61 61
62static struct plat_sci_port sci_platform_data[] = { 62static struct plat_sci_port scif0_platform_data = {
63 { 63 .mapbase = 0xf8400000,
64 .mapbase = 0xf8400000, 64 .flags = UPF_BOOT_AUTOCONF,
65 .flags = UPF_BOOT_AUTOCONF, 65 .type = PORT_SCIF,
66 .type = PORT_SCIF, 66 .irqs = { 88, 88, 88, 88 },
67 .irqs = { 88, 88, 88, 88 }, 67};
68 }, { 68
69 .mapbase = 0xf8410000, 69static struct platform_device scif0_device = {
70 .flags = UPF_BOOT_AUTOCONF, 70 .name = "sh-sci",
71 .type = PORT_SCIF, 71 .id = 0,
72 .irqs = { 92, 92, 92, 92 }, 72 .dev = {
73 }, { 73 .platform_data = &scif0_platform_data,
74 .mapbase = 0xf8420000, 74 },
75 .flags = UPF_BOOT_AUTOCONF, 75};
76 .type = PORT_SCIF, 76
77 .irqs = { 96, 96, 96, 96 }, 77static struct plat_sci_port scif1_platform_data = {
78 }, { 78 .mapbase = 0xf8410000,
79 .flags = 0, 79 .flags = UPF_BOOT_AUTOCONF,
80 } 80 .type = PORT_SCIF,
81}; 81 .irqs = { 92, 92, 92, 92 },
82 82};
83static struct platform_device sci_device = { 83
84static struct platform_device scif1_device = {
85 .name = "sh-sci",
86 .id = 1,
87 .dev = {
88 .platform_data = &scif1_platform_data,
89 },
90};
91
92static struct plat_sci_port scif2_platform_data = {
93 .mapbase = 0xf8420000,
94 .flags = UPF_BOOT_AUTOCONF,
95 .type = PORT_SCIF,
96 .irqs = { 96, 96, 96, 96 },
97};
98
99static struct platform_device scif2_device = {
84 .name = "sh-sci", 100 .name = "sh-sci",
85 .id = -1, 101 .id = 2,
86 .dev = { 102 .dev = {
87 .platform_data = sci_platform_data, 103 .platform_data = &scif2_platform_data,
88 }, 104 },
89}; 105};
90 106
@@ -176,7 +192,9 @@ static struct platform_device cmt1_device = {
176}; 192};
177 193
178static struct platform_device *sh7619_devices[] __initdata = { 194static struct platform_device *sh7619_devices[] __initdata = {
179 &sci_device, 195 &scif0_device,
196 &scif1_device,
197 &scif2_device,
180 &eth_device, 198 &eth_device,
181 &cmt0_device, 199 &cmt0_device,
182 &cmt1_device, 200 &cmt1_device,
@@ -195,6 +213,9 @@ void __init plat_irq_setup(void)
195} 213}
196 214
197static struct platform_device *sh7619_early_devices[] __initdata = { 215static struct platform_device *sh7619_early_devices[] __initdata = {
216 &scif0_device,
217 &scif1_device,
218 &scif2_device,
198 &cmt0_device, 219 &cmt0_device,
199 &cmt1_device, 220 &cmt1_device,
200}; 221};
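This setup-sh7619.c rework, and the analogous ones that follow, split the single sh-sci device with a sentinel-terminated plat_sci_port array into one explicitly numbered platform device per SCIF port, registered by ARRAY_SIZE() rather than by scanning for a ".flags = 0" terminator record. A standalone sketch of the resulting data layout; the structs are heavily trimmed stand-ins (single irq field, no flags/type), only the mapbase and IRQ numbers are taken from the diff above:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct port_data { unsigned long mapbase; int irq; };
struct device    { const char *name; int id; const struct port_data *data; };

static const struct port_data scif0_data = { 0xf8400000, 88 };
static const struct port_data scif1_data = { 0xf8410000, 92 };
static const struct port_data scif2_data = { 0xf8420000, 96 };

/* One named, numbered entry per port; iteration uses ARRAY_SIZE(),
 * so no sentinel record is needed. */
static const struct device devices[] = {
	{ "sh-sci", 0, &scif0_data },
	{ "sh-sci", 1, &scif1_data },
	{ "sh-sci", 2, &scif2_data },
};

int main(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(devices); i++)
		printf("%s.%d at 0x%08lx irq %d\n",
		       devices[i].name, devices[i].id,
		       devices[i].data->mapbase, devices[i].data->irq);
	return 0;
}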
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index b67376445315..8f669dc9b0da 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -207,27 +207,23 @@ static struct platform_device mtu2_2_device = {
207 .num_resources = ARRAY_SIZE(mtu2_2_resources), 207 .num_resources = ARRAY_SIZE(mtu2_2_resources),
208}; 208};
209 209
210static struct plat_sci_port sci_platform_data[] = { 210static struct plat_sci_port scif0_platform_data = {
211 { 211 .mapbase = 0xff804000,
212 .mapbase = 0xff804000, 212 .flags = UPF_BOOT_AUTOCONF,
213 .flags = UPF_BOOT_AUTOCONF, 213 .type = PORT_SCIF,
214 .type = PORT_SCIF, 214 .irqs = { 220, 220, 220, 220 },
215 .irqs = { 220, 220, 220, 220 },
216 }, {
217 .flags = 0,
218 }
219}; 215};
220 216
221static struct platform_device sci_device = { 217static struct platform_device scif0_device = {
222 .name = "sh-sci", 218 .name = "sh-sci",
223 .id = -1, 219 .id = 0,
224 .dev = { 220 .dev = {
225 .platform_data = sci_platform_data, 221 .platform_data = &scif0_platform_data,
226 }, 222 },
227}; 223};
228 224
229static struct platform_device *mxg_devices[] __initdata = { 225static struct platform_device *mxg_devices[] __initdata = {
230 &sci_device, 226 &scif0_device,
231 &mtu2_0_device, 227 &mtu2_0_device,
232 &mtu2_1_device, 228 &mtu2_1_device,
233 &mtu2_2_device, 229 &mtu2_2_device,
@@ -246,6 +242,7 @@ void __init plat_irq_setup(void)
246} 242}
247 243
248static struct platform_device *mxg_early_devices[] __initdata = { 244static struct platform_device *mxg_early_devices[] __initdata = {
245 &scif0_device,
249 &mtu2_0_device, 246 &mtu2_0_device,
250 &mtu2_1_device, 247 &mtu2_1_device,
251 &mtu2_2_device, 248 &mtu2_2_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index fbde5b75deb9..4ccfeb59eb1a 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -177,57 +177,123 @@ static struct intc_mask_reg mask_registers[] __initdata = {
177static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups, 177static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups,
178 mask_registers, prio_registers, NULL); 178 mask_registers, prio_registers, NULL);
179 179
180static struct plat_sci_port sci_platform_data[] = { 180static struct plat_sci_port scif0_platform_data = {
181 { 181 .mapbase = 0xfffe8000,
182 .mapbase = 0xfffe8000, 182 .flags = UPF_BOOT_AUTOCONF,
183 .flags = UPF_BOOT_AUTOCONF, 183 .type = PORT_SCIF,
184 .type = PORT_SCIF, 184 .irqs = { 180, 180, 180, 180 }
185 .irqs = { 180, 180, 180, 180 } 185};
186 }, { 186
187 .mapbase = 0xfffe8800, 187static struct platform_device scif0_device = {
188 .flags = UPF_BOOT_AUTOCONF,
189 .type = PORT_SCIF,
190 .irqs = { 184, 184, 184, 184 }
191 }, {
192 .mapbase = 0xfffe9000,
193 .flags = UPF_BOOT_AUTOCONF,
194 .type = PORT_SCIF,
195 .irqs = { 188, 188, 188, 188 }
196 }, {
197 .mapbase = 0xfffe9800,
198 .flags = UPF_BOOT_AUTOCONF,
199 .type = PORT_SCIF,
200 .irqs = { 192, 192, 192, 192 }
201 }, {
202 .mapbase = 0xfffea000,
203 .flags = UPF_BOOT_AUTOCONF,
204 .type = PORT_SCIF,
205 .irqs = { 196, 196, 196, 196 }
206 }, {
207 .mapbase = 0xfffea800,
208 .flags = UPF_BOOT_AUTOCONF,
209 .type = PORT_SCIF,
210 .irqs = { 200, 200, 200, 200 }
211 }, {
212 .mapbase = 0xfffeb000,
213 .flags = UPF_BOOT_AUTOCONF,
214 .type = PORT_SCIF,
215 .irqs = { 204, 204, 204, 204 }
216 }, {
217 .mapbase = 0xfffeb800,
218 .flags = UPF_BOOT_AUTOCONF,
219 .type = PORT_SCIF,
220 .irqs = { 208, 208, 208, 208 }
221 }, {
222 .flags = 0,
223 }
224};
225
226static struct platform_device sci_device = {
227 .name = "sh-sci", 188 .name = "sh-sci",
228 .id = -1, 189 .id = 0,
190 .dev = {
191 .platform_data = &scif0_platform_data,
192 },
193};
194
195static struct plat_sci_port scif1_platform_data = {
196 .mapbase = 0xfffe8800,
197 .flags = UPF_BOOT_AUTOCONF,
198 .type = PORT_SCIF,
199 .irqs = { 184, 184, 184, 184 }
200};
201
202static struct platform_device scif1_device = {
203 .name = "sh-sci",
204 .id = 1,
205 .dev = {
206 .platform_data = &scif1_platform_data,
207 },
208};
209
210static struct plat_sci_port scif2_platform_data = {
211 .mapbase = 0xfffe9000,
212 .flags = UPF_BOOT_AUTOCONF,
213 .type = PORT_SCIF,
214 .irqs = { 188, 188, 188, 188 }
215};
216
217static struct platform_device scif2_device = {
218 .name = "sh-sci",
219 .id = 2,
220 .dev = {
221 .platform_data = &scif2_platform_data,
222 },
223};
224
225static struct plat_sci_port scif3_platform_data = {
226 .mapbase = 0xfffe9800,
227 .flags = UPF_BOOT_AUTOCONF,
228 .type = PORT_SCIF,
229 .irqs = { 192, 192, 192, 192 }
230};
231
232static struct platform_device scif3_device = {
233 .name = "sh-sci",
234 .id = 3,
235 .dev = {
236 .platform_data = &scif3_platform_data,
237 },
238};
239
240static struct plat_sci_port scif4_platform_data = {
241 .mapbase = 0xfffea000,
242 .flags = UPF_BOOT_AUTOCONF,
243 .type = PORT_SCIF,
244 .irqs = { 196, 196, 196, 196 }
245};
246
247static struct platform_device scif4_device = {
248 .name = "sh-sci",
249 .id = 4,
250 .dev = {
251 .platform_data = &scif4_platform_data,
252 },
253};
254
255static struct plat_sci_port scif5_platform_data = {
256 .mapbase = 0xfffea800,
257 .flags = UPF_BOOT_AUTOCONF,
258 .type = PORT_SCIF,
259 .irqs = { 200, 200, 200, 200 }
260};
261
262static struct platform_device scif5_device = {
263 .name = "sh-sci",
264 .id = 5,
265 .dev = {
266 .platform_data = &scif5_platform_data,
267 },
268};
269
270static struct plat_sci_port scif6_platform_data = {
271 .mapbase = 0xfffeb000,
272 .flags = UPF_BOOT_AUTOCONF,
273 .type = PORT_SCIF,
274 .irqs = { 204, 204, 204, 204 }
275};
276
277static struct platform_device scif6_device = {
278 .name = "sh-sci",
279 .id = 6,
280 .dev = {
281 .platform_data = &scif6_platform_data,
282 },
283};
284
285static struct plat_sci_port scif7_platform_data = {
286 .mapbase = 0xfffeb800,
287 .flags = UPF_BOOT_AUTOCONF,
288 .type = PORT_SCIF,
289 .irqs = { 208, 208, 208, 208 }
290};
291
292static struct platform_device scif7_device = {
293 .name = "sh-sci",
294 .id = 7,
229 .dev = { 295 .dev = {
230 .platform_data = sci_platform_data, 296 .platform_data = &scif7_platform_data,
231 }, 297 },
232}; 298};
233 299
@@ -345,7 +411,14 @@ static struct platform_device mtu2_2_device = {
345}; 411};
346 412
347static struct platform_device *sh7201_devices[] __initdata = { 413static struct platform_device *sh7201_devices[] __initdata = {
348 &sci_device, 414 &scif0_device,
415 &scif1_device,
416 &scif2_device,
417 &scif3_device,
418 &scif4_device,
419 &scif5_device,
420 &scif6_device,
421 &scif7_device,
349 &rtc_device, 422 &rtc_device,
350 &mtu2_0_device, 423 &mtu2_0_device,
351 &mtu2_1_device, 424 &mtu2_1_device,
@@ -365,6 +438,14 @@ void __init plat_irq_setup(void)
365} 438}
366 439
367static struct platform_device *sh7201_early_devices[] __initdata = { 440static struct platform_device *sh7201_early_devices[] __initdata = {
441 &scif0_device,
442 &scif1_device,
443 &scif2_device,
444 &scif3_device,
445 &scif4_device,
446 &scif5_device,
447 &scif6_device,
448 &scif7_device,
368 &mtu2_0_device, 449 &mtu2_0_device,
369 &mtu2_1_device, 450 &mtu2_1_device,
370 &mtu2_2_device, 451 &mtu2_2_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index d3fd536c9a84..3136966cc9b3 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -173,37 +173,63 @@ static struct intc_mask_reg mask_registers[] __initdata = {
173static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups, 173static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
174 mask_registers, prio_registers, NULL); 174 mask_registers, prio_registers, NULL);
175 175
176static struct plat_sci_port sci_platform_data[] = { 176static struct plat_sci_port scif0_platform_data = {
177 { 177 .mapbase = 0xfffe8000,
178 .mapbase = 0xfffe8000, 178 .flags = UPF_BOOT_AUTOCONF,
179 .flags = UPF_BOOT_AUTOCONF, 179 .type = PORT_SCIF,
180 .type = PORT_SCIF, 180 .irqs = { 192, 192, 192, 192 },
181 .irqs = { 192, 192, 192, 192 },
182 }, {
183 .mapbase = 0xfffe8800,
184 .flags = UPF_BOOT_AUTOCONF,
185 .type = PORT_SCIF,
186 .irqs = { 196, 196, 196, 196 },
187 }, {
188 .mapbase = 0xfffe9000,
189 .flags = UPF_BOOT_AUTOCONF,
190 .type = PORT_SCIF,
191 .irqs = { 200, 200, 200, 200 },
192 }, {
193 .mapbase = 0xfffe9800,
194 .flags = UPF_BOOT_AUTOCONF,
195 .type = PORT_SCIF,
196 .irqs = { 204, 204, 204, 204 },
197 }, {
198 .flags = 0,
199 }
200}; 181};
201 182
202static struct platform_device sci_device = { 183static struct platform_device scif0_device = {
203 .name = "sh-sci", 184 .name = "sh-sci",
204 .id = -1, 185 .id = 0,
186 .dev = {
187 .platform_data = &scif0_platform_data,
188 },
189};
190
191static struct plat_sci_port scif1_platform_data = {
192 .mapbase = 0xfffe8800,
193 .flags = UPF_BOOT_AUTOCONF,
194 .type = PORT_SCIF,
195 .irqs = { 196, 196, 196, 196 },
196};
197
198static struct platform_device scif1_device = {
199 .name = "sh-sci",
200 .id = 1,
201 .dev = {
202 .platform_data = &scif1_platform_data,
203 },
204};
205
206static struct plat_sci_port scif2_platform_data = {
207 .mapbase = 0xfffe9000,
208 .flags = UPF_BOOT_AUTOCONF,
209 .type = PORT_SCIF,
210 .irqs = { 200, 200, 200, 200 },
211};
212
213static struct platform_device scif2_device = {
214 .name = "sh-sci",
215 .id = 2,
216 .dev = {
217 .platform_data = &scif2_platform_data,
218 },
219};
220
221static struct plat_sci_port scif3_platform_data = {
222 .mapbase = 0xfffe9800,
223 .flags = UPF_BOOT_AUTOCONF,
224 .type = PORT_SCIF,
225 .irqs = { 204, 204, 204, 204 },
226};
227
228static struct platform_device scif3_device = {
229 .name = "sh-sci",
230 .id = 3,
205 .dev = { 231 .dev = {
206 .platform_data = sci_platform_data, 232 .platform_data = &scif3_platform_data,
207 }, 233 },
208}; 234};
209 235
@@ -354,7 +380,10 @@ static struct platform_device rtc_device = {
354}; 380};
355 381
356static struct platform_device *sh7203_devices[] __initdata = { 382static struct platform_device *sh7203_devices[] __initdata = {
357 &sci_device, 383 &scif0_device,
384 &scif1_device,
385 &scif2_device,
386 &scif3_device,
358 &cmt0_device, 387 &cmt0_device,
359 &cmt1_device, 388 &cmt1_device,
360 &mtu2_0_device, 389 &mtu2_0_device,
@@ -375,6 +404,10 @@ void __init plat_irq_setup(void)
375} 404}
376 405
377static struct platform_device *sh7203_early_devices[] __initdata = { 406static struct platform_device *sh7203_early_devices[] __initdata = {
407 &scif0_device,
408 &scif1_device,
409 &scif2_device,
410 &scif3_device,
378 &cmt0_device, 411 &cmt0_device,
379 &cmt1_device, 412 &cmt1_device,
380 &mtu2_0_device, 413 &mtu2_0_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index a9ccc5e8d9e9..064873585a8b 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -133,37 +133,63 @@ static struct intc_mask_reg mask_registers[] __initdata = {
133static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups, 133static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
134 mask_registers, prio_registers, NULL); 134 mask_registers, prio_registers, NULL);
135 135
136static struct plat_sci_port sci_platform_data[] = { 136static struct plat_sci_port scif0_platform_data = {
137 { 137 .mapbase = 0xfffe8000,
138 .mapbase = 0xfffe8000, 138 .flags = UPF_BOOT_AUTOCONF,
139 .flags = UPF_BOOT_AUTOCONF, 139 .type = PORT_SCIF,
140 .type = PORT_SCIF, 140 .irqs = { 240, 240, 240, 240 },
141 .irqs = { 240, 240, 240, 240 },
142 }, {
143 .mapbase = 0xfffe8800,
144 .flags = UPF_BOOT_AUTOCONF,
145 .type = PORT_SCIF,
146 .irqs = { 244, 244, 244, 244 },
147 }, {
148 .mapbase = 0xfffe9000,
149 .flags = UPF_BOOT_AUTOCONF,
150 .type = PORT_SCIF,
151 .irqs = { 248, 248, 248, 248 },
152 }, {
153 .mapbase = 0xfffe9800,
154 .flags = UPF_BOOT_AUTOCONF,
155 .type = PORT_SCIF,
156 .irqs = { 252, 252, 252, 252 },
157 }, {
158 .flags = 0,
159 }
160}; 141};
161 142
162static struct platform_device sci_device = { 143static struct platform_device scif0_device = {
163 .name = "sh-sci", 144 .name = "sh-sci",
164 .id = -1, 145 .id = 0,
146 .dev = {
147 .platform_data = &scif0_platform_data,
148 },
149};
150
151static struct plat_sci_port scif1_platform_data = {
152 .mapbase = 0xfffe8800,
153 .flags = UPF_BOOT_AUTOCONF,
154 .type = PORT_SCIF,
155 .irqs = { 244, 244, 244, 244 },
156};
157
158static struct platform_device scif1_device = {
159 .name = "sh-sci",
160 .id = 1,
161 .dev = {
162 .platform_data = &scif1_platform_data,
163 },
164};
165
166static struct plat_sci_port scif2_platform_data = {
167 .mapbase = 0xfffe9000,
168 .flags = UPF_BOOT_AUTOCONF,
169 .type = PORT_SCIF,
170 .irqs = { 248, 248, 248, 248 },
171};
172
173static struct platform_device scif2_device = {
174 .name = "sh-sci",
175 .id = 2,
176 .dev = {
177 .platform_data = &scif2_platform_data,
178 },
179};
180
181static struct plat_sci_port scif3_platform_data = {
182 .mapbase = 0xfffe9800,
183 .flags = UPF_BOOT_AUTOCONF,
184 .type = PORT_SCIF,
185 .irqs = { 252, 252, 252, 252 },
186};
187
188static struct platform_device scif3_device = {
189 .name = "sh-sci",
190 .id = 3,
165 .dev = { 191 .dev = {
166 .platform_data = sci_platform_data, 192 .platform_data = &scif3_platform_data,
167 }, 193 },
168}; 194};
169 195
@@ -325,7 +351,10 @@ static struct platform_device mtu2_2_device = {
325}; 351};
326 352
327static struct platform_device *sh7206_devices[] __initdata = { 353static struct platform_device *sh7206_devices[] __initdata = {
328 &sci_device, 354 &scif0_device,
355 &scif1_device,
356 &scif2_device,
357 &scif3_device,
329 &cmt0_device, 358 &cmt0_device,
330 &cmt1_device, 359 &cmt1_device,
331 &mtu2_0_device, 360 &mtu2_0_device,
@@ -346,6 +375,10 @@ void __init plat_irq_setup(void)
346} 375}
347 376
348static struct platform_device *sh7206_early_devices[] __initdata = { 377static struct platform_device *sh7206_early_devices[] __initdata = {
378 &scif0_device,
379 &scif1_device,
380 &scif2_device,
381 &scif3_device,
349 &cmt0_device, 382 &cmt0_device,
350 &cmt1_device, 383 &cmt1_device,
351 &mtu2_0_device, 384 &mtu2_0_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index c23105983878..7b892d60e3a0 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -67,27 +67,33 @@ static struct intc_prio_reg prio_registers[] __initdata = {
67static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL, 67static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
68 NULL, prio_registers, NULL); 68 NULL, prio_registers, NULL);
69 69
70static struct plat_sci_port sci_platform_data[] = { 70static struct plat_sci_port scif0_platform_data = {
71 { 71 .mapbase = 0xa4410000,
72 .mapbase = 0xa4410000, 72 .flags = UPF_BOOT_AUTOCONF,
73 .flags = UPF_BOOT_AUTOCONF, 73 .type = PORT_SCIF,
74 .type = PORT_SCIF, 74 .irqs = { 56, 56, 56 },
75 .irqs = { 56, 56, 56 }, 75};
76 }, { 76
77 .mapbase = 0xa4400000, 77static struct platform_device scif0_device = {
78 .flags = UPF_BOOT_AUTOCONF,
79 .type = PORT_SCIF,
80 .irqs = { 52, 52, 52 },
81 }, {
82 .flags = 0,
83 }
84};
85
86static struct platform_device sci_device = {
87 .name = "sh-sci", 78 .name = "sh-sci",
88 .id = -1, 79 .id = 0,
80 .dev = {
81 .platform_data = &scif0_platform_data,
82 },
83};
84
85static struct plat_sci_port scif1_platform_data = {
86 .mapbase = 0xa4400000,
87 .flags = UPF_BOOT_AUTOCONF,
88 .type = PORT_SCIF,
89 .irqs = { 52, 52, 52 },
90};
91
92static struct platform_device scif1_device = {
93 .name = "sh-sci",
94 .id = 1,
89 .dev = { 95 .dev = {
90 .platform_data = sci_platform_data, 96 .platform_data = &scif1_platform_data,
91 }, 97 },
92}; 98};
93 99
@@ -210,10 +216,11 @@ static struct platform_device tmu2_device = {
210}; 216};
211 217
212static struct platform_device *sh7705_devices[] __initdata = { 218static struct platform_device *sh7705_devices[] __initdata = {
219 &scif0_device,
220 &scif1_device,
213 &tmu0_device, 221 &tmu0_device,
214 &tmu1_device, 222 &tmu1_device,
215 &tmu2_device, 223 &tmu2_device,
216 &sci_device,
217 &rtc_device, 224 &rtc_device,
218}; 225};
219 226
@@ -225,6 +232,8 @@ static int __init sh7705_devices_setup(void)
225arch_initcall(sh7705_devices_setup); 232arch_initcall(sh7705_devices_setup);
226 233
227static struct platform_device *sh7705_early_devices[] __initdata = { 234static struct platform_device *sh7705_early_devices[] __initdata = {
235 &scif0_device,
236 &scif1_device,
228 &tmu0_device, 237 &tmu0_device,
229 &tmu1_device, 238 &tmu1_device,
230 &tmu2_device, 239 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 347ab35d0697..bc0c4f68c7c7 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -106,44 +106,55 @@ static struct platform_device rtc_device = {
106 .resource = rtc_resources, 106 .resource = rtc_resources,
107}; 107};
108 108
109static struct plat_sci_port sci_platform_data[] = { 109static struct plat_sci_port scif0_platform_data = {
110 { 110 .mapbase = 0xfffffe80,
111 .mapbase = 0xfffffe80, 111 .flags = UPF_BOOT_AUTOCONF,
112 .flags = UPF_BOOT_AUTOCONF, 112 .type = PORT_SCI,
113 .type = PORT_SCI, 113 .irqs = { 23, 23, 23, 0 },
114 .irqs = { 23, 23, 23, 0 }, 114};
115
116static struct platform_device scif0_device = {
117 .name = "sh-sci",
118 .id = 0,
119 .dev = {
120 .platform_data = &scif0_platform_data,
115 }, 121 },
122};
116#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ 123#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
117 defined(CONFIG_CPU_SUBTYPE_SH7707) || \ 124 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
118 defined(CONFIG_CPU_SUBTYPE_SH7709) 125 defined(CONFIG_CPU_SUBTYPE_SH7709)
119 { 126static struct plat_sci_port scif1_platform_data = {
120 .mapbase = 0xa4000150, 127 .mapbase = 0xa4000150,
121 .flags = UPF_BOOT_AUTOCONF, 128 .flags = UPF_BOOT_AUTOCONF,
122 .type = PORT_SCIF, 129 .type = PORT_SCIF,
123 .irqs = { 56, 56, 56, 56 }, 130 .irqs = { 56, 56, 56, 56 },
131};
132
133static struct platform_device scif1_device = {
134 .name = "sh-sci",
135 .id = 1,
136 .dev = {
137 .platform_data = &scif1_platform_data,
124 }, 138 },
139};
125#endif 140#endif
126#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ 141#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
127 defined(CONFIG_CPU_SUBTYPE_SH7709) 142 defined(CONFIG_CPU_SUBTYPE_SH7709)
128 { 143static struct plat_sci_port scif2_platform_data = {
129 .mapbase = 0xa4000140, 144 .mapbase = 0xa4000140,
130 .flags = UPF_BOOT_AUTOCONF, 145 .flags = UPF_BOOT_AUTOCONF,
131 .type = PORT_IRDA, 146 .type = PORT_IRDA,
132 .irqs = { 52, 52, 52, 52 }, 147 .irqs = { 52, 52, 52, 52 },
133 },
134#endif
135 {
136 .flags = 0,
137 }
138}; 148};
139 149
140static struct platform_device sci_device = { 150static struct platform_device scif2_device = {
141 .name = "sh-sci", 151 .name = "sh-sci",
142 .id = -1, 152 .id = 2,
143 .dev = { 153 .dev = {
144 .platform_data = sci_platform_data, 154 .platform_data = &scif2_platform_data,
145 }, 155 },
146}; 156};
157#endif
147 158
148static struct sh_timer_config tmu0_platform_data = { 159static struct sh_timer_config tmu0_platform_data = {
149 .name = "TMU0", 160 .name = "TMU0",
@@ -238,10 +249,19 @@ static struct platform_device tmu2_device = {
238}; 249};
239 250
240static struct platform_device *sh770x_devices[] __initdata = { 251static struct platform_device *sh770x_devices[] __initdata = {
252 &scif0_device,
253#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
254 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
255 defined(CONFIG_CPU_SUBTYPE_SH7709)
256 &scif1_device,
257#endif
258#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
259 defined(CONFIG_CPU_SUBTYPE_SH7709)
260 &scif2_device,
261#endif
241 &tmu0_device, 262 &tmu0_device,
242 &tmu1_device, 263 &tmu1_device,
243 &tmu2_device, 264 &tmu2_device,
244 &sci_device,
245 &rtc_device, 265 &rtc_device,
246}; 266};
247 267
@@ -253,6 +273,16 @@ static int __init sh770x_devices_setup(void)
253arch_initcall(sh770x_devices_setup); 273arch_initcall(sh770x_devices_setup);
254 274
255static struct platform_device *sh770x_early_devices[] __initdata = { 275static struct platform_device *sh770x_early_devices[] __initdata = {
276 &scif0_device,
277#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
278 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
279 defined(CONFIG_CPU_SUBTYPE_SH7709)
280 &scif1_device,
281#endif
282#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
283 defined(CONFIG_CPU_SUBTYPE_SH7709)
284 &scif2_device,
285#endif
256 &tmu0_device, 286 &tmu0_device,
257 &tmu1_device, 287 &tmu1_device,
258 &tmu2_device, 288 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 717e90ae1097..0845a3ad006d 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -96,28 +96,33 @@ static struct platform_device rtc_device = {
96 }, 96 },
97}; 97};
98 98
99static struct plat_sci_port sci_platform_data[] = { 99static struct plat_sci_port scif0_platform_data = {
100 { 100 .mapbase = 0xa4400000,
101 .mapbase = 0xa4400000, 101 .flags = UPF_BOOT_AUTOCONF,
102 .flags = UPF_BOOT_AUTOCONF, 102 .type = PORT_SCIF,
103 .type = PORT_SCIF, 103 .irqs = { 52, 52, 52, 52 },
104 .irqs = { 52, 52, 52, 52 }, 104};
105 }, { 105
106 .mapbase = 0xa4410000, 106static struct platform_device scif0_device = {
107 .flags = UPF_BOOT_AUTOCONF,
108 .type = PORT_SCIF,
109 .irqs = { 56, 56, 56, 56 },
110 }, {
111
112 .flags = 0,
113 }
114};
115
116static struct platform_device sci_device = {
117 .name = "sh-sci", 107 .name = "sh-sci",
118 .id = -1, 108 .id = 0,
109 .dev = {
110 .platform_data = &scif0_platform_data,
111 },
112};
113
114static struct plat_sci_port scif1_platform_data = {
115 .mapbase = 0xa4410000,
116 .flags = UPF_BOOT_AUTOCONF,
117 .type = PORT_SCIF,
118 .irqs = { 56, 56, 56, 56 },
119};
120
121static struct platform_device scif1_device = {
122 .name = "sh-sci",
123 .id = 1,
119 .dev = { 124 .dev = {
120 .platform_data = sci_platform_data, 125 .platform_data = &scif1_platform_data,
121 }, 126 },
122}; 127};
123 128
@@ -214,10 +219,11 @@ static struct platform_device tmu2_device = {
214}; 219};
215 220
216static struct platform_device *sh7710_devices[] __initdata = { 221static struct platform_device *sh7710_devices[] __initdata = {
222 &scif0_device,
223 &scif1_device,
217 &tmu0_device, 224 &tmu0_device,
218 &tmu1_device, 225 &tmu1_device,
219 &tmu2_device, 226 &tmu2_device,
220 &sci_device,
221 &rtc_device, 227 &rtc_device,
222}; 228};
223 229
@@ -229,6 +235,8 @@ static int __init sh7710_devices_setup(void)
229arch_initcall(sh7710_devices_setup); 235arch_initcall(sh7710_devices_setup);
230 236
231static struct platform_device *sh7710_early_devices[] __initdata = { 237static struct platform_device *sh7710_early_devices[] __initdata = {
238 &scif0_device,
239 &scif1_device,
232 &tmu0_device, 240 &tmu0_device,
233 &tmu1_device, 241 &tmu1_device,
234 &tmu2_device, 242 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 74d8baaf8e96..a718a6231091 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -48,28 +48,33 @@ static struct platform_device rtc_device = {
48 }, 48 },
49}; 49};
50 50
51static struct plat_sci_port sci_platform_data[] = { 51static struct plat_sci_port scif0_platform_data = {
52 { 52 .mapbase = 0xa4430000,
53 .mapbase = 0xa4430000, 53 .flags = UPF_BOOT_AUTOCONF,
54 .flags = UPF_BOOT_AUTOCONF, 54 .type = PORT_SCIF,
55 .type = PORT_SCIF, 55 .irqs = { 80, 80, 80, 80 },
56 .irqs = { 80, 80, 80, 80 }, 56};
57 }, { 57
58 .mapbase = 0xa4438000, 58static struct platform_device scif0_device = {
59 .flags = UPF_BOOT_AUTOCONF,
60 .type = PORT_SCIF,
61 .irqs = { 81, 81, 81, 81 },
62 }, {
63
64 .flags = 0,
65 }
66};
67
68static struct platform_device sci_device = {
69 .name = "sh-sci", 59 .name = "sh-sci",
70 .id = -1, 60 .id = 0,
61 .dev = {
62 .platform_data = &scif0_platform_data,
63 },
64};
65
66static struct plat_sci_port scif1_platform_data = {
67 .mapbase = 0xa4438000,
68 .flags = UPF_BOOT_AUTOCONF,
69 .type = PORT_SCIF,
70 .irqs = { 81, 81, 81, 81 },
71};
72
73static struct platform_device scif1_device = {
74 .name = "sh-sci",
75 .id = 1,
71 .dev = { 76 .dev = {
72 .platform_data = sci_platform_data, 77 .platform_data = &scif1_platform_data,
73 }, 78 },
74}; 79};
75 80
@@ -369,6 +374,8 @@ static struct platform_device tmu2_device = {
369}; 374};
370 375
371static struct platform_device *sh7720_devices[] __initdata = { 376static struct platform_device *sh7720_devices[] __initdata = {
377 &scif0_device,
378 &scif1_device,
372 &cmt0_device, 379 &cmt0_device,
373 &cmt1_device, 380 &cmt1_device,
374 &cmt2_device, 381 &cmt2_device,
@@ -378,7 +385,6 @@ static struct platform_device *sh7720_devices[] __initdata = {
378 &tmu1_device, 385 &tmu1_device,
379 &tmu2_device, 386 &tmu2_device,
380 &rtc_device, 387 &rtc_device,
381 &sci_device,
382 &usb_ohci_device, 388 &usb_ohci_device,
383 &usbf_device, 389 &usbf_device,
384}; 390};
@@ -391,6 +397,8 @@ static int __init sh7720_devices_setup(void)
391arch_initcall(sh7720_devices_setup); 397arch_initcall(sh7720_devices_setup);
392 398
393static struct platform_device *sh7720_early_devices[] __initdata = { 399static struct platform_device *sh7720_early_devices[] __initdata = {
400 &scif0_device,
401 &scif1_device,
394 &cmt0_device, 402 &cmt0_device,
395 &cmt1_device, 403 &cmt1_device,
396 &cmt2_device, 404 &cmt2_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index de4827df19aa..4b733715cdb5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -15,22 +15,18 @@
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <linux/io.h> 16#include <linux/io.h>
17 17
18static struct plat_sci_port sci_platform_data[] = { 18static struct plat_sci_port scif0_platform_data = {
19 { 19 .mapbase = 0xffe80000,
20 .mapbase = 0xffe80000, 20 .flags = UPF_BOOT_AUTOCONF,
21 .flags = UPF_BOOT_AUTOCONF, 21 .type = PORT_SCIF,
22 .type = PORT_SCIF, 22 .irqs = { 40, 41, 43, 42 },
23 .irqs = { 40, 41, 43, 42 },
24 }, {
25 .flags = 0,
26 }
27}; 23};
28 24
29static struct platform_device sci_device = { 25static struct platform_device scif0_device = {
30 .name = "sh-sci", 26 .name = "sh-sci",
31 .id = -1, 27 .id = 0,
32 .dev = { 28 .dev = {
33 .platform_data = sci_platform_data, 29 .platform_data = &scif0_platform_data,
34 }, 30 },
35}; 31};
36 32
@@ -127,7 +123,7 @@ static struct platform_device tmu2_device = {
127}; 123};
128 124
129static struct platform_device *sh4202_devices[] __initdata = { 125static struct platform_device *sh4202_devices[] __initdata = {
130 &sci_device, 126 &scif0_device,
131 &tmu0_device, 127 &tmu0_device,
132 &tmu1_device, 128 &tmu1_device,
133 &tmu2_device, 129 &tmu2_device,
@@ -141,6 +137,7 @@ static int __init sh4202_devices_setup(void)
141arch_initcall(sh4202_devices_setup); 137arch_initcall(sh4202_devices_setup);
142 138
143static struct platform_device *sh4202_early_devices[] __initdata = { 139static struct platform_device *sh4202_early_devices[] __initdata = {
140 &scif0_device,
144 &tmu0_device, 141 &tmu0_device,
145 &tmu1_device, 142 &tmu1_device,
146 &tmu2_device, 143 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 1b8b122e8f3d..b2a9df1af64c 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -35,29 +35,33 @@ static struct platform_device rtc_device = {
35 .resource = rtc_resources, 35 .resource = rtc_resources,
36}; 36};
37 37
38static struct plat_sci_port sci_platform_data[] = { 38static struct plat_sci_port scif0_platform_data = {
39 { 39 .mapbase = 0xffe00000,
40#ifndef CONFIG_SH_RTS7751R2D 40 .flags = UPF_BOOT_AUTOCONF,
41 .mapbase = 0xffe00000, 41 .type = PORT_SCI,
42 .flags = UPF_BOOT_AUTOCONF, 42 .irqs = { 23, 23, 23, 0 },
43 .type = PORT_SCI,
44 .irqs = { 23, 23, 23, 0 },
45 }, {
46#endif
47 .mapbase = 0xffe80000,
48 .flags = UPF_BOOT_AUTOCONF,
49 .type = PORT_SCIF,
50 .irqs = { 40, 40, 40, 40 },
51 }, {
52 .flags = 0,
53 }
54}; 43};
55 44
56static struct platform_device sci_device = { 45static struct platform_device scif0_device = {
57 .name = "sh-sci", 46 .name = "sh-sci",
58 .id = -1, 47 .id = 0,
48 .dev = {
49 .platform_data = &scif0_platform_data,
50 },
51};
52
53static struct plat_sci_port scif1_platform_data = {
54 .mapbase = 0xffe80000,
55 .flags = UPF_BOOT_AUTOCONF,
56 .type = PORT_SCIF,
57 .irqs = { 40, 40, 40, 40 },
58};
59
60static struct platform_device scif1_device = {
61 .name = "sh-sci",
62 .id = 1,
59 .dev = { 63 .dev = {
60 .platform_data = sci_platform_data, 64 .platform_data = &scif1_platform_data,
61 }, 65 },
62}; 66};
63 67
@@ -221,8 +225,9 @@ static struct platform_device tmu4_device = {
221#endif 225#endif
222 226
223static struct platform_device *sh7750_devices[] __initdata = { 227static struct platform_device *sh7750_devices[] __initdata = {
228 &scif0_device,
229 &scif1_device,
224 &rtc_device, 230 &rtc_device,
225 &sci_device,
226 &tmu0_device, 231 &tmu0_device,
227 &tmu1_device, 232 &tmu1_device,
228 &tmu2_device, 233 &tmu2_device,
@@ -242,6 +247,8 @@ static int __init sh7750_devices_setup(void)
242arch_initcall(sh7750_devices_setup); 247arch_initcall(sh7750_devices_setup);
243 248
244static struct platform_device *sh7750_early_devices[] __initdata = { 249static struct platform_device *sh7750_early_devices[] __initdata = {
250 &scif0_device,
251 &scif1_device,
245 &tmu0_device, 252 &tmu0_device,
246 &tmu1_device, 253 &tmu1_device,
247 &tmu2_device, 254 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 7fbb7be9284c..5b74cc0b43da 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -126,37 +126,63 @@ static struct intc_vect vectors_irq[] __initdata = {
126static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups, 126static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
127 mask_registers, prio_registers, NULL); 127 mask_registers, prio_registers, NULL);
128 128
129static struct plat_sci_port sci_platform_data[] = { 129static struct plat_sci_port scif0_platform_data = {
130 { 130 .mapbase = 0xfe600000,
131 .mapbase = 0xfe600000, 131 .flags = UPF_BOOT_AUTOCONF,
132 .flags = UPF_BOOT_AUTOCONF, 132 .type = PORT_SCIF,
133 .type = PORT_SCIF, 133 .irqs = { 52, 53, 55, 54 },
134 .irqs = { 52, 53, 55, 54 }, 134};
135 }, { 135
136 .mapbase = 0xfe610000, 136static struct platform_device scif0_device = {
137 .flags = UPF_BOOT_AUTOCONF, 137 .name = "sh-sci",
138 .type = PORT_SCIF, 138 .id = 0,
139 .irqs = { 72, 73, 75, 74 }, 139 .dev = {
140 }, { 140 .platform_data = &scif0_platform_data,
141 .mapbase = 0xfe620000, 141 },
142 .flags = UPF_BOOT_AUTOCONF, 142};
143 .type = PORT_SCIF, 143
144 .irqs = { 76, 77, 79, 78 }, 144static struct plat_sci_port scif1_platform_data = {
145 }, { 145 .mapbase = 0xfe610000,
146 .mapbase = 0xfe480000, 146 .flags = UPF_BOOT_AUTOCONF,
147 .flags = UPF_BOOT_AUTOCONF, 147 .type = PORT_SCIF,
148 .type = PORT_SCI, 148 .irqs = { 72, 73, 75, 74 },
149 .irqs = { 80, 81, 82, 0 }, 149};
150 }, { 150
151 .flags = 0, 151static struct platform_device scif1_device = {
152 } 152 .name = "sh-sci",
153 .id = 1,
154 .dev = {
155 .platform_data = &scif1_platform_data,
156 },
157};
158
159static struct plat_sci_port scif2_platform_data = {
160 .mapbase = 0xfe620000,
161 .flags = UPF_BOOT_AUTOCONF,
162 .type = PORT_SCIF,
163 .irqs = { 76, 77, 79, 78 },
164};
165
166static struct platform_device scif2_device = {
167 .name = "sh-sci",
168 .id = 2,
169 .dev = {
170 .platform_data = &scif2_platform_data,
171 },
172};
173
174static struct plat_sci_port scif3_platform_data = {
175 .mapbase = 0xfe480000,
176 .flags = UPF_BOOT_AUTOCONF,
177 .type = PORT_SCI,
178 .irqs = { 80, 81, 82, 0 },
153}; 179};
154 180
155static struct platform_device sci_device = { 181static struct platform_device scif3_device = {
156 .name = "sh-sci", 182 .name = "sh-sci",
157 .id = -1, 183 .id = 3,
158 .dev = { 184 .dev = {
159 .platform_data = sci_platform_data, 185 .platform_data = &scif3_platform_data,
160 }, 186 },
161}; 187};
162 188
@@ -254,7 +280,10 @@ static struct platform_device tmu2_device = {
254 280
255 281
256static struct platform_device *sh7760_devices[] __initdata = { 282static struct platform_device *sh7760_devices[] __initdata = {
257 &sci_device, 283 &scif0_device,
284 &scif1_device,
285 &scif2_device,
286 &scif3_device,
258 &tmu0_device, 287 &tmu0_device,
259 &tmu1_device, 288 &tmu1_device,
260 &tmu2_device, 289 &tmu2_device,
@@ -268,6 +297,10 @@ static int __init sh7760_devices_setup(void)
268arch_initcall(sh7760_devices_setup); 297arch_initcall(sh7760_devices_setup);
269 298
270static struct platform_device *sh7760_early_devices[] __initdata = { 299static struct platform_device *sh7760_early_devices[] __initdata = {
300 &scif0_device,
301 &scif1_device,
302 &scif2_device,
303 &scif3_device,
271 &tmu0_device, 304 &tmu0_device,
272 &tmu1_device, 305 &tmu1_device,
273 &tmu2_device, 306 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index ac4d5672ec1a..45eb1bfd42c9 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -15,6 +15,71 @@
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <asm/clock.h> 16#include <asm/clock.h>
17 17
18/* Serial */
19static struct plat_sci_port scif0_platform_data = {
20 .mapbase = 0xffe00000,
21 .flags = UPF_BOOT_AUTOCONF,
22 .type = PORT_SCIF,
23 .irqs = { 80, 80, 80, 80 },
24 .clk = "scif0",
25};
26
27static struct platform_device scif0_device = {
28 .name = "sh-sci",
29 .id = 0,
30 .dev = {
31 .platform_data = &scif0_platform_data,
32 },
33};
34
35static struct plat_sci_port scif1_platform_data = {
36 .mapbase = 0xffe10000,
37 .flags = UPF_BOOT_AUTOCONF,
38 .type = PORT_SCIF,
39 .irqs = { 81, 81, 81, 81 },
40 .clk = "scif1",
41};
42
43static struct platform_device scif1_device = {
44 .name = "sh-sci",
45 .id = 1,
46 .dev = {
47 .platform_data = &scif1_platform_data,
48 },
49};
50
51static struct plat_sci_port scif2_platform_data = {
52 .mapbase = 0xffe20000,
53 .flags = UPF_BOOT_AUTOCONF,
54 .type = PORT_SCIF,
55 .irqs = { 82, 82, 82, 82 },
56 .clk = "scif2",
57};
58
59static struct platform_device scif2_device = {
60 .name = "sh-sci",
61 .id = 2,
62 .dev = {
63 .platform_data = &scif2_platform_data,
64 },
65};
66
67static struct plat_sci_port scif3_platform_data = {
68 .mapbase = 0xffe30000,
69 .flags = UPF_BOOT_AUTOCONF,
70 .type = PORT_SCIF,
71 .irqs = { 83, 83, 83, 83 },
72 .clk = "scif3",
73};
74
75static struct platform_device scif3_device = {
76 .name = "sh-sci",
77 .id = 3,
78 .dev = {
79 .platform_data = &scif3_platform_data,
80 },
81};
82
18static struct resource iic0_resources[] = { 83static struct resource iic0_resources[] = {
19 [0] = { 84 [0] = {
20 .name = "IIC0", 85 .name = "IIC0",
@@ -265,52 +330,17 @@ static struct platform_device tmu2_device = {
265 .num_resources = ARRAY_SIZE(tmu2_resources), 330 .num_resources = ARRAY_SIZE(tmu2_resources),
266}; 331};
267 332
268static struct plat_sci_port sci_platform_data[] = {
269 {
270 .mapbase = 0xffe00000,
271 .flags = UPF_BOOT_AUTOCONF,
272 .type = PORT_SCIF,
273 .irqs = { 80, 80, 80, 80 },
274 .clk = "scif0",
275 }, {
276 .mapbase = 0xffe10000,
277 .flags = UPF_BOOT_AUTOCONF,
278 .type = PORT_SCIF,
279 .irqs = { 81, 81, 81, 81 },
280 .clk = "scif1",
281 }, {
282 .mapbase = 0xffe20000,
283 .flags = UPF_BOOT_AUTOCONF,
284 .type = PORT_SCIF,
285 .irqs = { 82, 82, 82, 82 },
286 .clk = "scif2",
287 }, {
288 .mapbase = 0xffe30000,
289 .flags = UPF_BOOT_AUTOCONF,
290 .type = PORT_SCIF,
291 .irqs = { 83, 83, 83, 83 },
292 .clk = "scif3",
293 }, {
294 .flags = 0,
295 }
296};
297
298static struct platform_device sci_device = {
299 .name = "sh-sci",
300 .id = -1,
301 .dev = {
302 .platform_data = sci_platform_data,
303 },
304};
305
306static struct platform_device *sh7343_devices[] __initdata = { 333static struct platform_device *sh7343_devices[] __initdata = {
334 &scif0_device,
335 &scif1_device,
336 &scif2_device,
337 &scif3_device,
307 &cmt_device, 338 &cmt_device,
308 &tmu0_device, 339 &tmu0_device,
309 &tmu1_device, 340 &tmu1_device,
310 &tmu2_device, 341 &tmu2_device,
311 &iic0_device, 342 &iic0_device,
312 &iic1_device, 343 &iic1_device,
313 &sci_device,
314 &vpu_device, 344 &vpu_device,
315 &veu_device, 345 &veu_device,
316 &jpu_device, 346 &jpu_device,
@@ -328,6 +358,10 @@ static int __init sh7343_devices_setup(void)
328arch_initcall(sh7343_devices_setup); 358arch_initcall(sh7343_devices_setup);
329 359
330static struct platform_device *sh7343_early_devices[] __initdata = { 360static struct platform_device *sh7343_early_devices[] __initdata = {
361 &scif0_device,
362 &scif1_device,
363 &scif2_device,
364 &scif3_device,
331 &cmt_device, 365 &cmt_device,
332 &tmu0_device, 366 &tmu0_device,
333 &tmu1_device, 367 &tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 4a9010bf4fd3..c494c193e3b6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -18,6 +18,22 @@
18#include <linux/usb/r8a66597.h> 18#include <linux/usb/r8a66597.h>
19#include <asm/clock.h> 19#include <asm/clock.h>
20 20
21static struct plat_sci_port scif0_platform_data = {
22 .mapbase = 0xffe00000,
23 .flags = UPF_BOOT_AUTOCONF,
24 .type = PORT_SCIF,
25 .irqs = { 80, 80, 80, 80 },
26 .clk = "scif0",
27};
28
29static struct platform_device scif0_device = {
30 .name = "sh-sci",
31 .id = 0,
32 .dev = {
33 .platform_data = &scif0_platform_data,
34 },
35};
36
21static struct resource iic_resources[] = { 37static struct resource iic_resources[] = {
22 [0] = { 38 [0] = {
23 .name = "IIC", 39 .name = "IIC",
@@ -276,33 +292,13 @@ static struct platform_device tmu2_device = {
276 .num_resources = ARRAY_SIZE(tmu2_resources), 292 .num_resources = ARRAY_SIZE(tmu2_resources),
277}; 293};
278 294
279static struct plat_sci_port sci_platform_data[] = {
280 {
281 .mapbase = 0xffe00000,
282 .flags = UPF_BOOT_AUTOCONF,
283 .type = PORT_SCIF,
284 .irqs = { 80, 80, 80, 80 },
285 .clk = "scif0",
286 }, {
287 .flags = 0,
288 }
289};
290
291static struct platform_device sci_device = {
292 .name = "sh-sci",
293 .id = -1,
294 .dev = {
295 .platform_data = sci_platform_data,
296 },
297};
298
299static struct platform_device *sh7366_devices[] __initdata = { 295static struct platform_device *sh7366_devices[] __initdata = {
296 &scif0_device,
300 &cmt_device, 297 &cmt_device,
301 &tmu0_device, 298 &tmu0_device,
302 &tmu1_device, 299 &tmu1_device,
303 &tmu2_device, 300 &tmu2_device,
304 &iic_device, 301 &iic_device,
305 &sci_device,
306 &usb_host_device, 302 &usb_host_device,
307 &vpu_device, 303 &vpu_device,
308 &veu0_device, 304 &veu0_device,
@@ -321,6 +317,7 @@ static int __init sh7366_devices_setup(void)
321arch_initcall(sh7366_devices_setup); 317arch_initcall(sh7366_devices_setup);
322 318
323static struct platform_device *sh7366_early_devices[] __initdata = { 319static struct platform_device *sh7366_early_devices[] __initdata = {
320 &scif0_device,
324 &cmt_device, 321 &cmt_device,
325 &tmu0_device, 322 &tmu0_device,
326 &tmu1_device, 323 &tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 5491b094cf05..b5335b5e309c 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -20,6 +20,55 @@
20#include <asm/dma-sh.h> 20#include <asm/dma-sh.h>
21#include <cpu/sh7722.h> 21#include <cpu/sh7722.h>
22 22
23/* Serial */
24static struct plat_sci_port scif0_platform_data = {
25 .mapbase = 0xffe00000,
26 .flags = UPF_BOOT_AUTOCONF,
27 .type = PORT_SCIF,
28 .irqs = { 80, 80, 80, 80 },
29 .clk = "scif0",
30};
31
32static struct platform_device scif0_device = {
33 .name = "sh-sci",
34 .id = 0,
35 .dev = {
36 .platform_data = &scif0_platform_data,
37 },
38};
39
40static struct plat_sci_port scif1_platform_data = {
41 .mapbase = 0xffe10000,
42 .flags = UPF_BOOT_AUTOCONF,
43 .type = PORT_SCIF,
44 .irqs = { 81, 81, 81, 81 },
45 .clk = "scif1",
46};
47
48static struct platform_device scif1_device = {
49 .name = "sh-sci",
50 .id = 1,
51 .dev = {
52 .platform_data = &scif1_platform_data,
53 },
54};
55
56static struct plat_sci_port scif2_platform_data = {
57 .mapbase = 0xffe20000,
58 .flags = UPF_BOOT_AUTOCONF,
59 .type = PORT_SCIF,
60 .irqs = { 82, 82, 82, 82 },
61 .clk = "scif2",
62};
63
64static struct platform_device scif2_device = {
65 .name = "sh-sci",
66 .id = 2,
67 .dev = {
68 .platform_data = &scif2_platform_data,
69 },
70};
71
23static struct resource rtc_resources[] = { 72static struct resource rtc_resources[] = {
24 [0] = { 73 [0] = {
25 .start = 0xa465fec0, 74 .start = 0xa465fec0,
@@ -339,41 +388,6 @@ static struct platform_device tmu2_device = {
339 }, 388 },
340}; 389};
341 390
342static struct plat_sci_port sci_platform_data[] = {
343 {
344 .mapbase = 0xffe00000,
345 .flags = UPF_BOOT_AUTOCONF,
346 .type = PORT_SCIF,
347 .irqs = { 80, 80, 80, 80 },
348 .clk = "scif0",
349 },
350 {
351 .mapbase = 0xffe10000,
352 .flags = UPF_BOOT_AUTOCONF,
353 .type = PORT_SCIF,
354 .irqs = { 81, 81, 81, 81 },
355 .clk = "scif1",
356 },
357 {
358 .mapbase = 0xffe20000,
359 .flags = UPF_BOOT_AUTOCONF,
360 .type = PORT_SCIF,
361 .irqs = { 82, 82, 82, 82 },
362 .clk = "scif2",
363 },
364 {
365 .flags = 0,
366 }
367};
368
369static struct platform_device sci_device = {
370 .name = "sh-sci",
371 .id = -1,
372 .dev = {
373 .platform_data = sci_platform_data,
374 },
375};
376
377static struct sh_dmae_pdata dma_platform_data = { 391static struct sh_dmae_pdata dma_platform_data = {
378 .mode = 0, 392 .mode = 0,
379}; 393};
@@ -387,6 +401,9 @@ static struct platform_device dma_device = {
387}; 401};
388 402
389static struct platform_device *sh7722_devices[] __initdata = { 403static struct platform_device *sh7722_devices[] __initdata = {
404 &scif0_device,
405 &scif1_device,
406 &scif2_device,
390 &cmt_device, 407 &cmt_device,
391 &tmu0_device, 408 &tmu0_device,
392 &tmu1_device, 409 &tmu1_device,
@@ -394,7 +411,6 @@ static struct platform_device *sh7722_devices[] __initdata = {
394 &rtc_device, 411 &rtc_device,
395 &usbf_device, 412 &usbf_device,
396 &iic_device, 413 &iic_device,
397 &sci_device,
398 &vpu_device, 414 &vpu_device,
399 &veu_device, 415 &veu_device,
400 &jpu_device, 416 &jpu_device,
@@ -413,6 +429,9 @@ static int __init sh7722_devices_setup(void)
413arch_initcall(sh7722_devices_setup); 429arch_initcall(sh7722_devices_setup);
414 430
415static struct platform_device *sh7722_early_devices[] __initdata = { 431static struct platform_device *sh7722_early_devices[] __initdata = {
432 &scif0_device,
433 &scif1_device,
434 &scif2_device,
416 &cmt_device, 435 &cmt_device,
417 &tmu0_device, 436 &tmu0_device,
418 &tmu1_device, 437 &tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 4caa5a7ca86e..772b9265d0e4 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -20,6 +20,103 @@
20#include <asm/mmzone.h> 20#include <asm/mmzone.h>
21#include <cpu/sh7723.h> 21#include <cpu/sh7723.h>
22 22
23/* Serial */
24static struct plat_sci_port scif0_platform_data = {
25 .mapbase = 0xffe00000,
26 .flags = UPF_BOOT_AUTOCONF,
27 .type = PORT_SCIF,
28 .irqs = { 80, 80, 80, 80 },
29 .clk = "scif0",
30};
31
32static struct platform_device scif0_device = {
33 .name = "sh-sci",
34 .id = 0,
35 .dev = {
36 .platform_data = &scif0_platform_data,
37 },
38};
39
40static struct plat_sci_port scif1_platform_data = {
41 .mapbase = 0xffe10000,
42 .flags = UPF_BOOT_AUTOCONF,
43 .type = PORT_SCIF,
44 .irqs = { 81, 81, 81, 81 },
45 .clk = "scif1",
46};
47
48static struct platform_device scif1_device = {
49 .name = "sh-sci",
50 .id = 1,
51 .dev = {
52 .platform_data = &scif1_platform_data,
53 },
54};
55
56static struct plat_sci_port scif2_platform_data = {
57 .mapbase = 0xffe20000,
58 .flags = UPF_BOOT_AUTOCONF,
59 .type = PORT_SCIF,
60 .irqs = { 82, 82, 82, 82 },
61 .clk = "scif2",
62};
63
64static struct platform_device scif2_device = {
65 .name = "sh-sci",
66 .id = 2,
67 .dev = {
68 .platform_data = &scif2_platform_data,
69 },
70};
71
72static struct plat_sci_port scif3_platform_data = {
73 .mapbase = 0xa4e30000,
74 .flags = UPF_BOOT_AUTOCONF,
75 .type = PORT_SCIFA,
76 .irqs = { 56, 56, 56, 56 },
77 .clk = "scif3",
78};
79
80static struct platform_device scif3_device = {
81 .name = "sh-sci",
82 .id = 3,
83 .dev = {
84 .platform_data = &scif3_platform_data,
85 },
86};
87
88static struct plat_sci_port scif4_platform_data = {
89 .mapbase = 0xa4e40000,
90 .flags = UPF_BOOT_AUTOCONF,
91 .type = PORT_SCIFA,
92 .irqs = { 88, 88, 88, 88 },
93 .clk = "scif4",
94};
95
96static struct platform_device scif4_device = {
97 .name = "sh-sci",
98 .id = 4,
99 .dev = {
100 .platform_data = &scif4_platform_data,
101 },
102};
103
104static struct plat_sci_port scif5_platform_data = {
105 .mapbase = 0xa4e50000,
106 .flags = UPF_BOOT_AUTOCONF,
107 .type = PORT_SCIFA,
108 .irqs = { 109, 109, 109, 109 },
109 .clk = "scif5",
110};
111
112static struct platform_device scif5_device = {
113 .name = "sh-sci",
114 .id = 5,
115 .dev = {
116 .platform_data = &scif5_platform_data,
117 },
118};
119
23static struct uio_info vpu_platform_data = { 120static struct uio_info vpu_platform_data = {
24 .name = "VPU5", 121 .name = "VPU5",
25 .version = "0", 122 .version = "0",
@@ -348,56 +445,6 @@ static struct platform_device tmu5_device = {
348 }, 445 },
349}; 446};
350 447
351static struct plat_sci_port sci_platform_data[] = {
352 {
353 .mapbase = 0xffe00000,
354 .flags = UPF_BOOT_AUTOCONF,
355 .type = PORT_SCIF,
356 .irqs = { 80, 80, 80, 80 },
357 .clk = "scif0",
358 },{
359 .mapbase = 0xffe10000,
360 .flags = UPF_BOOT_AUTOCONF,
361 .type = PORT_SCIF,
362 .irqs = { 81, 81, 81, 81 },
363 .clk = "scif1",
364 },{
365 .mapbase = 0xffe20000,
366 .flags = UPF_BOOT_AUTOCONF,
367 .type = PORT_SCIF,
368 .irqs = { 82, 82, 82, 82 },
369 .clk = "scif2",
370 },{
371 .mapbase = 0xa4e30000,
372 .flags = UPF_BOOT_AUTOCONF,
373 .type = PORT_SCIFA,
374 .irqs = { 56, 56, 56, 56 },
375 .clk = "scif3",
376 },{
377 .mapbase = 0xa4e40000,
378 .flags = UPF_BOOT_AUTOCONF,
379 .type = PORT_SCIFA,
380 .irqs = { 88, 88, 88, 88 },
381 .clk = "scif4",
382 },{
383 .mapbase = 0xa4e50000,
384 .flags = UPF_BOOT_AUTOCONF,
385 .type = PORT_SCIFA,
386 .irqs = { 109, 109, 109, 109 },
387 .clk = "scif5",
388 }, {
389 .flags = 0,
390 }
391};
392
393static struct platform_device sci_device = {
394 .name = "sh-sci",
395 .id = -1,
396 .dev = {
397 .platform_data = sci_platform_data,
398 },
399};
400
401static struct resource rtc_resources[] = { 448static struct resource rtc_resources[] = {
402 [0] = { 449 [0] = {
403 .start = 0xa465fec0, 450 .start = 0xa465fec0,
@@ -488,6 +535,12 @@ static struct platform_device iic_device = {
488}; 535};
489 536
490static struct platform_device *sh7723_devices[] __initdata = { 537static struct platform_device *sh7723_devices[] __initdata = {
538 &scif0_device,
539 &scif1_device,
540 &scif2_device,
541 &scif3_device,
542 &scif4_device,
543 &scif5_device,
491 &cmt_device, 544 &cmt_device,
492 &tmu0_device, 545 &tmu0_device,
493 &tmu1_device, 546 &tmu1_device,
@@ -495,7 +548,6 @@ static struct platform_device *sh7723_devices[] __initdata = {
495 &tmu3_device, 548 &tmu3_device,
496 &tmu4_device, 549 &tmu4_device,
497 &tmu5_device, 550 &tmu5_device,
498 &sci_device,
499 &rtc_device, 551 &rtc_device,
500 &iic_device, 552 &iic_device,
501 &sh7723_usb_host_device, 553 &sh7723_usb_host_device,
@@ -516,6 +568,12 @@ static int __init sh7723_devices_setup(void)
516arch_initcall(sh7723_devices_setup); 568arch_initcall(sh7723_devices_setup);
517 569
518static struct platform_device *sh7723_early_devices[] __initdata = { 570static struct platform_device *sh7723_early_devices[] __initdata = {
571 &scif0_device,
572 &scif1_device,
573 &scif2_device,
574 &scif3_device,
575 &scif4_device,
576 &scif5_device,
519 &cmt_device, 577 &cmt_device,
520 &tmu0_device, 578 &tmu0_device,
521 &tmu1_device, 579 &tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 845e89c936e7..a52f35117e82 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -27,53 +27,99 @@
27#include <cpu/sh7724.h> 27#include <cpu/sh7724.h>
28 28
29/* Serial */ 29/* Serial */
30static struct plat_sci_port sci_platform_data[] = { 30static struct plat_sci_port scif0_platform_data = {
31 { 31 .mapbase = 0xffe00000,
32 .mapbase = 0xffe00000, 32 .flags = UPF_BOOT_AUTOCONF,
33 .flags = UPF_BOOT_AUTOCONF, 33 .type = PORT_SCIF,
34 .type = PORT_SCIF, 34 .irqs = { 80, 80, 80, 80 },
35 .irqs = { 80, 80, 80, 80 }, 35 .clk = "scif0",
36 .clk = "scif0", 36};
37 }, { 37
38 .mapbase = 0xffe10000, 38static struct platform_device scif0_device = {
39 .flags = UPF_BOOT_AUTOCONF,
40 .type = PORT_SCIF,
41 .irqs = { 81, 81, 81, 81 },
42 .clk = "scif1",
43 }, {
44 .mapbase = 0xffe20000,
45 .flags = UPF_BOOT_AUTOCONF,
46 .type = PORT_SCIF,
47 .irqs = { 82, 82, 82, 82 },
48 .clk = "scif2",
49 }, {
50 .mapbase = 0xa4e30000,
51 .flags = UPF_BOOT_AUTOCONF,
52 .type = PORT_SCIFA,
53 .irqs = { 56, 56, 56, 56 },
54 .clk = "scif3",
55 }, {
56 .mapbase = 0xa4e40000,
57 .flags = UPF_BOOT_AUTOCONF,
58 .type = PORT_SCIFA,
59 .irqs = { 88, 88, 88, 88 },
60 .clk = "scif4",
61 }, {
62 .mapbase = 0xa4e50000,
63 .flags = UPF_BOOT_AUTOCONF,
64 .type = PORT_SCIFA,
65 .irqs = { 109, 109, 109, 109 },
66 .clk = "scif5",
67 }, {
68 .flags = 0,
69 }
70};
71
72static struct platform_device sci_device = {
73 .name = "sh-sci", 39 .name = "sh-sci",
74 .id = -1, 40 .id = 0,
41 .dev = {
42 .platform_data = &scif0_platform_data,
43 },
44};
45
46static struct plat_sci_port scif1_platform_data = {
47 .mapbase = 0xffe10000,
48 .flags = UPF_BOOT_AUTOCONF,
49 .type = PORT_SCIF,
50 .irqs = { 81, 81, 81, 81 },
51 .clk = "scif1",
52};
53
54static struct platform_device scif1_device = {
55 .name = "sh-sci",
56 .id = 1,
57 .dev = {
58 .platform_data = &scif1_platform_data,
59 },
60};
61
62static struct plat_sci_port scif2_platform_data = {
63 .mapbase = 0xffe20000,
64 .flags = UPF_BOOT_AUTOCONF,
65 .type = PORT_SCIF,
66 .irqs = { 82, 82, 82, 82 },
67 .clk = "scif2",
68};
69
70static struct platform_device scif2_device = {
71 .name = "sh-sci",
72 .id = 2,
73 .dev = {
74 .platform_data = &scif2_platform_data,
75 },
76};
77
78static struct plat_sci_port scif3_platform_data = {
79 .mapbase = 0xa4e30000,
80 .flags = UPF_BOOT_AUTOCONF,
81 .type = PORT_SCIFA,
82 .irqs = { 56, 56, 56, 56 },
83 .clk = "scif3",
84};
85
86static struct platform_device scif3_device = {
87 .name = "sh-sci",
88 .id = 3,
89 .dev = {
90 .platform_data = &scif3_platform_data,
91 },
92};
93
94static struct plat_sci_port scif4_platform_data = {
95 .mapbase = 0xa4e40000,
96 .flags = UPF_BOOT_AUTOCONF,
97 .type = PORT_SCIFA,
98 .irqs = { 88, 88, 88, 88 },
99 .clk = "scif4",
100};
101
102static struct platform_device scif4_device = {
103 .name = "sh-sci",
104 .id = 4,
105 .dev = {
106 .platform_data = &scif4_platform_data,
107 },
108};
109
110static struct plat_sci_port scif5_platform_data = {
111 .mapbase = 0xa4e50000,
112 .flags = UPF_BOOT_AUTOCONF,
113 .type = PORT_SCIFA,
114 .irqs = { 109, 109, 109, 109 },
115 .clk = "scif5",
116};
117
118static struct platform_device scif5_device = {
119 .name = "sh-sci",
120 .id = 5,
75 .dev = { 121 .dev = {
76 .platform_data = sci_platform_data, 122 .platform_data = &scif5_platform_data,
77 }, 123 },
78}; 124};
79 125
@@ -590,6 +636,12 @@ static struct platform_device spu1_device = {
590}; 636};
591 637
592static struct platform_device *sh7724_devices[] __initdata = { 638static struct platform_device *sh7724_devices[] __initdata = {
639 &scif0_device,
640 &scif1_device,
641 &scif2_device,
642 &scif3_device,
643 &scif4_device,
644 &scif5_device,
593 &cmt_device, 645 &cmt_device,
594 &tmu0_device, 646 &tmu0_device,
595 &tmu1_device, 647 &tmu1_device,
@@ -597,7 +649,6 @@ static struct platform_device *sh7724_devices[] __initdata = {
597 &tmu3_device, 649 &tmu3_device,
598 &tmu4_device, 650 &tmu4_device,
599 &tmu5_device, 651 &tmu5_device,
600 &sci_device,
601 &rtc_device, 652 &rtc_device,
602 &iic0_device, 653 &iic0_device,
603 &iic1_device, 654 &iic1_device,
@@ -624,6 +675,12 @@ static int __init sh7724_devices_setup(void)
624arch_initcall(sh7724_devices_setup); 675arch_initcall(sh7724_devices_setup);
625 676
626static struct platform_device *sh7724_early_devices[] __initdata = { 677static struct platform_device *sh7724_early_devices[] __initdata = {
678 &scif0_device,
679 &scif1_device,
680 &scif2_device,
681 &scif3_device,
682 &scif4_device,
683 &scif5_device,
627 &cmt_device, 684 &cmt_device,
628 &tmu0_device, 685 &tmu0_device,
629 &tmu1_device, 686 &tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index c470e15f2e03..37e32efbbaa7 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -17,6 +17,51 @@
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/sh_timer.h> 18#include <linux/sh_timer.h>
19 19
20static struct plat_sci_port scif2_platform_data = {
21 .mapbase = 0xfe4b0000, /* SCIF2 */
22 .flags = UPF_BOOT_AUTOCONF,
23 .type = PORT_SCIF,
24 .irqs = { 40, 40, 40, 40 },
25};
26
27static struct platform_device scif2_device = {
28 .name = "sh-sci",
29 .id = 2,
30 .dev = {
31 .platform_data = &scif2_platform_data,
32 },
33};
34
35static struct plat_sci_port scif3_platform_data = {
36 .mapbase = 0xfe4c0000, /* SCIF3 */
37 .flags = UPF_BOOT_AUTOCONF,
38 .type = PORT_SCIF,
39 .irqs = { 76, 76, 76, 76 },
40};
41
42static struct platform_device scif3_device = {
43 .name = "sh-sci",
44 .id = 3,
45 .dev = {
46 .platform_data = &scif3_platform_data,
47 },
48};
49
50static struct plat_sci_port scif4_platform_data = {
51 .mapbase = 0xfe4d0000, /* SCIF4 */
52 .flags = UPF_BOOT_AUTOCONF,
53 .type = PORT_SCIF,
54 .irqs = { 104, 104, 104, 104 },
55};
56
57static struct platform_device scif4_device = {
58 .name = "sh-sci",
59 .id = 4,
60 .dev = {
61 .platform_data = &scif4_platform_data,
62 },
63};
64
20static struct sh_timer_config tmu0_platform_data = { 65static struct sh_timer_config tmu0_platform_data = {
21 .name = "TMU0", 66 .name = "TMU0",
22 .channel_offset = 0x04, 67 .channel_offset = 0x04,
@@ -79,39 +124,12 @@ static struct platform_device tmu1_device = {
79 .num_resources = ARRAY_SIZE(tmu1_resources), 124 .num_resources = ARRAY_SIZE(tmu1_resources),
80}; 125};
81 126
82static struct plat_sci_port sci_platform_data[] = {
83 {
84 .mapbase = 0xfe4b0000, /* SCIF2 */
85 .flags = UPF_BOOT_AUTOCONF,
86 .type = PORT_SCIF,
87 .irqs = { 40, 40, 40, 40 },
88 }, {
89 .mapbase = 0xfe4c0000, /* SCIF3 */
90 .flags = UPF_BOOT_AUTOCONF,
91 .type = PORT_SCIF,
92 .irqs = { 76, 76, 76, 76 },
93 }, {
94 .mapbase = 0xfe4d0000, /* SCIF4 */
95 .flags = UPF_BOOT_AUTOCONF,
96 .type = PORT_SCIF,
97 .irqs = { 104, 104, 104, 104 },
98 }, {
99 .flags = 0,
100 }
101};
102
103static struct platform_device sci_device = {
104 .name = "sh-sci",
105 .id = -1,
106 .dev = {
107 .platform_data = sci_platform_data,
108 },
109};
110
111static struct platform_device *sh7757_devices[] __initdata = { 127static struct platform_device *sh7757_devices[] __initdata = {
128 &scif2_device,
129 &scif3_device,
130 &scif4_device,
112 &tmu0_device, 131 &tmu0_device,
113 &tmu1_device, 132 &tmu1_device,
114 &sci_device,
115}; 133};
116 134
117static int __init sh7757_devices_setup(void) 135static int __init sh7757_devices_setup(void)
@@ -121,6 +139,20 @@ static int __init sh7757_devices_setup(void)
121} 139}
122arch_initcall(sh7757_devices_setup); 140arch_initcall(sh7757_devices_setup);
123 141
142static struct platform_device *sh7757_early_devices[] __initdata = {
143 &scif2_device,
144 &scif3_device,
145 &scif4_device,
146 &tmu0_device,
147 &tmu1_device,
148};
149
150void __init plat_early_device_setup(void)
151{
152 early_platform_add_devices(sh7757_early_devices,
153 ARRAY_SIZE(sh7757_early_devices));
154}
155
124enum { 156enum {
125 UNUSED = 0, 157 UNUSED = 0,
126 158
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 4659fff6b842..6aba26fec416 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -16,6 +16,51 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/serial_sci.h> 17#include <linux/serial_sci.h>
18 18
19static struct plat_sci_port scif0_platform_data = {
20 .mapbase = 0xffe00000,
21 .flags = UPF_BOOT_AUTOCONF,
22 .type = PORT_SCIF,
23 .irqs = { 40, 40, 40, 40 },
24};
25
26static struct platform_device scif0_device = {
27 .name = "sh-sci",
28 .id = 0,
29 .dev = {
30 .platform_data = &scif0_platform_data,
31 },
32};
33
34static struct plat_sci_port scif1_platform_data = {
35 .mapbase = 0xffe08000,
36 .flags = UPF_BOOT_AUTOCONF,
37 .type = PORT_SCIF,
38 .irqs = { 76, 76, 76, 76 },
39};
40
41static struct platform_device scif1_device = {
42 .name = "sh-sci",
43 .id = 1,
44 .dev = {
45 .platform_data = &scif1_platform_data,
46 },
47};
48
49static struct plat_sci_port scif2_platform_data = {
50 .mapbase = 0xffe10000,
51 .flags = UPF_BOOT_AUTOCONF,
52 .type = PORT_SCIF,
53 .irqs = { 104, 104, 104, 104 },
54};
55
56static struct platform_device scif2_device = {
57 .name = "sh-sci",
58 .id = 2,
59 .dev = {
60 .platform_data = &scif2_platform_data,
61 },
62};
63
19static struct resource rtc_resources[] = { 64static struct resource rtc_resources[] = {
20 [0] = { 65 [0] = {
21 .start = 0xffe80000, 66 .start = 0xffe80000,
@@ -36,35 +81,6 @@ static struct platform_device rtc_device = {
36 .resource = rtc_resources, 81 .resource = rtc_resources,
37}; 82};
38 83
39static struct plat_sci_port sci_platform_data[] = {
40 {
41 .mapbase = 0xffe00000,
42 .flags = UPF_BOOT_AUTOCONF,
43 .type = PORT_SCIF,
44 .irqs = { 40, 40, 40, 40 },
45 }, {
46 .mapbase = 0xffe08000,
47 .flags = UPF_BOOT_AUTOCONF,
48 .type = PORT_SCIF,
49 .irqs = { 76, 76, 76, 76 },
50 }, {
51 .mapbase = 0xffe10000,
52 .flags = UPF_BOOT_AUTOCONF,
53 .type = PORT_SCIF,
54 .irqs = { 104, 104, 104, 104 },
55 }, {
56 .flags = 0,
57 }
58};
59
60static struct platform_device sci_device = {
61 .name = "sh-sci",
62 .id = -1,
63 .dev = {
64 .platform_data = sci_platform_data,
65 },
66};
67
68static struct resource usb_ohci_resources[] = { 84static struct resource usb_ohci_resources[] = {
69 [0] = { 85 [0] = {
70 .start = 0xffec8000, 86 .start = 0xffec8000,
@@ -297,6 +313,9 @@ static struct platform_device tmu5_device = {
297}; 313};
298 314
299static struct platform_device *sh7763_devices[] __initdata = { 315static struct platform_device *sh7763_devices[] __initdata = {
316 &scif0_device,
317 &scif1_device,
318 &scif2_device,
300 &tmu0_device, 319 &tmu0_device,
301 &tmu1_device, 320 &tmu1_device,
302 &tmu2_device, 321 &tmu2_device,
@@ -304,7 +323,6 @@ static struct platform_device *sh7763_devices[] __initdata = {
304 &tmu4_device, 323 &tmu4_device,
305 &tmu5_device, 324 &tmu5_device,
306 &rtc_device, 325 &rtc_device,
307 &sci_device,
308 &usb_ohci_device, 326 &usb_ohci_device,
309 &usbf_device, 327 &usbf_device,
310}; 328};
@@ -317,6 +335,9 @@ static int __init sh7763_devices_setup(void)
317arch_initcall(sh7763_devices_setup); 335arch_initcall(sh7763_devices_setup);
318 336
319static struct platform_device *sh7763_early_devices[] __initdata = { 337static struct platform_device *sh7763_early_devices[] __initdata = {
338 &scif0_device,
339 &scif1_device,
340 &scif2_device,
320 &tmu0_device, 341 &tmu0_device,
321 &tmu1_device, 342 &tmu1_device,
322 &tmu2_device, 343 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index eead08d89d32..c1643bc9590d 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -14,67 +14,153 @@
14#include <linux/sh_timer.h> 14#include <linux/sh_timer.h>
15#include <linux/io.h> 15#include <linux/io.h>
16 16
17static struct plat_sci_port sci_platform_data[] = { 17static struct plat_sci_port scif0_platform_data = {
18 { 18 .mapbase = 0xff923000,
19 .mapbase = 0xff923000, 19 .flags = UPF_BOOT_AUTOCONF,
20 .flags = UPF_BOOT_AUTOCONF, 20 .type = PORT_SCIF,
21 .type = PORT_SCIF, 21 .irqs = { 61, 61, 61, 61 },
22 .irqs = { 61, 61, 61, 61 }, 22};
23 }, { 23
24 .mapbase = 0xff924000, 24static struct platform_device scif0_device = {
25 .flags = UPF_BOOT_AUTOCONF, 25 .name = "sh-sci",
26 .type = PORT_SCIF, 26 .id = 0,
27 .irqs = { 62, 62, 62, 62 }, 27 .dev = {
28 }, { 28 .platform_data = &scif0_platform_data,
29 .mapbase = 0xff925000, 29 },
30 .flags = UPF_BOOT_AUTOCONF, 30};
31 .type = PORT_SCIF, 31
32 .irqs = { 63, 63, 63, 63 }, 32static struct plat_sci_port scif1_platform_data = {
33 }, { 33 .mapbase = 0xff924000,
34 .mapbase = 0xff926000, 34 .flags = UPF_BOOT_AUTOCONF,
35 .flags = UPF_BOOT_AUTOCONF, 35 .type = PORT_SCIF,
36 .type = PORT_SCIF, 36 .irqs = { 62, 62, 62, 62 },
37 .irqs = { 64, 64, 64, 64 }, 37};
38 }, { 38
39 .mapbase = 0xff927000, 39static struct platform_device scif1_device = {
40 .flags = UPF_BOOT_AUTOCONF, 40 .name = "sh-sci",
41 .type = PORT_SCIF, 41 .id = 1,
42 .irqs = { 65, 65, 65, 65 }, 42 .dev = {
43 }, { 43 .platform_data = &scif1_platform_data,
44 .mapbase = 0xff928000, 44 },
45 .flags = UPF_BOOT_AUTOCONF, 45};
46 .type = PORT_SCIF, 46
47 .irqs = { 66, 66, 66, 66 }, 47static struct plat_sci_port scif2_platform_data = {
48 }, { 48 .mapbase = 0xff925000,
49 .mapbase = 0xff929000, 49 .flags = UPF_BOOT_AUTOCONF,
50 .flags = UPF_BOOT_AUTOCONF, 50 .type = PORT_SCIF,
51 .type = PORT_SCIF, 51 .irqs = { 63, 63, 63, 63 },
52 .irqs = { 67, 67, 67, 67 }, 52};
53 }, { 53
54 .mapbase = 0xff92a000, 54static struct platform_device scif2_device = {
55 .flags = UPF_BOOT_AUTOCONF, 55 .name = "sh-sci",
56 .type = PORT_SCIF, 56 .id = 2,
57 .irqs = { 68, 68, 68, 68 }, 57 .dev = {
58 }, { 58 .platform_data = &scif2_platform_data,
59 .mapbase = 0xff92b000, 59 },
60 .flags = UPF_BOOT_AUTOCONF, 60};
61 .type = PORT_SCIF, 61
62 .irqs = { 69, 69, 69, 69 }, 62static struct plat_sci_port scif3_platform_data = {
63 }, { 63 .mapbase = 0xff926000,
64 .mapbase = 0xff92c000, 64 .flags = UPF_BOOT_AUTOCONF,
65 .flags = UPF_BOOT_AUTOCONF, 65 .type = PORT_SCIF,
66 .type = PORT_SCIF, 66 .irqs = { 64, 64, 64, 64 },
67 .irqs = { 70, 70, 70, 70 }, 67};
68 }, { 68
69 .flags = 0, 69static struct platform_device scif3_device = {
70 } 70 .name = "sh-sci",
71 .id = 3,
72 .dev = {
73 .platform_data = &scif3_platform_data,
74 },
75};
76
77static struct plat_sci_port scif4_platform_data = {
78 .mapbase = 0xff927000,
79 .flags = UPF_BOOT_AUTOCONF,
80 .type = PORT_SCIF,
81 .irqs = { 65, 65, 65, 65 },
82};
83
84static struct platform_device scif4_device = {
85 .name = "sh-sci",
86 .id = 4,
87 .dev = {
88 .platform_data = &scif4_platform_data,
89 },
90};
91
92static struct plat_sci_port scif5_platform_data = {
93 .mapbase = 0xff928000,
94 .flags = UPF_BOOT_AUTOCONF,
95 .type = PORT_SCIF,
96 .irqs = { 66, 66, 66, 66 },
97};
98
99static struct platform_device scif5_device = {
100 .name = "sh-sci",
101 .id = 5,
102 .dev = {
103 .platform_data = &scif5_platform_data,
104 },
105};
106
107static struct plat_sci_port scif6_platform_data = {
108 .mapbase = 0xff929000,
109 .flags = UPF_BOOT_AUTOCONF,
110 .type = PORT_SCIF,
111 .irqs = { 67, 67, 67, 67 },
112};
113
114static struct platform_device scif6_device = {
115 .name = "sh-sci",
116 .id = 6,
117 .dev = {
118 .platform_data = &scif6_platform_data,
119 },
120};
121
122static struct plat_sci_port scif7_platform_data = {
123 .mapbase = 0xff92a000,
124 .flags = UPF_BOOT_AUTOCONF,
125 .type = PORT_SCIF,
126 .irqs = { 68, 68, 68, 68 },
127};
128
129static struct platform_device scif7_device = {
130 .name = "sh-sci",
131 .id = 7,
132 .dev = {
133 .platform_data = &scif7_platform_data,
134 },
135};
136
137static struct plat_sci_port scif8_platform_data = {
138 .mapbase = 0xff92b000,
139 .flags = UPF_BOOT_AUTOCONF,
140 .type = PORT_SCIF,
141 .irqs = { 69, 69, 69, 69 },
142};
143
144static struct platform_device scif8_device = {
145 .name = "sh-sci",
146 .id = 8,
147 .dev = {
148 .platform_data = &scif8_platform_data,
149 },
150};
151
152static struct plat_sci_port scif9_platform_data = {
153 .mapbase = 0xff92c000,
154 .flags = UPF_BOOT_AUTOCONF,
155 .type = PORT_SCIF,
156 .irqs = { 70, 70, 70, 70 },
71}; 157};
72 158
73static struct platform_device sci_device = { 159static struct platform_device scif9_device = {
74 .name = "sh-sci", 160 .name = "sh-sci",
75 .id = -1, 161 .id = 9,
76 .dev = { 162 .dev = {
77 .platform_data = sci_platform_data, 163 .platform_data = &scif9_platform_data,
78 }, 164 },
79}; 165};
80 166
@@ -351,6 +437,16 @@ static struct platform_device tmu8_device = {
351}; 437};
352 438
353static struct platform_device *sh7770_devices[] __initdata = { 439static struct platform_device *sh7770_devices[] __initdata = {
440 &scif0_device,
441 &scif1_device,
442 &scif2_device,
443 &scif3_device,
444 &scif4_device,
445 &scif5_device,
446 &scif6_device,
447 &scif7_device,
448 &scif8_device,
449 &scif9_device,
354 &tmu0_device, 450 &tmu0_device,
355 &tmu1_device, 451 &tmu1_device,
356 &tmu2_device, 452 &tmu2_device,
@@ -360,7 +456,6 @@ static struct platform_device *sh7770_devices[] __initdata = {
360 &tmu6_device, 456 &tmu6_device,
361 &tmu7_device, 457 &tmu7_device,
362 &tmu8_device, 458 &tmu8_device,
363 &sci_device,
364}; 459};
365 460
366static int __init sh7770_devices_setup(void) 461static int __init sh7770_devices_setup(void)
@@ -371,6 +466,16 @@ static int __init sh7770_devices_setup(void)
371arch_initcall(sh7770_devices_setup); 466arch_initcall(sh7770_devices_setup);
372 467
373static struct platform_device *sh7770_early_devices[] __initdata = { 468static struct platform_device *sh7770_early_devices[] __initdata = {
469 &scif0_device,
470 &scif1_device,
471 &scif2_device,
472 &scif3_device,
473 &scif4_device,
474 &scif5_device,
475 &scif6_device,
476 &scif7_device,
477 &scif8_device,
478 &scif9_device,
374 &tmu0_device, 479 &tmu0_device,
375 &tmu1_device, 480 &tmu1_device,
376 &tmu2_device, 481 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 12ff56f19c5c..c310558490d5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -15,6 +15,36 @@
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <asm/dma-sh.h> 16#include <asm/dma-sh.h>
17 17
18static struct plat_sci_port scif0_platform_data = {
19 .mapbase = 0xffe00000,
20 .flags = UPF_BOOT_AUTOCONF,
21 .type = PORT_SCIF,
22 .irqs = { 40, 40, 40, 40 },
23};
24
25static struct platform_device scif0_device = {
26 .name = "sh-sci",
27 .id = 0,
28 .dev = {
29 .platform_data = &scif0_platform_data,
30 },
31};
32
33static struct plat_sci_port scif1_platform_data = {
34 .mapbase = 0xffe10000,
35 .flags = UPF_BOOT_AUTOCONF,
36 .type = PORT_SCIF,
37 .irqs = { 76, 76, 76, 76 },
38};
39
40static struct platform_device scif1_device = {
41 .name = "sh-sci",
42 .id = 1,
43 .dev = {
44 .platform_data = &scif1_platform_data,
45 },
46};
47
18static struct sh_timer_config tmu0_platform_data = { 48static struct sh_timer_config tmu0_platform_data = {
19 .name = "TMU0", 49 .name = "TMU0",
20 .channel_offset = 0x04, 50 .channel_offset = 0x04,
@@ -217,30 +247,6 @@ static struct platform_device rtc_device = {
217 .resource = rtc_resources, 247 .resource = rtc_resources,
218}; 248};
219 249
220static struct plat_sci_port sci_platform_data[] = {
221 {
222 .mapbase = 0xffe00000,
223 .flags = UPF_BOOT_AUTOCONF,
224 .type = PORT_SCIF,
225 .irqs = { 40, 40, 40, 40 },
226 }, {
227 .mapbase = 0xffe10000,
228 .flags = UPF_BOOT_AUTOCONF,
229 .type = PORT_SCIF,
230 .irqs = { 76, 76, 76, 76 },
231 }, {
232 .flags = 0,
233 }
234};
235
236static struct platform_device sci_device = {
237 .name = "sh-sci",
238 .id = -1,
239 .dev = {
240 .platform_data = sci_platform_data,
241 },
242};
243
244static struct sh_dmae_pdata dma_platform_data = { 250static struct sh_dmae_pdata dma_platform_data = {
245 .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), 251 .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1),
246}; 252};
@@ -254,6 +260,8 @@ static struct platform_device dma_device = {
254}; 260};
255 261
256static struct platform_device *sh7780_devices[] __initdata = { 262static struct platform_device *sh7780_devices[] __initdata = {
263 &scif0_device,
264 &scif1_device,
257 &tmu0_device, 265 &tmu0_device,
258 &tmu1_device, 266 &tmu1_device,
259 &tmu2_device, 267 &tmu2_device,
@@ -261,7 +269,6 @@ static struct platform_device *sh7780_devices[] __initdata = {
261 &tmu4_device, 269 &tmu4_device,
262 &tmu5_device, 270 &tmu5_device,
263 &rtc_device, 271 &rtc_device,
264 &sci_device,
265 &dma_device, 272 &dma_device,
266}; 273};
267 274
@@ -271,8 +278,9 @@ static int __init sh7780_devices_setup(void)
271 ARRAY_SIZE(sh7780_devices)); 278 ARRAY_SIZE(sh7780_devices));
272} 279}
273arch_initcall(sh7780_devices_setup); 280arch_initcall(sh7780_devices_setup);
274
275static struct platform_device *sh7780_early_devices[] __initdata = { 281static struct platform_device *sh7780_early_devices[] __initdata = {
282 &scif0_device,
283 &scif1_device,
276 &tmu0_device, 284 &tmu0_device,
277 &tmu1_device, 285 &tmu1_device,
278 &tmu2_device, 286 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 7f6c718b6c36..ef26ebda6e8b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -16,6 +16,102 @@
16#include <linux/sh_timer.h> 16#include <linux/sh_timer.h>
17#include <asm/mmzone.h> 17#include <asm/mmzone.h>
18 18
19static struct plat_sci_port scif0_platform_data = {
20 .mapbase = 0xffea0000,
21 .flags = UPF_BOOT_AUTOCONF,
22 .type = PORT_SCIF,
23 .irqs = { 40, 40, 40, 40 },
24 .clk = "scif_fck",
25};
26
27static struct platform_device scif0_device = {
28 .name = "sh-sci",
29 .id = 0,
30 .dev = {
31 .platform_data = &scif0_platform_data,
32 },
33};
34
35static struct plat_sci_port scif1_platform_data = {
36 .mapbase = 0xffeb0000,
37 .flags = UPF_BOOT_AUTOCONF,
38 .type = PORT_SCIF,
39 .irqs = { 44, 44, 44, 44 },
40 .clk = "scif_fck",
41};
42
43static struct platform_device scif1_device = {
44 .name = "sh-sci",
45 .id = 1,
46 .dev = {
47 .platform_data = &scif1_platform_data,
48 },
49};
50
51static struct plat_sci_port scif2_platform_data = {
52 .mapbase = 0xffec0000,
53 .flags = UPF_BOOT_AUTOCONF,
54 .type = PORT_SCIF,
55 .irqs = { 60, 60, 60, 60 },
56 .clk = "scif_fck",
57};
58
59static struct platform_device scif2_device = {
60 .name = "sh-sci",
61 .id = 2,
62 .dev = {
63 .platform_data = &scif2_platform_data,
64 },
65};
66
67static struct plat_sci_port scif3_platform_data = {
68 .mapbase = 0xffed0000,
69 .flags = UPF_BOOT_AUTOCONF,
70 .type = PORT_SCIF,
71 .irqs = { 61, 61, 61, 61 },
72 .clk = "scif_fck",
73};
74
75static struct platform_device scif3_device = {
76 .name = "sh-sci",
77 .id = 3,
78 .dev = {
79 .platform_data = &scif3_platform_data,
80 },
81};
82
83static struct plat_sci_port scif4_platform_data = {
84 .mapbase = 0xffee0000,
85 .flags = UPF_BOOT_AUTOCONF,
86 .type = PORT_SCIF,
87 .irqs = { 62, 62, 62, 62 },
88 .clk = "scif_fck",
89};
90
91static struct platform_device scif4_device = {
92 .name = "sh-sci",
93 .id = 4,
94 .dev = {
95 .platform_data = &scif4_platform_data,
96 },
97};
98
99static struct plat_sci_port scif5_platform_data = {
100 .mapbase = 0xffef0000,
101 .flags = UPF_BOOT_AUTOCONF,
102 .type = PORT_SCIF,
103 .irqs = { 63, 63, 63, 63 },
104 .clk = "scif_fck",
105};
106
107static struct platform_device scif5_device = {
108 .name = "sh-sci",
109 .id = 5,
110 .dev = {
111 .platform_data = &scif5_platform_data,
112 },
113};
114
19static struct sh_timer_config tmu0_platform_data = { 115static struct sh_timer_config tmu0_platform_data = {
20 .name = "TMU0", 116 .name = "TMU0",
21 .channel_offset = 0x04, 117 .channel_offset = 0x04,
@@ -198,64 +294,19 @@ static struct platform_device tmu5_device = {
198 .num_resources = ARRAY_SIZE(tmu5_resources), 294 .num_resources = ARRAY_SIZE(tmu5_resources),
199}; 295};
200 296
201static struct plat_sci_port sci_platform_data[] = {
202 {
203 .mapbase = 0xffea0000,
204 .flags = UPF_BOOT_AUTOCONF,
205 .type = PORT_SCIF,
206 .irqs = { 40, 40, 40, 40 },
207 .clk = "scif_fck",
208 }, {
209 .mapbase = 0xffeb0000,
210 .flags = UPF_BOOT_AUTOCONF,
211 .type = PORT_SCIF,
212 .irqs = { 44, 44, 44, 44 },
213 .clk = "scif_fck",
214 }, {
215 .mapbase = 0xffec0000,
216 .flags = UPF_BOOT_AUTOCONF,
217 .type = PORT_SCIF,
218 .irqs = { 60, 60, 60, 60 },
219 .clk = "scif_fck",
220 }, {
221 .mapbase = 0xffed0000,
222 .flags = UPF_BOOT_AUTOCONF,
223 .type = PORT_SCIF,
224 .irqs = { 61, 61, 61, 61 },
225 .clk = "scif_fck",
226 }, {
227 .mapbase = 0xffee0000,
228 .flags = UPF_BOOT_AUTOCONF,
229 .type = PORT_SCIF,
230 .irqs = { 62, 62, 62, 62 },
231 .clk = "scif_fck",
232 }, {
233 .mapbase = 0xffef0000,
234 .flags = UPF_BOOT_AUTOCONF,
235 .type = PORT_SCIF,
236 .irqs = { 63, 63, 63, 63 },
237 .clk = "scif_fck",
238 }, {
239 .flags = 0,
240 }
241};
242
243static struct platform_device sci_device = {
244 .name = "sh-sci",
245 .id = -1,
246 .dev = {
247 .platform_data = sci_platform_data,
248 },
249};
250
251static struct platform_device *sh7785_devices[] __initdata = { 297static struct platform_device *sh7785_devices[] __initdata = {
298 &scif0_device,
299 &scif1_device,
300 &scif2_device,
301 &scif3_device,
302 &scif4_device,
303 &scif5_device,
252 &tmu0_device, 304 &tmu0_device,
253 &tmu1_device, 305 &tmu1_device,
254 &tmu2_device, 306 &tmu2_device,
255 &tmu3_device, 307 &tmu3_device,
256 &tmu4_device, 308 &tmu4_device,
257 &tmu5_device, 309 &tmu5_device,
258 &sci_device,
259}; 310};
260 311
261static int __init sh7785_devices_setup(void) 312static int __init sh7785_devices_setup(void)
@@ -266,6 +317,12 @@ static int __init sh7785_devices_setup(void)
266arch_initcall(sh7785_devices_setup); 317arch_initcall(sh7785_devices_setup);
267 318
268static struct platform_device *sh7785_early_devices[] __initdata = { 319static struct platform_device *sh7785_early_devices[] __initdata = {
320 &scif0_device,
321 &scif1_device,
322 &scif2_device,
323 &scif3_device,
324 &scif4_device,
325 &scif5_device,
269 &tmu0_device, 326 &tmu0_device,
270 &tmu1_device, 327 &tmu1_device,
271 &tmu2_device, 328 &tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 0104a8ec5369..71673487ace0 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -23,51 +23,96 @@
23#include <linux/sh_timer.h> 23#include <linux/sh_timer.h>
24#include <asm/mmzone.h> 24#include <asm/mmzone.h>
25 25
26static struct plat_sci_port sci_platform_data[] = { 26static struct plat_sci_port scif0_platform_data = {
27 { 27 .mapbase = 0xffea0000,
28 .mapbase = 0xffea0000, 28 .flags = UPF_BOOT_AUTOCONF,
29 .flags = UPF_BOOT_AUTOCONF, 29 .type = PORT_SCIF,
30 .type = PORT_SCIF, 30 .irqs = { 40, 41, 43, 42 },
31 .irqs = { 40, 41, 43, 42 }, 31};
32
33static struct platform_device scif0_device = {
34 .name = "sh-sci",
35 .id = 0,
36 .dev = {
37 .platform_data = &scif0_platform_data,
32 }, 38 },
33 /*
34 * The rest of these all have multiplexed IRQs
35 */
36 {
37 .mapbase = 0xffeb0000,
38 .flags = UPF_BOOT_AUTOCONF,
39 .type = PORT_SCIF,
40 .irqs = { 44, 44, 44, 44 },
41 }, {
42 .mapbase = 0xffec0000,
43 .flags = UPF_BOOT_AUTOCONF,
44 .type = PORT_SCIF,
45 .irqs = { 50, 50, 50, 50 },
46 }, {
47 .mapbase = 0xffed0000,
48 .flags = UPF_BOOT_AUTOCONF,
49 .type = PORT_SCIF,
50 .irqs = { 51, 51, 51, 51 },
51 }, {
52 .mapbase = 0xffee0000,
53 .flags = UPF_BOOT_AUTOCONF,
54 .type = PORT_SCIF,
55 .irqs = { 52, 52, 52, 52 },
56 }, {
57 .mapbase = 0xffef0000,
58 .flags = UPF_BOOT_AUTOCONF,
59 .type = PORT_SCIF,
60 .irqs = { 53, 53, 53, 53 },
61 }, {
62 .flags = 0,
63 }
64}; 39};
65 40
66static struct platform_device sci_device = { 41/*
42 * The rest of these all have multiplexed IRQs
43 */
44static struct plat_sci_port scif1_platform_data = {
45 .mapbase = 0xffeb0000,
46 .flags = UPF_BOOT_AUTOCONF,
47 .type = PORT_SCIF,
48 .irqs = { 44, 44, 44, 44 },
49};
50
51static struct platform_device scif1_device = {
67 .name = "sh-sci", 52 .name = "sh-sci",
68 .id = -1, 53 .id = 1,
54 .dev = {
55 .platform_data = &scif1_platform_data,
56 },
57};
58
59static struct plat_sci_port scif2_platform_data = {
60 .mapbase = 0xffec0000,
61 .flags = UPF_BOOT_AUTOCONF,
62 .type = PORT_SCIF,
63 .irqs = { 50, 50, 50, 50 },
64};
65
66static struct platform_device scif2_device = {
67 .name = "sh-sci",
68 .id = 2,
69 .dev = {
70 .platform_data = &scif2_platform_data,
71 },
72};
73
74static struct plat_sci_port scif3_platform_data = {
75 .mapbase = 0xffed0000,
76 .flags = UPF_BOOT_AUTOCONF,
77 .type = PORT_SCIF,
78 .irqs = { 51, 51, 51, 51 },
79};
80
81static struct platform_device scif3_device = {
82 .name = "sh-sci",
83 .id = 3,
84 .dev = {
85 .platform_data = &scif3_platform_data,
86 },
87};
88
89static struct plat_sci_port scif4_platform_data = {
90 .mapbase = 0xffee0000,
91 .flags = UPF_BOOT_AUTOCONF,
92 .type = PORT_SCIF,
93 .irqs = { 52, 52, 52, 52 },
94};
95
96static struct platform_device scif4_device = {
97 .name = "sh-sci",
98 .id = 4,
99 .dev = {
100 .platform_data = &scif4_platform_data,
101 },
102};
103
104static struct plat_sci_port scif5_platform_data = {
105 .mapbase = 0xffef0000,
106 .flags = UPF_BOOT_AUTOCONF,
107 .type = PORT_SCIF,
108 .irqs = { 53, 53, 53, 53 },
109};
110
111static struct platform_device scif5_device = {
112 .name = "sh-sci",
113 .id = 5,
69 .dev = { 114 .dev = {
70 .platform_data = sci_platform_data, 115 .platform_data = &scif5_platform_data,
71 }, 116 },
72}; 117};
73 118
@@ -459,6 +504,12 @@ static struct platform_device usb_ohci_device = {
459}; 504};
460 505
461static struct platform_device *sh7786_early_devices[] __initdata = { 506static struct platform_device *sh7786_early_devices[] __initdata = {
507 &scif0_device,
508 &scif1_device,
509 &scif2_device,
510 &scif3_device,
511 &scif4_device,
512 &scif5_device,
462 &tmu0_device, 513 &tmu0_device,
463 &tmu1_device, 514 &tmu1_device,
464 &tmu2_device, 515 &tmu2_device,
@@ -474,7 +525,6 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
474}; 525};
475 526
476static struct platform_device *sh7786_devices[] __initdata = { 527static struct platform_device *sh7786_devices[] __initdata = {
477 &sci_device,
478 &usb_ohci_device, 528 &usb_ohci_device,
479}; 529};
480 530
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index c7ba9166e18a..780ba17a5599 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -24,32 +24,48 @@
24 * silicon in the first place, we just refuse to deal with the port at 24 * silicon in the first place, we just refuse to deal with the port at
25 * all rather than adding infrastructure to hack around it. 25 * all rather than adding infrastructure to hack around it.
26 */ 26 */
27static struct plat_sci_port sci_platform_data[] = { 27static struct plat_sci_port scif0_platform_data = {
28 { 28 .mapbase = 0xffc30000,
29 .mapbase = 0xffc30000, 29 .flags = UPF_BOOT_AUTOCONF,
30 .flags = UPF_BOOT_AUTOCONF, 30 .type = PORT_SCIF,
31 .type = PORT_SCIF, 31 .irqs = { 40, 41, 43, 42 },
32 .irqs = { 40, 41, 43, 42 }, 32};
33 }, { 33
34 .mapbase = 0xffc40000, 34static struct platform_device scif0_device = {
35 .flags = UPF_BOOT_AUTOCONF, 35 .name = "sh-sci",
36 .type = PORT_SCIF, 36 .id = 0,
37 .irqs = { 44, 45, 47, 46 }, 37 .dev = {
38 }, { 38 .platform_data = &scif0_platform_data,
39 .mapbase = 0xffc60000, 39 },
40 .flags = UPF_BOOT_AUTOCONF, 40};
41 .type = PORT_SCIF, 41
42 .irqs = { 52, 53, 55, 54 }, 42static struct plat_sci_port scif1_platform_data = {
43 }, { 43 .mapbase = 0xffc40000,
44 .flags = 0, 44 .flags = UPF_BOOT_AUTOCONF,
45 } 45 .type = PORT_SCIF,
46 .irqs = { 44, 45, 47, 46 },
47};
48
49static struct platform_device scif1_device = {
50 .name = "sh-sci",
51 .id = 1,
52 .dev = {
53 .platform_data = &scif1_platform_data,
54 },
55};
56
57static struct plat_sci_port scif2_platform_data = {
58 .mapbase = 0xffc60000,
59 .flags = UPF_BOOT_AUTOCONF,
60 .type = PORT_SCIF,
61 .irqs = { 52, 53, 55, 54 },
46}; 62};
47 63
48static struct platform_device sci_device = { 64static struct platform_device scif2_device = {
49 .name = "sh-sci", 65 .name = "sh-sci",
50 .id = -1, 66 .id = 2,
51 .dev = { 67 .dev = {
52 .platform_data = sci_platform_data, 68 .platform_data = &scif2_platform_data,
53 }, 69 },
54}; 70};
55 71
@@ -236,6 +252,9 @@ static struct platform_device tmu5_device = {
236}; 252};
237 253
238static struct platform_device *shx3_early_devices[] __initdata = { 254static struct platform_device *shx3_early_devices[] __initdata = {
255 &scif0_device,
256 &scif1_device,
257 &scif2_device,
239 &tmu0_device, 258 &tmu0_device,
240 &tmu1_device, 259 &tmu1_device,
241 &tmu2_device, 260 &tmu2_device,
@@ -244,21 +263,10 @@ static struct platform_device *shx3_early_devices[] __initdata = {
244 &tmu5_device, 263 &tmu5_device,
245}; 264};
246 265
247static struct platform_device *shx3_devices[] __initdata = {
248 &sci_device,
249};
250
251static int __init shx3_devices_setup(void) 266static int __init shx3_devices_setup(void)
252{ 267{
253 int ret; 268 return platform_add_devices(shx3_early_devices,
254
255 ret = platform_add_devices(shx3_early_devices,
256 ARRAY_SIZE(shx3_early_devices)); 269 ARRAY_SIZE(shx3_early_devices));
257 if (unlikely(ret != 0))
258 return ret;
259
260 return platform_add_devices(shx3_devices,
261 ARRAY_SIZE(shx3_devices));
262} 270}
263arch_initcall(shx3_devices_setup); 271arch_initcall(shx3_devices_setup);
264 272
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index dd4f51ffb50e..4648ccee6c4d 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -34,7 +34,7 @@ static union sh_fpu_union init_fpuregs = {
34 } 34 }
35}; 35};
36 36
37void save_fpu(struct task_struct *tsk, struct pt_regs *regs) 37void save_fpu(struct task_struct *tsk)
38{ 38{
39 asm volatile("fst.p %0, (0*8), fp0\n\t" 39 asm volatile("fst.p %0, (0*8), fp0\n\t"
40 "fst.p %0, (1*8), fp2\n\t" 40 "fst.p %0, (1*8), fp2\n\t"
@@ -153,7 +153,7 @@ do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
153 enable_fpu(); 153 enable_fpu();
154 if (last_task_used_math != NULL) 154 if (last_task_used_math != NULL)
155 /* Other processes fpu state, save away */ 155 /* Other processes fpu state, save away */
156 save_fpu(last_task_used_math, regs); 156 save_fpu(last_task_used_math);
157 157
158 last_task_used_math = current; 158 last_task_used_math = current;
159 if (used_math()) { 159 if (used_math()) {
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index 6a0f82f70032..e7a3c1e4b604 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -16,22 +16,18 @@
16#include <linux/sh_timer.h> 16#include <linux/sh_timer.h>
17#include <asm/addrspace.h> 17#include <asm/addrspace.h>
18 18
19static struct plat_sci_port sci_platform_data[] = { 19static struct plat_sci_port scif0_platform_data = {
20 { 20 .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000,
21 .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000, 21 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
22 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, 22 .type = PORT_SCIF,
23 .type = PORT_SCIF, 23 .irqs = { 39, 40, 42, 0 },
24 .irqs = { 39, 40, 42, 0 },
25 }, {
26 .flags = 0,
27 }
28}; 24};
29 25
30static struct platform_device sci_device = { 26static struct platform_device scif0_device = {
31 .name = "sh-sci", 27 .name = "sh-sci",
32 .id = -1, 28 .id = 0,
33 .dev = { 29 .dev = {
34 .platform_data = sci_platform_data, 30 .platform_data = &scif0_platform_data,
35 }, 31 },
36}; 32};
37 33
@@ -164,13 +160,13 @@ static struct platform_device tmu2_device = {
164}; 160};
165 161
166static struct platform_device *sh5_early_devices[] __initdata = { 162static struct platform_device *sh5_early_devices[] __initdata = {
163 &scif0_device,
167 &tmu0_device, 164 &tmu0_device,
168 &tmu1_device, 165 &tmu1_device,
169 &tmu2_device, 166 &tmu2_device,
170}; 167};
171 168
172static struct platform_device *sh5_devices[] __initdata = { 169static struct platform_device *sh5_devices[] __initdata = {
173 &sci_device,
174 &rtc_device, 170 &rtc_device,
175}; 171};
176 172
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 81a46145ffa5..f8bb50c6e050 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -15,7 +15,6 @@
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17 17
18#ifdef CONFIG_SH_STANDARD_BIOS
19#include <asm/sh_bios.h> 18#include <asm/sh_bios.h>
20 19
21/* 20/*
@@ -57,149 +56,8 @@ static struct console bios_console = {
57 .flags = CON_PRINTBUFFER, 56 .flags = CON_PRINTBUFFER,
58 .index = -1, 57 .index = -1,
59}; 58};
60#endif
61 59
62#ifdef CONFIG_EARLY_SCIF_CONSOLE 60static struct console *early_console;
63#include <linux/serial_core.h>
64#include "../../../drivers/serial/sh-sci.h"
65
66#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
67 defined(CONFIG_CPU_SUBTYPE_SH7721)
68#define EPK_SCSMR_VALUE 0x000
69#define EPK_SCBRR_VALUE 0x00C
70#define EPK_FIFO_SIZE 64
71#define EPK_FIFO_BITS (0x7f00 >> 8)
72#else
73#define EPK_FIFO_SIZE 16
74#define EPK_FIFO_BITS (0x1f00 >> 8)
75#endif
76
77static struct uart_port scif_port = {
78 .type = PORT_SCIF,
79 .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT,
80 .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
81};
82
83static void scif_sercon_putc(int c)
84{
85 while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE))
86 ;
87
88 sci_in(&scif_port, SCxSR);
89 sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40));
90 sci_out(&scif_port, SCxTDR, c);
91
92 while ((sci_in(&scif_port, SCxSR) & 0x40) == 0)
93 ;
94
95 if (c == '\n')
96 scif_sercon_putc('\r');
97}
98
99static void scif_sercon_write(struct console *con, const char *s,
100 unsigned count)
101{
102 while (count-- > 0)
103 scif_sercon_putc(*s++);
104}
105
106static int __init scif_sercon_setup(struct console *con, char *options)
107{
108 con->cflag = CREAD | HUPCL | CLOCAL | B115200 | CS8;
109
110 return 0;
111}
112
113static struct console scif_console = {
114 .name = "sercon",
115 .write = scif_sercon_write,
116 .setup = scif_sercon_setup,
117 .flags = CON_PRINTBUFFER,
118 .index = -1,
119};
120
121#if !defined(CONFIG_SH_STANDARD_BIOS)
122#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
123 defined(CONFIG_CPU_SUBTYPE_SH7721)
124static void scif_sercon_init(char *s)
125{
126 sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */
127 sci_out(&scif_port, SCFCR, 0x4006); /* reset */
128 sci_out(&scif_port, SCSCR, 0x0000); /* select internal clock */
129 sci_out(&scif_port, SCSMR, EPK_SCSMR_VALUE);
130 sci_out(&scif_port, SCBRR, EPK_SCBRR_VALUE);
131
132 mdelay(1); /* wait 1-bit time */
133
134 sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */
135 sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */
136}
137#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
138#define DEFAULT_BAUD 115200
139/*
140 * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
141 * devices that aren't using sh-ipl+g.
142 */
143static void scif_sercon_init(char *s)
144{
145 struct uart_port *port = &scif_port;
146 unsigned baud = DEFAULT_BAUD;
147 unsigned int status;
148 char *e;
149
150 if (*s == ',')
151 ++s;
152
153 if (*s) {
154 /* ignore ioport/device name */
155 s += strcspn(s, ",");
156 if (*s == ',')
157 s++;
158 }
159
160 if (*s) {
161 baud = simple_strtoul(s, &e, 0);
162 if (baud == 0 || s == e)
163 baud = DEFAULT_BAUD;
164 }
165
166 do {
167 status = sci_in(port, SCxSR);
168 } while (!(status & SCxSR_TEND(port)));
169
170 sci_out(port, SCSCR, 0); /* TE=0, RE=0 */
171 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
172 sci_out(port, SCSMR, 0);
173
174 /* Set baud rate */
175 sci_out(port, SCBRR, (CONFIG_SH_PCLK_FREQ + 16 * baud) /
176 (32 * baud) - 1);
177 udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
178
179 sci_out(port, SCSPTR, 0);
180 sci_out(port, SCxSR, 0x60);
181 sci_out(port, SCLSR, 0);
182
183 sci_out(port, SCFCR, 0);
184 sci_out(port, SCSCR, 0x30); /* TE=1, RE=1 */
185}
186#endif /* defined(CONFIG_CPU_SUBTYPE_SH7720) */
187#endif /* !defined(CONFIG_SH_STANDARD_BIOS) */
188#endif /* CONFIG_EARLY_SCIF_CONSOLE */
189
190/*
191 * Setup a default console, if more than one is compiled in, rely on the
192 * earlyprintk= parsing to give priority.
193 */
194static struct console *early_console =
195#ifdef CONFIG_SH_STANDARD_BIOS
196 &bios_console
197#elif defined(CONFIG_EARLY_SCIF_CONSOLE)
198 &scif_console
199#else
200 NULL
201#endif
202 ;
203 61
204static int __init setup_early_printk(char *buf) 62static int __init setup_early_printk(char *buf)
205{ 63{
@@ -211,21 +69,8 @@ static int __init setup_early_printk(char *buf)
211 if (strstr(buf, "keep")) 69 if (strstr(buf, "keep"))
212 keep_early = 1; 70 keep_early = 1;
213 71
214#ifdef CONFIG_SH_STANDARD_BIOS
215 if (!strncmp(buf, "bios", 4)) 72 if (!strncmp(buf, "bios", 4))
216 early_console = &bios_console; 73 early_console = &bios_console;
217#endif
218#if defined(CONFIG_EARLY_SCIF_CONSOLE)
219 if (!strncmp(buf, "serial", 6)) {
220 early_console = &scif_console;
221
222#if !defined(CONFIG_SH_STANDARD_BIOS)
223#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
224 scif_sercon_init(buf + 6);
225#endif
226#endif
227 }
228#endif
229 74
230 if (likely(early_console)) { 75 if (likely(early_console)) {
231 if (keep_early) 76 if (keep_early)
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index b6f41c109beb..a48cdedc73b5 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -401,82 +401,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
401#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 401#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
402 402
403#ifdef CONFIG_FTRACE_SYSCALLS 403#ifdef CONFIG_FTRACE_SYSCALLS
404
405extern unsigned long __start_syscalls_metadata[];
406extern unsigned long __stop_syscalls_metadata[];
407extern unsigned long *sys_call_table; 404extern unsigned long *sys_call_table;
408 405
409static struct syscall_metadata **syscalls_metadata; 406unsigned long __init arch_syscall_addr(int nr)
410
411static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
412{
413 struct syscall_metadata *start;
414 struct syscall_metadata *stop;
415 char str[KSYM_SYMBOL_LEN];
416
417
418 start = (struct syscall_metadata *)__start_syscalls_metadata;
419 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
420 kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
421
422 for ( ; start < stop; start++) {
423 if (start->name && !strcmp(start->name, str))
424 return start;
425 }
426
427 return NULL;
428}
429
430struct syscall_metadata *syscall_nr_to_meta(int nr)
431{
432 if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
433 return NULL;
434
435 return syscalls_metadata[nr];
436}
437
438int syscall_name_to_nr(char *name)
439{
440 int i;
441
442 if (!syscalls_metadata)
443 return -1;
444 for (i = 0; i < NR_syscalls; i++)
445 if (syscalls_metadata[i])
446 if (!strcmp(syscalls_metadata[i]->name, name))
447 return i;
448 return -1;
449}
450
451void set_syscall_enter_id(int num, int id)
452{
453 syscalls_metadata[num]->enter_id = id;
454}
455
456void set_syscall_exit_id(int num, int id)
457{
458 syscalls_metadata[num]->exit_id = id;
459}
460
461static int __init arch_init_ftrace_syscalls(void)
462{ 407{
463 int i; 408 return (unsigned long)sys_call_table[nr];
464 struct syscall_metadata *meta;
465 unsigned long **psys_syscall_table = &sys_call_table;
466
467 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
468 FTRACE_SYSCALL_MAX, GFP_KERNEL);
469 if (!syscalls_metadata) {
470 WARN_ON(1);
471 return -ENOMEM;
472 }
473
474 for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
475 meta = find_syscall_meta(psys_syscall_table[i]);
476 syscalls_metadata[i] = meta;
477 }
478
479 return 0;
480} 409}
481arch_initcall(arch_init_ftrace_syscalls);
482#endif /* CONFIG_FTRACE_SYSCALLS */ 410#endif /* CONFIG_FTRACE_SYSCALLS */
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index e1913f28f418..d2d41d046657 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
76 if (!desc) 76 if (!desc)
77 return 0; 77 return 0;
78 78
79 spin_lock_irqsave(&desc->lock, flags); 79 raw_spin_lock_irqsave(&desc->lock, flags);
80 for_each_online_cpu(j) 80 for_each_online_cpu(j)
81 any_count |= kstat_irqs_cpu(i, j); 81 any_count |= kstat_irqs_cpu(i, j);
82 action = desc->action; 82 action = desc->action;
@@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v)
97 97
98 seq_putc(p, '\n'); 98 seq_putc(p, '\n');
99out: 99out:
100 spin_unlock_irqrestore(&desc->lock, flags); 100 raw_spin_unlock_irqrestore(&desc->lock, flags);
101 return 0; 101 return 0;
102} 102}
103#endif 103#endif
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 359b8a2f4d2e..31f80c61b031 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -404,7 +404,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
404 if (fpvalid) { 404 if (fpvalid) {
405 if (current == last_task_used_math) { 405 if (current == last_task_used_math) {
406 enable_fpu(); 406 enable_fpu();
407 save_fpu(tsk, regs); 407 save_fpu(tsk);
408 disable_fpu(); 408 disable_fpu();
409 last_task_used_math = 0; 409 last_task_used_math = 0;
410 regs->sr |= SR_FD; 410 regs->sr |= SR_FD;
@@ -431,7 +431,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
431#ifdef CONFIG_SH_FPU 431#ifdef CONFIG_SH_FPU
432 if(last_task_used_math == current) { 432 if(last_task_used_math == current) {
433 enable_fpu(); 433 enable_fpu();
434 save_fpu(current, regs); 434 save_fpu(current);
435 disable_fpu(); 435 disable_fpu();
436 last_task_used_math = NULL; 436 last_task_used_math = NULL;
437 regs->sr |= SR_FD; 437 regs->sr |= SR_FD;
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 952da83903da..873ebdc4f98e 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -82,7 +82,7 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
82 82
83 if (last_task_used_math == task) { 83 if (last_task_used_math == task) {
84 enable_fpu(); 84 enable_fpu();
85 save_fpu(task, regs); 85 save_fpu(task);
86 disable_fpu(); 86 disable_fpu();
87 last_task_used_math = 0; 87 last_task_used_math = 0;
88 regs->sr |= SR_FD; 88 regs->sr |= SR_FD;
@@ -118,7 +118,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
118 set_stopped_child_used_math(task); 118 set_stopped_child_used_math(task);
119 } else if (last_task_used_math == task) { 119 } else if (last_task_used_math == task) {
120 enable_fpu(); 120 enable_fpu();
121 save_fpu(task, regs); 121 save_fpu(task);
122 disable_fpu(); 122 disable_fpu();
123 last_task_used_math = 0; 123 last_task_used_math = 0;
124 regs->sr |= SR_FD; 124 regs->sr |= SR_FD;
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 5a947a2567e4..8b0e69792cf4 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -423,6 +423,9 @@ void __init setup_arch(char **cmdline_p)
423 423
424 plat_early_device_setup(); 424 plat_early_device_setup();
425 425
426 /* Let earlyprintk output early console messages */
427 early_platform_driver_probe("earlyprintk", 1, 1);
428
426 sh_mv_setup(); 429 sh_mv_setup();
427 430
428 /* 431 /*
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index feb3dddd3192..ce76dbdef294 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -314,7 +314,7 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
314 314
315 if (current == last_task_used_math) { 315 if (current == last_task_used_math) {
316 enable_fpu(); 316 enable_fpu();
317 save_fpu(current, regs); 317 save_fpu(current);
318 disable_fpu(); 318 disable_fpu();
319 last_task_used_math = NULL; 319 last_task_used_math = NULL;
320 regs->sr |= SR_FD; 320 regs->sr |= SR_FD;
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 19fd11dd9871..4bd5a1146956 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -353,3 +353,4 @@ ENTRY(sys_call_table)
353 .long sys_pwritev 353 .long sys_pwritev
354 .long sys_rt_tgsigqueueinfo /* 335 */ 354 .long sys_rt_tgsigqueueinfo /* 335 */
355 .long sys_perf_event_open 355 .long sys_perf_event_open
356 .long sys_recvmmsg
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 3da5a125d884..86639beac3a2 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -452,12 +452,18 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
452 rm = regs->regs[index]; 452 rm = regs->regs[index];
453 453
454 /* shout about fixups */ 454 /* shout about fixups */
455 if (!expected && printk_ratelimit()) 455 if (!expected) {
456 printk(KERN_NOTICE "Fixing up unaligned %s access " 456 if (user_mode(regs) && (se_usermode & 1) && printk_ratelimit())
457 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", 457 pr_notice("Fixing up unaligned userspace access "
458 user_mode(regs) ? "userspace" : "kernel", 458 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
459 current->comm, task_pid_nr(current), 459 current->comm, task_pid_nr(current),
460 (void *)regs->pc, instruction); 460 (void *)regs->pc, instruction);
461 else if (se_kernmode_warn && printk_ratelimit())
462 pr_notice("Fixing up unaligned kernel access "
463 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
464 current->comm, task_pid_nr(current),
465 (void *)regs->pc, instruction);
466 }
461 467
462 ret = -EFAULT; 468 ret = -EFAULT;
463 switch (instruction&0xF000) { 469 switch (instruction&0xF000) {
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 75c0cbe2eda0..d86f5315a0c1 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -600,7 +600,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
600 indexed by register number. */ 600 indexed by register number. */
601 if (last_task_used_math == current) { 601 if (last_task_used_math == current) {
602 enable_fpu(); 602 enable_fpu();
603 save_fpu(current, regs); 603 save_fpu(current);
604 disable_fpu(); 604 disable_fpu();
605 last_task_used_math = NULL; 605 last_task_used_math = NULL;
606 regs->sr |= SR_FD; 606 regs->sr |= SR_FD;
@@ -673,7 +673,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
673 indexed by register number. */ 673 indexed by register number. */
674 if (last_task_used_math == current) { 674 if (last_task_used_math == current) {
675 enable_fpu(); 675 enable_fpu();
676 save_fpu(current, regs); 676 save_fpu(current);
677 disable_fpu(); 677 disable_fpu();
678 last_task_used_math = NULL; 678 last_task_used_math = NULL;
679 regs->sr |= SR_FD; 679 regs->sr |= SR_FD;
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index f36a08bf3d5c..560ddb6bc8a7 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -256,8 +256,7 @@ static void sh4_flush_cache_page(void *args)
256 address = (unsigned long)vaddr; 256 address = (unsigned long)vaddr;
257 } 257 }
258 258
259 if (pages_do_alias(address, phys)) 259 flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
260 flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
261 (address & shm_align_mask), phys); 260 (address & shm_align_mask), phys);
262 261
263 if (vma->vm_flags & VM_EXEC) 262 if (vma->vm_flags & VM_EXEC)
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index a86eaa9d75a5..2141befb4f91 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -33,10 +33,10 @@
33 * have to convert them into an offset in a page-aligned mapping, but the 33 * have to convert them into an offset in a page-aligned mapping, but the
34 * caller shouldn't need to know that small detail. 34 * caller shouldn't need to know that small detail.
35 */ 35 */
36void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, 36void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
37 unsigned long flags) 37 unsigned long flags, void *caller)
38{ 38{
39 struct vm_struct * area; 39 struct vm_struct *area;
40 unsigned long offset, last_addr, addr, orig_addr; 40 unsigned long offset, last_addr, addr, orig_addr;
41 pgprot_t pgprot; 41 pgprot_t pgprot;
42 42
@@ -67,7 +67,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
67 /* 67 /*
68 * Ok, go for it.. 68 * Ok, go for it..
69 */ 69 */
70 area = get_vm_area(size, VM_IOREMAP); 70 area = get_vm_area_caller(size, VM_IOREMAP, caller);
71 if (!area) 71 if (!area)
72 return NULL; 72 return NULL;
73 area->phys_addr = phys_addr; 73 area->phys_addr = phys_addr;
@@ -103,7 +103,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
103 103
104 return (void __iomem *)(offset + (char *)orig_addr); 104 return (void __iomem *)(offset + (char *)orig_addr);
105} 105}
106EXPORT_SYMBOL(__ioremap); 106EXPORT_SYMBOL(__ioremap_caller);
107 107
108void __iounmap(void __iomem *addr) 108void __iounmap(void __iomem *addr)
109{ 109{
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
index b16843d02b76..ef434657d428 100644
--- a/arch/sh/mm/ioremap_64.c
+++ b/arch/sh/mm/ioremap_64.c
@@ -258,15 +258,15 @@ static void shmedia_unmapioaddr(unsigned long vaddr)
258 pte_clear(&init_mm, vaddr, ptep); 258 pte_clear(&init_mm, vaddr, ptep);
259} 259}
260 260
261void __iomem *__ioremap(unsigned long offset, unsigned long size, 261void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
262 unsigned long flags) 262 unsigned long flags, void *caller)
263{ 263{
264 char name[14]; 264 char name[14];
265 265
266 sprintf(name, "phys_%08x", (u32)offset); 266 sprintf(name, "phys_%08x", (u32)offset);
267 return shmedia_alloc_io(offset, size, name, flags); 267 return shmedia_alloc_io(offset, size, name, flags);
268} 268}
269EXPORT_SYMBOL(__ioremap); 269EXPORT_SYMBOL(__ioremap_caller);
270 270
271void __iounmap(void __iomem *virtual) 271void __iounmap(void __iomem *virtual)
272{ 272{
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 6c524446c0f6..422e92721878 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -28,7 +28,7 @@ void __init setup_memory(void)
28{ 28{
29 unsigned long free_pfn = PFN_UP(__pa(_end)); 29 unsigned long free_pfn = PFN_UP(__pa(_end));
30 u64 base = min_low_pfn << PAGE_SHIFT; 30 u64 base = min_low_pfn << PAGE_SHIFT;
31 u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn; 31 u64 size = (max_low_pfn << PAGE_SHIFT) - base;
32 32
33 lmb_add(base, size); 33 lmb_add(base, size);
34 34
@@ -38,6 +38,15 @@ void __init setup_memory(void)
38 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); 38 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
39 39
40 /* 40 /*
41 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
42 */
43 if (CONFIG_ZERO_PAGE_OFFSET != 0)
44 lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
45
46 lmb_analyze();
47 lmb_dump_all();
48
49 /*
41 * Node 0 sets up its pgdat at the first available pfn, 50 * Node 0 sets up its pgdat at the first available pfn,
42 * and bumps it up before setting up the bootmem allocator. 51 * and bumps it up before setting up the bootmem allocator.
43 */ 52 */
@@ -71,7 +80,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
71 80
72 /* Node-local pgdat */ 81 /* Node-local pgdat */
73 NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data), 82 NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
74 SMP_CACHE_BYTES, end_pfn)); 83 SMP_CACHE_BYTES, end));
75 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 84 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
76 85
77 NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; 86 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
@@ -81,7 +90,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
81 /* Node-local bootmap */ 90 /* Node-local bootmap */
82 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); 91 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
83 bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT, 92 bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
84 PAGE_SIZE, end_pfn); 93 PAGE_SIZE, end);
85 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, 94 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
86 start_pfn, end_pfn); 95 start_pfn, end_pfn);
87 96
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 33ac1a9ac881..108197ac0d56 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC64
43 select HAVE_SYSCALL_WRAPPERS 43 select HAVE_SYSCALL_WRAPPERS
44 select HAVE_DYNAMIC_FTRACE 44 select HAVE_DYNAMIC_FTRACE
45 select HAVE_FTRACE_MCOUNT_RECORD 45 select HAVE_FTRACE_MCOUNT_RECORD
46 select HAVE_SYSCALL_TRACEPOINTS
46 select USE_GENERIC_SMP_HELPERS if SMP 47 select USE_GENERIC_SMP_HELPERS if SMP
47 select RTC_DRV_CMOS 48 select RTC_DRV_CMOS
48 select RTC_DRV_BQ4802 49 select RTC_DRV_BQ4802
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 90d5fe223a74..9d3c889718ac 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -33,4 +33,18 @@ config FRAME_POINTER
33 depends on MCOUNT 33 depends on MCOUNT
34 default y 34 default y
35 35
36config DEBUG_STRICT_USER_COPY_CHECKS
37 bool "Strict copy size checks"
38 depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
39 ---help---
40 Enabling this option turns a certain set of sanity checks for user
41 copy operations into compile time failures.
42
43 The copy_from_user() etc checks are there to help test if there
44 are sufficient security checks on the length argument of
45 the copy operation, by having gcc prove that the argument is
46 within bounds.
47
48 If unsure, or if you run an older (pre 4.4) gcc, say N.
49
36endmenu 50endmenu
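
Aside (illustrative only, not part of this patch): the DEBUG_STRICT_USER_COPY_CHECKS help text above depends on gcc being able to prove the copy length against the destination's known object size. A minimal sketch of that mechanism, using plain GCC builtins/attributes and made-up demo_* names rather than the real kernel helpers (which wrap roughly the same primitives behind __compiletime_object_size()/__compiletime_warning()):

/* Sketch only: compile-time size checking for a user-copy style helper. */
#include <stddef.h>
#include <string.h>

/* Never defined anywhere: a surviving call means the compiler could not
 * prove the copy fits, and the warning attribute fires at compile time. */
extern void demo_copy_overflow(void)
	__attribute__((warning("copy size is not provably within the destination")));

static inline unsigned long demo_copy_from_user(void *to, const void *from, size_t n)
{
	size_t sz = __builtin_object_size(to, 0);	/* (size_t)-1 if unknown */

	if (sz != (size_t)-1 && sz < n) {
		demo_copy_overflow();			/* diagnosed when n is a constant */
		return n;				/* pretend nothing was copied */
	}
	memcpy(to, from, n);				/* stand-in for the real user copy */
	return 0;
}

With optimization enabled and a constant n larger than the destination buffer, the demo_copy_overflow() call survives into the object code and the diagnostic is emitted as a warning, or as a hard error when the strict option is selected, which is the behaviour the new config symbol toggles.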
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index 381a1b5256d6..4269ca6ad18a 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -104,8 +104,6 @@ typedef struct {
104#define ELF_CLASS ELFCLASS32 104#define ELF_CLASS ELFCLASS32
105#define ELF_DATA ELFDATA2MSB 105#define ELF_DATA ELFDATA2MSB
106 106
107#define USE_ELF_CORE_DUMP
108
109#define ELF_EXEC_PAGESIZE 4096 107#define ELF_EXEC_PAGESIZE 4096
110 108
111 109
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index d42e393078c4..ff66bb88537b 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -152,7 +152,6 @@ typedef struct {
152 (x)->e_machine == EM_SPARC32PLUS) 152 (x)->e_machine == EM_SPARC32PLUS)
153#define compat_start_thread start_thread32 153#define compat_start_thread start_thread32
154 154
155#define USE_ELF_CORE_DUMP
156#define ELF_EXEC_PAGESIZE PAGE_SIZE 155#define ELF_EXEC_PAGESIZE PAGE_SIZE
157 156
158/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 157/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..7f9b9dba38a6 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
10 10
11#include <asm/psr.h> 11#include <asm/psr.h>
12 12
13#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) 13#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
14 14
15#define __raw_spin_unlock_wait(lock) \ 15#define arch_spin_unlock_wait(lock) \
16 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 16 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
17 17
18static inline void __raw_spin_lock(raw_spinlock_t *lock) 18static inline void arch_spin_lock(arch_spinlock_t *lock)
19{ 19{
20 __asm__ __volatile__( 20 __asm__ __volatile__(
21 "\n1:\n\t" 21 "\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
35 : "g2", "memory", "cc"); 35 : "g2", "memory", "cc");
36} 36}
37 37
38static inline int __raw_spin_trylock(raw_spinlock_t *lock) 38static inline int arch_spin_trylock(arch_spinlock_t *lock)
39{ 39{
40 unsigned int result; 40 unsigned int result;
41 __asm__ __volatile__("ldstub [%1], %0" 41 __asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
45 return (result == 0); 45 return (result == 0);
46} 46}
47 47
48static inline void __raw_spin_unlock(raw_spinlock_t *lock) 48static inline void arch_spin_unlock(arch_spinlock_t *lock)
49{ 49{
50 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); 50 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
51} 51}
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
65 * Sort of like atomic_t's on Sparc, but even more clever. 65 * Sort of like atomic_t's on Sparc, but even more clever.
66 * 66 *
67 * ------------------------------------ 67 * ------------------------------------
68 * | 24-bit counter | wlock | raw_rwlock_t 68 * | 24-bit counter | wlock | arch_rwlock_t
69 * ------------------------------------ 69 * ------------------------------------
70 * 31 8 7 0 70 * 31 8 7 0
71 * 71 *
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
76 * 76 *
77 * Unfortunately this scheme limits us to ~16,000,000 cpus. 77 * Unfortunately this scheme limits us to ~16,000,000 cpus.
78 */ 78 */
79static inline void arch_read_lock(raw_rwlock_t *rw) 79static inline void __arch_read_lock(arch_rwlock_t *rw)
80{ 80{
81 register raw_rwlock_t *lp asm("g1"); 81 register arch_rwlock_t *lp asm("g1");
82 lp = rw; 82 lp = rw;
83 __asm__ __volatile__( 83 __asm__ __volatile__(
84 "mov %%o7, %%g4\n\t" 84 "mov %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
89 : "g2", "g4", "memory", "cc"); 89 : "g2", "g4", "memory", "cc");
90} 90}
91 91
92#define __raw_read_lock(lock) \ 92#define arch_read_lock(lock) \
93do { unsigned long flags; \ 93do { unsigned long flags; \
94 local_irq_save(flags); \ 94 local_irq_save(flags); \
95 arch_read_lock(lock); \ 95 __arch_read_lock(lock); \
96 local_irq_restore(flags); \ 96 local_irq_restore(flags); \
97} while(0) 97} while(0)
98 98
99static inline void arch_read_unlock(raw_rwlock_t *rw) 99static inline void __arch_read_unlock(arch_rwlock_t *rw)
100{ 100{
101 register raw_rwlock_t *lp asm("g1"); 101 register arch_rwlock_t *lp asm("g1");
102 lp = rw; 102 lp = rw;
103 __asm__ __volatile__( 103 __asm__ __volatile__(
104 "mov %%o7, %%g4\n\t" 104 "mov %%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
109 : "g2", "g4", "memory", "cc"); 109 : "g2", "g4", "memory", "cc");
110} 110}
111 111
112#define __raw_read_unlock(lock) \ 112#define arch_read_unlock(lock) \
113do { unsigned long flags; \ 113do { unsigned long flags; \
114 local_irq_save(flags); \ 114 local_irq_save(flags); \
115 arch_read_unlock(lock); \ 115 __arch_read_unlock(lock); \
116 local_irq_restore(flags); \ 116 local_irq_restore(flags); \
117} while(0) 117} while(0)
118 118
119static inline void __raw_write_lock(raw_rwlock_t *rw) 119static inline void arch_write_lock(arch_rwlock_t *rw)
120{ 120{
121 register raw_rwlock_t *lp asm("g1"); 121 register arch_rwlock_t *lp asm("g1");
122 lp = rw; 122 lp = rw;
123 __asm__ __volatile__( 123 __asm__ __volatile__(
124 "mov %%o7, %%g4\n\t" 124 "mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
130 *(volatile __u32 *)&lp->lock = ~0U; 130 *(volatile __u32 *)&lp->lock = ~0U;
131} 131}
132 132
133static inline int __raw_write_trylock(raw_rwlock_t *rw) 133static inline int arch_write_trylock(arch_rwlock_t *rw)
134{ 134{
135 unsigned int val; 135 unsigned int val;
136 136
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
150 return (val == 0); 150 return (val == 0);
151} 151}
152 152
153static inline int arch_read_trylock(raw_rwlock_t *rw) 153static inline int __arch_read_trylock(arch_rwlock_t *rw)
154{ 154{
155 register raw_rwlock_t *lp asm("g1"); 155 register arch_rwlock_t *lp asm("g1");
156 register int res asm("o0"); 156 register int res asm("o0");
157 lp = rw; 157 lp = rw;
158 __asm__ __volatile__( 158 __asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
165 return res; 165 return res;
166} 166}
167 167
168#define __raw_read_trylock(lock) \ 168#define arch_read_trylock(lock) \
169({ unsigned long flags; \ 169({ unsigned long flags; \
170 int res; \ 170 int res; \
171 local_irq_save(flags); \ 171 local_irq_save(flags); \
172 res = arch_read_trylock(lock); \ 172 res = __arch_read_trylock(lock); \
173 local_irq_restore(flags); \ 173 local_irq_restore(flags); \
174 res; \ 174 res; \
175}) 175})
176 176
177#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) 177#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
178 178
179#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 179#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
180#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) 180#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
181#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) 181#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
182 182
183#define _raw_spin_relax(lock) cpu_relax() 183#define arch_spin_relax(lock) cpu_relax()
184#define _raw_read_relax(lock) cpu_relax() 184#define arch_read_relax(lock) cpu_relax()
185#define _raw_write_relax(lock) cpu_relax() 185#define arch_write_relax(lock) cpu_relax()
186 186
187#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) 187#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
188#define __raw_write_can_lock(rw) (!(rw)->lock) 188#define arch_write_can_lock(rw) (!(rw)->lock)
189 189
190#endif /* !(__ASSEMBLY__) */ 190#endif /* !(__ASSEMBLY__) */
191 191
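
Aside (illustrative only, not part of this patch): the block comment quoted in the spinlock_32.h hunk above describes the sparc32 rwlock word as a 24-bit reader counter stacked on an 8-bit wlock byte, which is also why it notes the ~16,000,000 cpu limit (2^24 readers). A small sketch of that packing with hypothetical demo_* helpers, not the real lock primitives:

/* Sketch only: layout of the 32-bit sparc rwlock word.
 *   bits 31..8 -> 24-bit count of active readers
 *   bits  7..0 -> write-lock byte (non-zero while a writer holds it) */
#define DEMO_WLOCK_MASK		0x000000ffu
#define DEMO_READER_ONE		0x00000100u	/* +1 reader */

static inline unsigned int demo_readers(unsigned int lock)
{
	return lock >> 8;			/* reader count lives above the wlock byte */
}

static inline int demo_write_locked(unsigned int lock)
{
	return (lock & DEMO_WLOCK_MASK) != 0;	/* wlock byte set? */
}

static inline int demo_read_can_lock(unsigned int lock)
{
	return !demo_write_locked(lock);	/* mirrors arch_read_can_lock() above */
}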
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..073936a8b275 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
21 * the spinner sections must be pre-V9 branches. 21 * the spinner sections must be pre-V9 branches.
22 */ 22 */
23 23
24#define __raw_spin_is_locked(lp) ((lp)->lock != 0) 24#define arch_spin_is_locked(lp) ((lp)->lock != 0)
25 25
26#define __raw_spin_unlock_wait(lp) \ 26#define arch_spin_unlock_wait(lp) \
27 do { rmb(); \ 27 do { rmb(); \
28 } while((lp)->lock) 28 } while((lp)->lock)
29 29
30static inline void __raw_spin_lock(raw_spinlock_t *lock) 30static inline void arch_spin_lock(arch_spinlock_t *lock)
31{ 31{
32 unsigned long tmp; 32 unsigned long tmp;
33 33
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
46 : "memory"); 46 : "memory");
47} 47}
48 48
49static inline int __raw_spin_trylock(raw_spinlock_t *lock) 49static inline int arch_spin_trylock(arch_spinlock_t *lock)
50{ 50{
51 unsigned long result; 51 unsigned long result;
52 52
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
59 return (result == 0UL); 59 return (result == 0UL);
60} 60}
61 61
62static inline void __raw_spin_unlock(raw_spinlock_t *lock) 62static inline void arch_spin_unlock(arch_spinlock_t *lock)
63{ 63{
64 __asm__ __volatile__( 64 __asm__ __volatile__(
65" stb %%g0, [%0]" 65" stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
68 : "memory"); 68 : "memory");
69} 69}
70 70
71static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 71static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
72{ 72{
73 unsigned long tmp1, tmp2; 73 unsigned long tmp1, tmp2;
74 74
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
92 92
93/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ 93/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
94 94
95static void inline arch_read_lock(raw_rwlock_t *lock) 95static void inline arch_read_lock(arch_rwlock_t *lock)
96{ 96{
97 unsigned long tmp1, tmp2; 97 unsigned long tmp1, tmp2;
98 98
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
115 : "memory"); 115 : "memory");
116} 116}
117 117
118static int inline arch_read_trylock(raw_rwlock_t *lock) 118static int inline arch_read_trylock(arch_rwlock_t *lock)
119{ 119{
120 int tmp1, tmp2; 120 int tmp1, tmp2;
121 121
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
136 return tmp1; 136 return tmp1;
137} 137}
138 138
139static void inline arch_read_unlock(raw_rwlock_t *lock) 139static void inline arch_read_unlock(arch_rwlock_t *lock)
140{ 140{
141 unsigned long tmp1, tmp2; 141 unsigned long tmp1, tmp2;
142 142
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
152 : "memory"); 152 : "memory");
153} 153}
154 154
155static void inline arch_write_lock(raw_rwlock_t *lock) 155static void inline arch_write_lock(arch_rwlock_t *lock)
156{ 156{
157 unsigned long mask, tmp1, tmp2; 157 unsigned long mask, tmp1, tmp2;
158 158
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
177 : "memory"); 177 : "memory");
178} 178}
179 179
180static void inline arch_write_unlock(raw_rwlock_t *lock) 180static void inline arch_write_unlock(arch_rwlock_t *lock)
181{ 181{
182 __asm__ __volatile__( 182 __asm__ __volatile__(
183" stw %%g0, [%0]" 183" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
186 : "memory"); 186 : "memory");
187} 187}
188 188
189static int inline arch_write_trylock(raw_rwlock_t *lock) 189static int inline arch_write_trylock(arch_rwlock_t *lock)
190{ 190{
191 unsigned long mask, tmp1, tmp2, result; 191 unsigned long mask, tmp1, tmp2, result;
192 192
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
210 return result; 210 return result;
211} 211}
212 212
213#define __raw_read_lock(p) arch_read_lock(p) 213#define arch_read_lock(p) arch_read_lock(p)
214#define __raw_read_lock_flags(p, f) arch_read_lock(p) 214#define arch_read_lock_flags(p, f) arch_read_lock(p)
215#define __raw_read_trylock(p) arch_read_trylock(p) 215#define arch_read_trylock(p) arch_read_trylock(p)
216#define __raw_read_unlock(p) arch_read_unlock(p) 216#define arch_read_unlock(p) arch_read_unlock(p)
217#define __raw_write_lock(p) arch_write_lock(p) 217#define arch_write_lock(p) arch_write_lock(p)
218#define __raw_write_lock_flags(p, f) arch_write_lock(p) 218#define arch_write_lock_flags(p, f) arch_write_lock(p)
219#define __raw_write_unlock(p) arch_write_unlock(p) 219#define arch_write_unlock(p) arch_write_unlock(p)
220#define __raw_write_trylock(p) arch_write_trylock(p) 220#define arch_write_trylock(p) arch_write_trylock(p)
221 221
222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 222#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
223#define __raw_write_can_lock(rw) (!(rw)->lock) 223#define arch_write_can_lock(rw) (!(rw)->lock)
224 224
225#define _raw_spin_relax(lock) cpu_relax() 225#define arch_spin_relax(lock) cpu_relax()
226#define _raw_read_relax(lock) cpu_relax() 226#define arch_read_relax(lock) cpu_relax()
227#define _raw_write_relax(lock) cpu_relax() 227#define arch_write_relax(lock) cpu_relax()
228 228
229#endif /* !(__ASSEMBLY__) */ 229#endif /* !(__ASSEMBLY__) */
230 230
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..9c454fdeaad8 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned char lock; 9 volatile unsigned char lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int lock; 15 volatile unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define __RAW_RW_LOCK_UNLOCKED { 0 } 18#define __ARCH_RW_LOCK_UNLOCKED { 0 }
19 19
20#endif 20#endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 6c5fddb7e6b5..edf196ee4ef8 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -16,8 +16,6 @@
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17 17
18extern void __memmove(void *,const void *,__kernel_size_t); 18extern void __memmove(void *,const void *,__kernel_size_t);
19extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
20extern __kernel_size_t __memset(void *,int,__kernel_size_t);
21 19
22#ifndef EXPORT_SYMTAB_STROPS 20#ifndef EXPORT_SYMTAB_STROPS
23 21
@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
32}) 30})
33 31
34#define __HAVE_ARCH_MEMCPY 32#define __HAVE_ARCH_MEMCPY
35 33#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
36static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
37{
38 extern void __copy_1page(void *, const void *);
39
40 if(n <= 32) {
41 __builtin_memcpy(to, from, n);
42 } else if (((unsigned int) to & 7) != 0) {
43 /* Destination is not aligned on the double-word boundary */
44 __memcpy(to, from, n);
45 } else {
46 switch(n) {
47 case PAGE_SIZE:
48 __copy_1page(to, from);
49 break;
50 default:
51 __memcpy(to, from, n);
52 break;
53 }
54 }
55 return to;
56}
57
58static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
59{
60 __memcpy(to, from, n);
61 return to;
62}
63
64#undef memcpy
65#define memcpy(t, f, n) \
66(__builtin_constant_p(n) ? \
67 __constant_memcpy((t),(f),(n)) : \
68 __nonconstant_memcpy((t),(f),(n)))
69 34
70#define __HAVE_ARCH_MEMSET 35#define __HAVE_ARCH_MEMSET
71 36#define memset(s, c, count) __builtin_memset(s, c, count)
72static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
73{
74 extern void bzero_1page(void *);
75 extern __kernel_size_t __bzero(void *, __kernel_size_t);
76
77 if(!c) {
78 if(count == PAGE_SIZE)
79 bzero_1page(s);
80 else
81 __bzero(s, count);
82 } else {
83 __memset(s, c, count);
84 }
85 return s;
86}
87
88static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
89{
90 extern __kernel_size_t __bzero(void *, __kernel_size_t);
91
92 if(!c)
93 __bzero(s, count);
94 else
95 __memset(s, c, count);
96 return s;
97}
98
99static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
100{
101 __memset(s, c, count);
102 return s;
103}
104
105#undef memset
106#define memset(s, c, count) \
107(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
108 __constant_c_and_count_memset((s), (c), (count)) : \
109 __constant_c_memset((s), (c), (count))) \
110 : __nonconstant_memset((s), (c), (count)))
111 37
112#define __HAVE_ARCH_MEMSCAN 38#define __HAVE_ARCH_MEMSCAN
113 39
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 43161f2d17eb..9623bc213158 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -15,8 +15,6 @@
15 15
16#include <asm/asi.h> 16#include <asm/asi.h>
17 17
18extern void *__memset(void *,int,__kernel_size_t);
19
20#ifndef EXPORT_SYMTAB_STROPS 18#ifndef EXPORT_SYMTAB_STROPS
21 19
22/* First the mem*() things. */ 20/* First the mem*() things. */
@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
24extern void *memmove(void *, const void *, __kernel_size_t); 22extern void *memmove(void *, const void *, __kernel_size_t);
25 23
26#define __HAVE_ARCH_MEMCPY 24#define __HAVE_ARCH_MEMCPY
27extern void *memcpy(void *, const void *, __kernel_size_t); 25#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
28 26
29#define __HAVE_ARCH_MEMSET 27#define __HAVE_ARCH_MEMSET
30extern void *__builtin_memset(void *,int,__kernel_size_t); 28#define memset(s, c, count) __builtin_memset(s, c, count)
31
32static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
33{
34 extern __kernel_size_t __bzero(void *, __kernel_size_t);
35
36 if (!c) {
37 __bzero(s, count);
38 return s;
39 } else
40 return __memset(s, c, count);
41}
42
43#undef memset
44#define memset(s, c, count) \
45((__builtin_constant_p(count) && (count) <= 32) ? \
46 __builtin_memset((s), (c), (count)) : \
47 (__builtin_constant_p(c) ? \
48 __constant_memset((s), (c), (count)) : \
49 __memset((s), (c), (count))))
50 29
51#define __HAVE_ARCH_MEMSCAN 30#define __HAVE_ARCH_MEMSCAN
52 31
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 1b45a7bbe407..7257ebb8f394 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
227/* flag bit 8 is available */ 227/* flag bit 8 is available */
228#define TIF_SECCOMP 9 /* secure computing */ 228#define TIF_SECCOMP 9 /* secure computing */
229#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ 229#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
230#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
230/* flag bit 11 is available */ 231/* flag bit 11 is available */
231/* NOTE: Thread flags >= 12 should be ones we have no interest 232/* NOTE: Thread flags >= 12 should be ones we have no interest
232 * in using in assembly, else we can't use the mask as 233 * in using in assembly, else we can't use the mask as
@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
246#define _TIF_32BIT (1<<TIF_32BIT) 247#define _TIF_32BIT (1<<TIF_32BIT)
247#define _TIF_SECCOMP (1<<TIF_SECCOMP) 248#define _TIF_SECCOMP (1<<TIF_SECCOMP)
248#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 249#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
250#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
249#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 251#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
250#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 252#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
251#define _TIF_FREEZE (1<<TIF_FREEZE) 253#define _TIF_FREEZE (1<<TIF_FREEZE)
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac481034..489d2ba92bcb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
260 return __copy_user(to, (__force void __user *) from, n); 260 return __copy_user(to, (__force void __user *) from, n);
261} 261}
262 262
263extern void copy_from_user_overflow(void)
264#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
265 __compiletime_error("copy_from_user() buffer size is not provably correct")
266#else
267 __compiletime_warning("copy_from_user() buffer size is not provably correct")
268#endif
269;
270
263static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) 271static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
264{ 272{
273 int sz = __compiletime_object_size(to);
274
275 if (unlikely(sz != -1 && sz < n)) {
276 copy_from_user_overflow();
277 return -EFAULT;
278 }
279
265 if (n && __access_ok((unsigned long) from, n)) 280 if (n && __access_ok((unsigned long) from, n))
266 return __copy_user((__force void __user *) to, from, n); 281 return __copy_user((__force void __user *) to, from, n);
267 else 282 else
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 9ea271e19c70..dbc141660994 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9#include <linux/errno.h>
9#include <linux/compiler.h> 10#include <linux/compiler.h>
10#include <linux/string.h> 11#include <linux/string.h>
11#include <linux/thread_info.h> 12#include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \
204 205
205extern int __get_user_bad(void); 206extern int __get_user_bad(void);
206 207
208extern void copy_from_user_overflow(void)
209#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
210 __compiletime_error("copy_from_user() buffer size is not provably correct")
211#else
212 __compiletime_warning("copy_from_user() buffer size is not provably correct")
213#endif
214;
215
207extern unsigned long __must_check ___copy_from_user(void *to, 216extern unsigned long __must_check ___copy_from_user(void *to,
208 const void __user *from, 217 const void __user *from,
209 unsigned long size); 218 unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
212static inline unsigned long __must_check 221static inline unsigned long __must_check
213copy_from_user(void *to, const void __user *from, unsigned long size) 222copy_from_user(void *to, const void __user *from, unsigned long size)
214{ 223{
215 unsigned long ret = ___copy_from_user(to, from, size); 224 unsigned long ret = (unsigned long) -EFAULT;
216 225 int sz = __compiletime_object_size(to);
217 if (unlikely(ret)) 226
218 ret = copy_from_user_fixup(to, from, size); 227 if (likely(sz == -1 || sz >= size)) {
228 ret = ___copy_from_user(to, from, size);
229 if (unlikely(ret))
230 ret = copy_from_user_fixup(to, from, size);
231 } else {
232 copy_from_user_overflow();
233 }
219 return ret; 234 return ret;
220} 235}
221#define __copy_from_user copy_from_user 236#define __copy_from_user copy_from_user
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d8d25bd97121..cb4b9bfd0d87 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -398,7 +398,7 @@
398#define __NR_perf_event_open 327 398#define __NR_perf_event_open 327
399#define __NR_recvmmsg 328 399#define __NR_recvmmsg 328
400 400
401#define NR_SYSCALLS 329 401#define NR_syscalls 329
402 402
403#ifdef __32bit_syscall_numbers__ 403#ifdef __32bit_syscall_numbers__
404/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 404/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index ec9c7bc67d21..1504df8ddf70 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1294,7 +1294,7 @@ linux_sparc_syscall:
1294 sethi %hi(PSR_SYSCALL), %l4 1294 sethi %hi(PSR_SYSCALL), %l4
1295 or %l0, %l4, %l0 1295 or %l0, %l4, %l0
1296 /* Direct access to user regs, must faster. */ 1296 /* Direct access to user regs, must faster. */
1297 cmp %g1, NR_SYSCALLS 1297 cmp %g1, NR_syscalls
1298 bgeu linux_sparc_ni_syscall 1298 bgeu linux_sparc_ni_syscall
1299 sll %g1, 2, %l4 1299 sll %g1, 2, %l4
1300 ld [%l7 + %l4], %l7 1300 ld [%l7 + %l4], %l7
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d3b1a3076569..29973daa9930 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -4,6 +4,7 @@
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/list.h> 6#include <linux/list.h>
7#include <trace/syscall.h>
7 8
8#include <asm/ftrace.h> 9#include <asm/ftrace.h>
9 10
@@ -91,3 +92,13 @@ int __init ftrace_dyn_arch_init(void *data)
91} 92}
92#endif 93#endif
93 94
95#ifdef CONFIG_FTRACE_SYSCALLS
96
97extern unsigned int sys_call_table[];
98
99unsigned long __init arch_syscall_addr(int nr)
100{
101 return (unsigned long)sys_call_table[nr];
102}
103
104#endif
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 7690cc219ecc..5fad94950e76 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -11,6 +11,7 @@
11#include <linux/dma-mapping.h> 11#include <linux/dma-mapping.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/iommu-helper.h> 13#include <linux/iommu-helper.h>
14#include <linux/bitmap.h>
14 15
15#ifdef CONFIG_PCI 16#ifdef CONFIG_PCI
16#include <linux/pci.h> 17#include <linux/pci.h>
@@ -169,7 +170,7 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
169 170
170 entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; 171 entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
171 172
172 iommu_area_free(arena->map, entry, npages); 173 bitmap_clear(arena->map, entry, npages);
173} 174}
174 175
175int iommu_table_init(struct iommu *iommu, int tsbsize, 176int iommu_table_init(struct iommu *iommu, int tsbsize,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index ce996f97855f..8d6882bb480a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
176 } 176 }
177 177
178 if (i < NR_IRQS) { 178 if (i < NR_IRQS) {
179 spin_lock_irqsave(&irq_desc[i].lock, flags); 179 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
180 action = irq_desc[i].action; 180 action = irq_desc[i].action;
181 if (!action) 181 if (!action)
182 goto skip; 182 goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v)
195 195
196 seq_putc(p, '\n'); 196 seq_putc(p, '\n');
197skip: 197skip:
198 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 198 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
199 } else if (i == NR_IRQS) { 199 } else if (i == NR_IRQS) {
200 seq_printf(p, "NMI: "); 200 seq_printf(p, "NMI: ");
201 for_each_online_cpu(j) 201 for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
785 for (irq = 0; irq < NR_IRQS; irq++) { 785 for (irq = 0; irq < NR_IRQS; irq++) {
786 unsigned long flags; 786 unsigned long flags;
787 787
788 spin_lock_irqsave(&irq_desc[irq].lock, flags); 788 raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
789 if (irq_desc[irq].action && 789 if (irq_desc[irq].action &&
790 !(irq_desc[irq].status & IRQ_PER_CPU)) { 790 !(irq_desc[irq].status & IRQ_PER_CPU)) {
791 if (irq_desc[irq].chip->set_affinity) 791 if (irq_desc[irq].chip->set_affinity)
792 irq_desc[irq].chip->set_affinity(irq, 792 irq_desc[irq].chip->set_affinity(irq,
793 irq_desc[irq].affinity); 793 irq_desc[irq].affinity);
794 } 794 }
795 spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 795 raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
796 } 796 }
797 797
798 tick_ops->disable_irq(); 798 tick_ops->disable_irq();
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 3bc6527c95af..6716584e48ab 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -46,6 +46,9 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
46 46
47int __kprobes arch_prepare_kprobe(struct kprobe *p) 47int __kprobes arch_prepare_kprobe(struct kprobe *p)
48{ 48{
49 if ((unsigned long) p->addr & 0x3UL)
50 return -EILSEQ;
51
49 p->ainsn.insn[0] = *p->addr; 52 p->ainsn.insn[0] = *p->addr;
50 flushi(&p->ainsn.insn[0]); 53 flushi(&p->ainsn.insn[0]);
51 54
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index cb3c72c45aab..df39a0f0d27a 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -14,6 +14,7 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/bitmap.h>
17 18
18#include <asm/hypervisor.h> 19#include <asm/hypervisor.h>
19#include <asm/iommu.h> 20#include <asm/iommu.h>
@@ -1242,13 +1243,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
1242 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); 1243 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
1243 1244
1244 err = request_irq(lp->cfg.rx_irq, ldc_rx, 1245 err = request_irq(lp->cfg.rx_irq, ldc_rx,
1245 IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, 1246 IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
1246 lp->rx_irq_name, lp); 1247 lp->rx_irq_name, lp);
1247 if (err) 1248 if (err)
1248 return err; 1249 return err;
1249 1250
1250 err = request_irq(lp->cfg.tx_irq, ldc_tx, 1251 err = request_irq(lp->cfg.tx_irq, ldc_tx,
1251 IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, 1252 IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
1252 lp->tx_irq_name, lp); 1253 lp->tx_irq_name, lp);
1253 if (err) { 1254 if (err) {
1254 free_irq(lp->cfg.rx_irq, lp); 1255 free_irq(lp->cfg.rx_irq, lp);
@@ -1875,7 +1876,7 @@ EXPORT_SYMBOL(ldc_read);
1875static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages) 1876static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
1876{ 1877{
1877 struct iommu_arena *arena = &iommu->arena; 1878 struct iommu_arena *arena = &iommu->arena;
1878 unsigned long n, i, start, end, limit; 1879 unsigned long n, start, end, limit;
1879 int pass; 1880 int pass;
1880 1881
1881 limit = arena->limit; 1882 limit = arena->limit;
@@ -1883,7 +1884,7 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
1883 pass = 0; 1884 pass = 0;
1884 1885
1885again: 1886again:
1886 n = find_next_zero_bit(arena->map, limit, start); 1887 n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
1887 end = n + npages; 1888 end = n + npages;
1888 if (unlikely(end >= limit)) { 1889 if (unlikely(end >= limit)) {
1889 if (likely(pass < 1)) { 1890 if (likely(pass < 1)) {
@@ -1896,16 +1897,7 @@ again:
1896 return -1; 1897 return -1;
1897 } 1898 }
1898 } 1899 }
1899 1900 bitmap_set(arena->map, n, npages);
1900 for (i = n; i < end; i++) {
1901 if (test_bit(i, arena->map)) {
1902 start = i + 1;
1903 goto again;
1904 }
1905 }
1906
1907 for (i = n; i < end; i++)
1908 __set_bit(i, arena->map);
1909 1901
1910 arena->hint = end; 1902 arena->hint = end;
1911 1903
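The iommu.c and ldc.c hunks above (and the sun4c.c change later in this patch) retire hand-rolled find/test/set loops in favour of the generic bitmap helpers: bitmap_find_next_zero_area() to locate a free run, bitmap_set() to claim it, and bitmap_clear() to release it. A rough in-kernel sketch of that allocate/free pattern, with illustrative names rather than the sparc arena code:

#include <linux/bitmap.h>

#define ARENA_NPAGES	1024
static DECLARE_BITMAP(arena_map, ARENA_NPAGES);

/* Find and claim a run of 'npages' consecutive free slots; -1 if none. */
static long arena_alloc_pages(unsigned long npages)
{
	unsigned long n;

	n = bitmap_find_next_zero_area(arena_map, ARENA_NPAGES, 0, npages, 0);
	if (n >= ARENA_NPAGES)
		return -1;
	bitmap_set(arena_map, n, npages);
	return n;
}

/* Release a previously claimed run. */
static void arena_free_pages(unsigned long entry, unsigned long npages)
{
	bitmap_clear(arena_map, entry, npages);
}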
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 938da19dc065..cdc91d919e93 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -10,6 +10,7 @@
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/miscdevice.h> 12#include <linux/miscdevice.h>
13#include <linux/bootmem.h>
13 14
14#include <asm/cpudata.h> 15#include <asm/cpudata.h>
15#include <asm/hypervisor.h> 16#include <asm/hypervisor.h>
@@ -108,25 +109,15 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
108 109
109static void mdesc_lmb_free(struct mdesc_handle *hp) 110static void mdesc_lmb_free(struct mdesc_handle *hp)
110{ 111{
111 unsigned int alloc_size, handle_size = hp->handle_size; 112 unsigned int alloc_size;
112 unsigned long start, end; 113 unsigned long start;
113 114
114 BUG_ON(atomic_read(&hp->refcnt) != 0); 115 BUG_ON(atomic_read(&hp->refcnt) != 0);
115 BUG_ON(!list_empty(&hp->list)); 116 BUG_ON(!list_empty(&hp->list));
116 117
117 alloc_size = PAGE_ALIGN(handle_size); 118 alloc_size = PAGE_ALIGN(hp->handle_size);
118 119 start = __pa(hp);
119 start = (unsigned long) hp; 120 free_bootmem_late(start, alloc_size);
120 end = start + alloc_size;
121
122 while (start < end) {
123 struct page *p;
124
125 p = virt_to_page(start);
126 ClearPageReserved(p);
127 __free_page(p);
128 start += PAGE_SIZE;
129 }
130} 121}
131 122
132static struct mdesc_mem_ops lmb_mdesc_ops = { 123static struct mdesc_mem_ops lmb_mdesc_ops = {
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 881947e59e95..0a6f2d1798d1 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -104,9 +104,19 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
104 int i; 104 int i;
105 105
106 /* Check address type match */ 106 /* Check address type match */
107 if ((addr[0] ^ range[0]) & 0x03000000) 107 if (!((addr[0] ^ range[0]) & 0x03000000))
108 return -EINVAL; 108 goto type_match;
109
110 /* Special exception, we can map a 64-bit address into
111 * a 32-bit range.
112 */
113 if ((addr[0] & 0x03000000) == 0x03000000 &&
114 (range[0] & 0x03000000) == 0x02000000)
115 goto type_match;
116
117 return -EINVAL;
109 118
119type_match:
110 if (of_out_of_range(addr + 1, range + 1, range + na + pna, 120 if (of_out_of_range(addr + 1, range + 1, range + na + pna,
111 na - 1, ns)) 121 na - 1, ns))
112 return -EINVAL; 122 return -EINVAL;
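For reference, the of_bus_pci_map() change keys off the "ss" space-code bits (bits 25:24 of phys.hi) from the OpenFirmware PCI binding: 00 config, 01 I/O, 10 32-bit memory, 11 64-bit memory. A small stand-alone sketch of the relaxed check; the constant names below are mine, not the kernel's:

#define OF_PCI_SS_MASK	0x03000000u	/* phys.hi space-code bits */
#define OF_PCI_SS_MEM32	0x02000000u
#define OF_PCI_SS_MEM64	0x03000000u

static int pci_addr_type_matches(unsigned int addr_hi, unsigned int range_hi)
{
	/* identical space code: always a match */
	if (!((addr_hi ^ range_hi) & OF_PCI_SS_MASK))
		return 1;

	/* special exception: a 64-bit memory address may map into a
	 * 32-bit memory range */
	if ((addr_hi & OF_PCI_SS_MASK) == OF_PCI_SS_MEM64 &&
	    (range_hi & OF_PCI_SS_MASK) == OF_PCI_SS_MEM32)
		return 1;

	return 0;
}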
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 4ae91dc2feb9..2f6524d1a817 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -23,6 +23,7 @@
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/regset.h> 24#include <linux/regset.h>
25#include <linux/tracehook.h> 25#include <linux/tracehook.h>
26#include <trace/syscall.h>
26#include <linux/compat.h> 27#include <linux/compat.h>
27#include <linux/elf.h> 28#include <linux/elf.h>
28 29
@@ -37,6 +38,9 @@
37#include <asm/cpudata.h> 38#include <asm/cpudata.h>
38#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
39 40
41#define CREATE_TRACE_POINTS
42#include <trace/events/syscalls.h>
43
40#include "entry.h" 44#include "entry.h"
41 45
42/* #define ALLOW_INIT_TRACING */ 46/* #define ALLOW_INIT_TRACING */
@@ -1059,6 +1063,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
1059 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1063 if (test_thread_flag(TIF_SYSCALL_TRACE))
1060 ret = tracehook_report_syscall_entry(regs); 1064 ret = tracehook_report_syscall_entry(regs);
1061 1065
1066 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1067 trace_sys_enter(regs, regs->u_regs[UREG_G1]);
1068
1062 if (unlikely(current->audit_context) && !ret) 1069 if (unlikely(current->audit_context) && !ret)
1063 audit_syscall_entry((test_thread_flag(TIF_32BIT) ? 1070 audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
1064 AUDIT_ARCH_SPARC : 1071 AUDIT_ARCH_SPARC :
@@ -1084,6 +1091,9 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1084 audit_syscall_exit(result, regs->u_regs[UREG_I0]); 1091 audit_syscall_exit(result, regs->u_regs[UREG_I0]);
1085 } 1092 }
1086 1093
1094 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1095 trace_sys_exit(regs, regs->u_regs[UREG_G1]);
1096
1087 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1097 if (test_thread_flag(TIF_SYSCALL_TRACE))
1088 tracehook_report_syscall_exit(regs, 0); 1098 tracehook_report_syscall_exit(regs, 0);
1089} 1099}
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d150c2aa98d2..dc4a458f74dc 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
62#endif 62#endif
63 .align 32 63 .align 32
641: ldx [%g6 + TI_FLAGS], %l5 641: ldx [%g6 + TI_FLAGS], %l5
65 andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 65 andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
66 be,pt %icc, rtrap 66 be,pt %icc, rtrap
67 nop 67 nop
68 call syscall_trace_leave 68 call syscall_trace_leave
@@ -187,7 +187,7 @@ linux_syscall_trace:
187 .globl linux_sparc_syscall32 187 .globl linux_sparc_syscall32
188linux_sparc_syscall32: 188linux_sparc_syscall32:
189 /* Direct access to user regs, much faster. */ 189 /* Direct access to user regs, much faster. */
190 cmp %g1, NR_SYSCALLS ! IEU1 Group 190 cmp %g1, NR_syscalls ! IEU1 Group
191 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI 191 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
192 srl %i0, 0, %o0 ! IEU0 192 srl %i0, 0, %o0 ! IEU0
193 sll %g1, 2, %l4 ! IEU0 Group 193 sll %g1, 2, %l4 ! IEU0 Group
@@ -198,7 +198,7 @@ linux_sparc_syscall32:
198 198
199 srl %i5, 0, %o5 ! IEU1 199 srl %i5, 0, %o5 ! IEU1
200 srl %i2, 0, %o2 ! IEU0 Group 200 srl %i2, 0, %o2 ! IEU0 Group
201 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 201 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
202 bne,pn %icc, linux_syscall_trace32 ! CTI 202 bne,pn %icc, linux_syscall_trace32 ! CTI
203 mov %i0, %l5 ! IEU1 203 mov %i0, %l5 ! IEU1
204 call %l7 ! CTI Group brk forced 204 call %l7 ! CTI Group brk forced
@@ -210,7 +210,7 @@ linux_sparc_syscall32:
210 .globl linux_sparc_syscall 210 .globl linux_sparc_syscall
211linux_sparc_syscall: 211linux_sparc_syscall:
212 /* Direct access to user regs, much faster. */ 212 /* Direct access to user regs, much faster. */
213 cmp %g1, NR_SYSCALLS ! IEU1 Group 213 cmp %g1, NR_syscalls ! IEU1 Group
214 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI 214 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
215 mov %i0, %o0 ! IEU0 215 mov %i0, %o0 ! IEU0
216 sll %g1, 2, %l4 ! IEU0 Group 216 sll %g1, 2, %l4 ! IEU0 Group
@@ -221,7 +221,7 @@ linux_sparc_syscall:
221 221
222 mov %i3, %o3 ! IEU1 222 mov %i3, %o3 ! IEU1
223 mov %i4, %o4 ! IEU0 Group 223 mov %i4, %o4 ! IEU0 Group
224 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 224 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
225 bne,pn %icc, linux_syscall_trace ! CTI Group 225 bne,pn %icc, linux_syscall_trace ! CTI Group
226 mov %i0, %l5 ! IEU0 226 mov %i0, %l5 ! IEU0
2272: call %l7 ! CTI Group brk forced 2272: call %l7 ! CTI Group brk forced
@@ -245,7 +245,7 @@ ret_sys_call:
245 245
246 cmp %o0, -ERESTART_RESTARTBLOCK 246 cmp %o0, -ERESTART_RESTARTBLOCK
247 bgeu,pn %xcc, 1f 247 bgeu,pn %xcc, 1f
248 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 248 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
24980: 24980:
250 /* System call success, clear Carry condition code. */ 250 /* System call success, clear Carry condition code. */
251 andn %g3, %g2, %g3 251 andn %g3, %g2, %g3
@@ -260,7 +260,7 @@ ret_sys_call:
260 /* System call failure, set Carry condition code. 260 /* System call failure, set Carry condition code.
261 * Also, get abs(errno) to return to the process. 261 * Also, get abs(errno) to return to the process.
262 */ 262 */
263 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 263 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
264 sub %g0, %o0, %o0 264 sub %g0, %o0, %o0
265 or %g3, %g2, %g3 265 or %g3, %g2, %g3
266 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] 266 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 63f73ae8a892..67e165102885 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -774,26 +774,9 @@ void __devinit setup_sparc64_timer(void)
774static struct clocksource clocksource_tick = { 774static struct clocksource clocksource_tick = {
775 .rating = 100, 775 .rating = 100,
776 .mask = CLOCKSOURCE_MASK(64), 776 .mask = CLOCKSOURCE_MASK(64),
777 .shift = 16,
778 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 777 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
779}; 778};
780 779
781static void __init setup_clockevent_multiplier(unsigned long hz)
782{
783 unsigned long mult, shift = 32;
784
785 while (1) {
786 mult = div_sc(hz, NSEC_PER_SEC, shift);
787 if (mult && (mult >> 32UL) == 0UL)
788 break;
789
790 shift--;
791 }
792
793 sparc64_clockevent.shift = shift;
794 sparc64_clockevent.mult = mult;
795}
796
797static unsigned long tb_ticks_per_usec __read_mostly; 780static unsigned long tb_ticks_per_usec __read_mostly;
798 781
799void __delay(unsigned long loops) 782void __delay(unsigned long loops)
@@ -828,9 +811,7 @@ void __init time_init(void)
828 clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); 811 clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
829 812
830 clocksource_tick.name = tick_ops->name; 813 clocksource_tick.name = tick_ops->name;
831 clocksource_tick.mult = 814 clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
832 clocksource_hz2mult(freq,
833 clocksource_tick.shift);
834 clocksource_tick.read = clocksource_tick_read; 815 clocksource_tick.read = clocksource_tick_read;
835 816
836 printk("clocksource: mult[%x] shift[%d]\n", 817 printk("clocksource: mult[%x] shift[%d]\n",
@@ -839,15 +820,14 @@ void __init time_init(void)
839 clocksource_register(&clocksource_tick); 820 clocksource_register(&clocksource_tick);
840 821
841 sparc64_clockevent.name = tick_ops->name; 822 sparc64_clockevent.name = tick_ops->name;
842 823 clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
843 setup_clockevent_multiplier(freq);
844 824
845 sparc64_clockevent.max_delta_ns = 825 sparc64_clockevent.max_delta_ns =
846 clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent); 826 clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
847 sparc64_clockevent.min_delta_ns = 827 sparc64_clockevent.min_delta_ns =
848 clockevent_delta2ns(0xF, &sparc64_clockevent); 828 clockevent_delta2ns(0xF, &sparc64_clockevent);
849 829
850 printk("clockevent: mult[%ux] shift[%d]\n", 830 printk("clockevent: mult[%x] shift[%d]\n",
851 sparc64_clockevent.mult, sparc64_clockevent.shift); 831 sparc64_clockevent.mult, sparc64_clockevent.shift);
852 832
853 setup_sparc64_timer(); 833 setup_sparc64_timer();
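The removed setup_clockevent_multiplier() searched for a usable shift by hand; clocksource_calc_mult_shift() and clockevents_calc_mult_shift() now derive the mult/shift pair from the tick frequency directly. The underlying relation for the clocksource direction is ns = (cycles * mult) >> shift, i.e. mult is roughly (NSEC_PER_SEC << shift) / freq. A small user-space sketch of that arithmetic (illustrative only, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

/* mult such that (cycles * mult) >> shift converts cycles to nanoseconds */
static uint32_t cyc2ns_mult(uint64_t freq_hz, unsigned int shift)
{
	return (uint32_t)(((NSEC_PER_SEC << shift) + freq_hz / 2) / freq_hz);
}

int main(void)
{
	uint64_t freq = 14318180;		/* example tick rate, Hz */
	unsigned int shift = 24;
	uint32_t mult = cyc2ns_mult(freq, shift);
	uint64_t one_sec_cycles = freq;

	/* one second worth of cycles should convert to roughly 1e9 ns */
	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)((one_sec_cycles * mult) >> shift));
	return 0;
}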
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 6b1e6cde6fff..f8514e291e15 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -17,8 +17,7 @@
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/smp_lock.h> 19#include <linux/smp_lock.h>
20 20#include <linux/perf_event.h>
21/* #define DEBUG_MNA */
22 21
23enum direction { 22enum direction {
24 load, /* ld, ldd, ldh, ldsh */ 23 load, /* ld, ldd, ldh, ldsh */
@@ -29,12 +28,6 @@ enum direction {
29 invalid, 28 invalid,
30}; 29};
31 30
32#ifdef DEBUG_MNA
33static char *dirstrings[] = {
34 "load", "store", "both", "fpload", "fpstore", "invalid"
35};
36#endif
37
38static inline enum direction decode_direction(unsigned int insn) 31static inline enum direction decode_direction(unsigned int insn)
39{ 32{
40 unsigned long tmp = (insn >> 21) & 1; 33 unsigned long tmp = (insn >> 21) & 1;
@@ -255,10 +248,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
255 unsigned long addr = compute_effective_address(regs, insn); 248 unsigned long addr = compute_effective_address(regs, insn);
256 int err; 249 int err;
257 250
258#ifdef DEBUG_MNA 251 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
259 printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
260 regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
261#endif
262 switch (dir) { 252 switch (dir) {
263 case load: 253 case load:
264 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), 254 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -350,6 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
350 } 340 }
351 341
352 addr = compute_effective_address(regs, insn); 342 addr = compute_effective_address(regs, insn);
343 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
353 switch(dir) { 344 switch(dir) {
354 case load: 345 case load:
355 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), 346 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 379209982a07..378ca82b9ccc 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -20,10 +20,9 @@
20#include <asm/uaccess.h> 20#include <asm/uaccess.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/perf_event.h>
23#include <asm/fpumacro.h> 24#include <asm/fpumacro.h>
24 25
25/* #define DEBUG_MNA */
26
27enum direction { 26enum direction {
28 load, /* ld, ldd, ldh, ldsh */ 27 load, /* ld, ldd, ldh, ldsh */
29 store, /* st, std, sth, stsh */ 28 store, /* st, std, sth, stsh */
@@ -33,12 +32,6 @@ enum direction {
33 invalid, 32 invalid,
34}; 33};
35 34
36#ifdef DEBUG_MNA
37static char *dirstrings[] = {
38 "load", "store", "both", "fpload", "fpstore", "invalid"
39};
40#endif
41
42static inline enum direction decode_direction(unsigned int insn) 35static inline enum direction decode_direction(unsigned int insn)
43{ 36{
44 unsigned long tmp = (insn >> 21) & 1; 37 unsigned long tmp = (insn >> 21) & 1;
@@ -327,12 +320,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
327 320
328 addr = compute_effective_address(regs, insn, 321 addr = compute_effective_address(regs, insn,
329 ((insn >> 25) & 0x1f)); 322 ((insn >> 25) & 0x1f));
330#ifdef DEBUG_MNA 323 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
331 printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
332 "retpc[%016lx]\n",
333 regs->tpc, dirstrings[dir], addr, size,
334 regs->u_regs[UREG_RETPC]);
335#endif
336 switch (asi) { 324 switch (asi) {
337 case ASI_NL: 325 case ASI_NL:
338 case ASI_AIUPL: 326 case ASI_AIUPL:
@@ -399,6 +387,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
399 int ret, i, rd = ((insn >> 25) & 0x1f); 387 int ret, i, rd = ((insn >> 25) & 0x1f);
400 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 388 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
401 389
390 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
402 if (insn & 0x2000) { 391 if (insn & 0x2000) {
403 maybe_flush_windows(0, 0, rd, from_kernel); 392 maybe_flush_windows(0, 0, rd, from_kernel);
404 value = sign_extend_imm13(insn); 393 value = sign_extend_imm13(insn);
@@ -445,6 +434,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
445 int asi = decode_asi(insn, regs); 434 int asi = decode_asi(insn, regs);
446 int flag = (freg < 32) ? FPRS_DL : FPRS_DU; 435 int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
447 436
437 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
438
448 save_and_clear_fpu(); 439 save_and_clear_fpu();
449 current_thread_info()->xfsr[0] &= ~0x1c000; 440 current_thread_info()->xfsr[0] &= ~0x1c000;
450 if (freg & 3) { 441 if (freg & 3) {
@@ -566,6 +557,8 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
566 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 557 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
567 unsigned long *reg; 558 unsigned long *reg;
568 559
560 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
561
569 maybe_flush_windows(0, 0, rd, from_kernel); 562 maybe_flush_windows(0, 0, rd, from_kernel);
570 reg = fetch_reg_addr(rd, regs); 563 reg = fetch_reg_addr(rd, regs);
571 if (from_kernel || rd < 16) { 564 if (from_kernel || rd < 16) {
@@ -596,6 +589,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
596 589
597 if (tstate & TSTATE_PRIV) 590 if (tstate & TSTATE_PRIV)
598 die_if_kernel("lddfmna from kernel", regs); 591 die_if_kernel("lddfmna from kernel", regs);
592 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
599 if (test_thread_flag(TIF_32BIT)) 593 if (test_thread_flag(TIF_32BIT))
600 pc = (u32)pc; 594 pc = (u32)pc;
601 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 595 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -657,6 +651,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
657 651
658 if (tstate & TSTATE_PRIV) 652 if (tstate & TSTATE_PRIV)
659 die_if_kernel("stdfmna from kernel", regs); 653 die_if_kernel("stdfmna from kernel", regs);
654 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
660 if (test_thread_flag(TIF_32BIT)) 655 if (test_thread_flag(TIF_32BIT))
661 pc = (u32)pc; 656 pc = (u32)pc;
662 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 657 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index d231cbd5c526..9dfd2ebcb157 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -5,6 +5,7 @@
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/errno.h> 6#include <linux/errno.h>
7#include <linux/thread_info.h> 7#include <linux/thread_info.h>
8#include <linux/perf_event.h>
8 9
9#include <asm/ptrace.h> 10#include <asm/ptrace.h>
10#include <asm/pstate.h> 11#include <asm/pstate.h>
@@ -801,6 +802,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
801 802
802 BUG_ON(regs->tstate & TSTATE_PRIV); 803 BUG_ON(regs->tstate & TSTATE_PRIV);
803 804
805 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
806
804 if (test_thread_flag(TIF_32BIT)) 807 if (test_thread_flag(TIF_32BIT))
805 pc = (u32)pc; 808 pc = (u32)pc;
806 809
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index e75faf0e59ae..c4b5e03af115 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -44,3 +44,4 @@ obj-y += iomap.o
44obj-$(CONFIG_SPARC32) += atomic32.o 44obj-$(CONFIG_SPARC32) += atomic32.o
45obj-y += ksyms.o 45obj-y += ksyms.o
46obj-$(CONFIG_SPARC64) += PeeCeeI.o 46obj-$(CONFIG_SPARC64) += PeeCeeI.o
47obj-y += usercopy.o
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index b6557297440f..615f401edf69 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -6,10 +6,6 @@
6 6
7 .text 7 .text
8 8
9 .globl __memset
10 .type __memset, #function
11__memset: /* %o0=buf, %o1=pat, %o2=len */
12
13 .globl memset 9 .globl memset
14 .type memset, #function 10 .type memset, #function
15memset: /* %o0=buf, %o1=pat, %o2=len */ 11memset: /* %o0=buf, %o1=pat, %o2=len */
@@ -83,7 +79,6 @@ __bzero_done:
83 retl 79 retl
84 mov %o3, %o0 80 mov %o3, %o0
85 .size __bzero, .-__bzero 81 .size __bzero, .-__bzero
86 .size __memset, .-__memset
87 .size memset, .-memset 82 .size memset, .-memset
88 83
89#define EX_ST(x,y) \ 84#define EX_ST(x,y) \
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 77f228533d47..3632cb34e914 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -560,7 +560,7 @@ __csum_partial_copy_end:
560 mov %i0, %o1 560 mov %i0, %o1
561 mov %i1, %o0 561 mov %i1, %o0
5625: 5625:
563 call __memcpy 563 call memcpy
564 mov %i2, %o2 564 mov %i2, %o2
565 tst %o0 565 tst %o0
566 bne,a 2f 566 bne,a 2f
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 704b12668388..1b30bb3bfdb1 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -30,7 +30,6 @@ EXPORT_SYMBOL(__memscan_generic);
30EXPORT_SYMBOL(memcmp); 30EXPORT_SYMBOL(memcmp);
31EXPORT_SYMBOL(memcpy); 31EXPORT_SYMBOL(memcpy);
32EXPORT_SYMBOL(memset); 32EXPORT_SYMBOL(memset);
33EXPORT_SYMBOL(__memset);
34EXPORT_SYMBOL(memmove); 33EXPORT_SYMBOL(memmove);
35EXPORT_SYMBOL(__bzero); 34EXPORT_SYMBOL(__bzero);
36 35
@@ -81,7 +80,6 @@ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
81 80
82/* Special internal versions of library functions. */ 81/* Special internal versions of library functions. */
83EXPORT_SYMBOL(__copy_1page); 82EXPORT_SYMBOL(__copy_1page);
84EXPORT_SYMBOL(__memcpy);
85EXPORT_SYMBOL(__memmove); 83EXPORT_SYMBOL(__memmove);
86EXPORT_SYMBOL(bzero_1page); 84EXPORT_SYMBOL(bzero_1page);
87 85
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 7ce9c65f3592..24b8b12deed2 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -64,8 +64,9 @@ mcount:
642: sethi %hi(softirq_stack), %g3 642: sethi %hi(softirq_stack), %g3
65 or %g3, %lo(softirq_stack), %g3 65 or %g3, %lo(softirq_stack), %g3
66 ldx [%g3 + %g1], %g7 66 ldx [%g3 + %g1], %g7
67 sub %g7, STACK_BIAS, %g7
67 cmp %sp, %g7 68 cmp %sp, %g7
68 bleu,pt %xcc, 2f 69 bleu,pt %xcc, 3f
69 sethi %hi(THREAD_SIZE), %g3 70 sethi %hi(THREAD_SIZE), %g3
70 add %g7, %g3, %g7 71 add %g7, %g3, %g7
71 cmp %sp, %g7 72 cmp %sp, %g7
@@ -75,7 +76,7 @@ mcount:
75 * again, we are already trying to output the stack overflow 76 * again, we are already trying to output the stack overflow
76 * message. 77 * message.
77 */ 78 */
78 sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough 793: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
79 or %g7, %lo(ovstack), %g7 80 or %g7, %lo(ovstack), %g7
80 add %g7, OVSTACKSIZE, %g3 81 add %g7, OVSTACKSIZE, %g3
81 sub %g3, STACK_BIAS + 192, %g3 82 sub %g3, STACK_BIAS + 192, %g3
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index ce10bc869af9..34fe65751737 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -543,9 +543,6 @@ FUNC(memmove)
543 b 3f 543 b 3f
544 add %o0, 2, %o0 544 add %o0, 2, %o0
545 545
546#ifdef __KERNEL__
547FUNC(__memcpy)
548#endif
549FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ 546FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
550 547
551 sub %o0, %o1, %o4 548 sub %o0, %o1, %o4
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index 1c37ea892deb..99c017be8719 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -60,11 +60,10 @@
60 .globl __bzero_begin 60 .globl __bzero_begin
61__bzero_begin: 61__bzero_begin:
62 62
63 .globl __bzero, __memset, 63 .globl __bzero
64 .globl memset 64 .globl memset
65 .globl __memset_start, __memset_end 65 .globl __memset_start, __memset_end
66__memset_start: 66__memset_start:
67__memset:
68memset: 67memset:
69 and %o1, 0xff, %g3 68 and %o1, 0xff, %g3
70 sll %g3, 8, %g2 69 sll %g3, 8, %g2
diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c
new file mode 100644
index 000000000000..14b363fec8a2
--- /dev/null
+++ b/arch/sparc/lib/usercopy.c
@@ -0,0 +1,8 @@
1#include <linux/module.h>
2#include <linux/bug.h>
3
4void copy_from_user_overflow(void)
5{
6 WARN(1, "Buffer overflow detected!\n");
7}
8EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index e13f65da17df..a3fccde894ec 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -67,6 +67,7 @@
67#include <linux/types.h> 67#include <linux/types.h>
68#include <linux/sched.h> 68#include <linux/sched.h>
69#include <linux/mm.h> 69#include <linux/mm.h>
70#include <linux/perf_event.h>
70#include <asm/uaccess.h> 71#include <asm/uaccess.h>
71 72
72#include "sfp-util_32.h" 73#include "sfp-util_32.h"
@@ -163,6 +164,8 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
163 int retcode = 0; /* assume all succeed */ 164 int retcode = 0; /* assume all succeed */
164 unsigned long insn; 165 unsigned long insn;
165 166
167 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
168
166#ifdef DEBUG_MATHEMU 169#ifdef DEBUG_MATHEMU
167 printk("In do_mathemu()... pc is %08lx\n", regs->pc); 170 printk("In do_mathemu()... pc is %08lx\n", regs->pc);
168 printk("fpqdepth is %ld\n", fpt->thread.fpqdepth); 171 printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 6863c9bde25c..56d2c44747b8 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -11,6 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/perf_event.h>
14 15
15#include <asm/fpumacro.h> 16#include <asm/fpumacro.h>
16#include <asm/ptrace.h> 17#include <asm/ptrace.h>
@@ -183,6 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
183 184
184 if (tstate & TSTATE_PRIV) 185 if (tstate & TSTATE_PRIV)
185 die_if_kernel("unfinished/unimplemented FPop from kernel", regs); 186 die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
187 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
186 if (test_thread_flag(TIF_32BIT)) 188 if (test_thread_flag(TIF_32BIT))
187 pc = (u32)pc; 189 pc = (u32)pc;
188 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 190 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
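The perf_sw_event() calls sprinkled through the unaligned-trap and FPU/VIS emulation paths above make sparc's alignment and emulation traps visible as the generic PERF_COUNT_SW_ALIGNMENT_FAULTS and PERF_COUNT_SW_EMULATION_FAULTS software events. A minimal user-space sketch of reading one of those counters through perf_event_open(2), with the workload left as a placeholder:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc provides no perf_event_open() stub. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;

	/* count alignment faults of the current task on any CPU */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the workload of interest here ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("alignment faults: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}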
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 43b0da96a4fb..6081936bf03b 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -31,13 +31,12 @@
31#include <asm/sections.h> 31#include <asm/sections.h>
32#include <asm/mmu_context.h> 32#include <asm/mmu_context.h>
33 33
34#ifdef CONFIG_KPROBES 34static inline __kprobes int notify_page_fault(struct pt_regs *regs)
35static inline int notify_page_fault(struct pt_regs *regs)
36{ 35{
37 int ret = 0; 36 int ret = 0;
38 37
39 /* kprobe_running() needs smp_processor_id() */ 38 /* kprobe_running() needs smp_processor_id() */
40 if (!user_mode(regs)) { 39 if (kprobes_built_in() && !user_mode(regs)) {
41 preempt_disable(); 40 preempt_disable();
42 if (kprobe_running() && kprobe_fault_handler(regs, 0)) 41 if (kprobe_running() && kprobe_fault_handler(regs, 0))
43 ret = 1; 42 ret = 1;
@@ -45,12 +44,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
45 } 44 }
46 return ret; 45 return ret;
47} 46}
48#else
49static inline int notify_page_fault(struct pt_regs *regs)
50{
51 return 0;
52}
53#endif
54 47
55static void __kprobes unhandled_fault(unsigned long address, 48static void __kprobes unhandled_fault(unsigned long address,
56 struct task_struct *tsk, 49 struct task_struct *tsk,
@@ -73,7 +66,7 @@ static void __kprobes unhandled_fault(unsigned long address,
73 die_if_kernel("Oops", regs); 66 die_if_kernel("Oops", regs);
74} 67}
75 68
76static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) 69static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
77{ 70{
78 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", 71 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
79 regs->tpc); 72 regs->tpc);
@@ -170,8 +163,9 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
170 return insn; 163 return insn;
171} 164}
172 165
173static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, 166static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
174 unsigned int insn, unsigned long address) 167 int fault_code, unsigned int insn,
168 unsigned long address)
175{ 169{
176 unsigned char asi = ASI_P; 170 unsigned char asi = ASI_P;
177 171
@@ -225,7 +219,7 @@ cannot_handle:
225 unhandled_fault (address, current, regs); 219 unhandled_fault (address, current, regs);
226} 220}
227 221
228static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs) 222static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
229{ 223{
230 static int times; 224 static int times;
231 225
@@ -237,8 +231,8 @@ static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
237 show_regs(regs); 231 show_regs(regs);
238} 232}
239 233
240static void noinline bogus_32bit_fault_address(struct pt_regs *regs, 234static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
241 unsigned long addr) 235 unsigned long addr)
242{ 236{
243 static int times; 237 static int times;
244 238
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 2ffacd67c424..a89baf0d875a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -17,6 +17,7 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
20#include <linux/bitmap.h>
20 21
21#include <asm/sections.h> 22#include <asm/sections.h>
22#include <asm/page.h> 23#include <asm/page.h>
@@ -1021,20 +1022,12 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
1021 npages = (((unsigned long)vaddr & ~PAGE_MASK) + 1022 npages = (((unsigned long)vaddr & ~PAGE_MASK) +
1022 size + (PAGE_SIZE-1)) >> PAGE_SHIFT; 1023 size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
1023 1024
1024 scan = 0;
1025 local_irq_save(flags); 1025 local_irq_save(flags);
1026 for (;;) { 1026 base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
1027 scan = find_next_zero_bit(sun4c_iobuffer_map, 1027 0, npages, 0);
1028 iobuffer_map_size, scan); 1028 if (base >= iobuffer_map_size)
1029 if ((base = scan) + npages > iobuffer_map_size) goto abend; 1029 goto abend;
1030 for (;;) {
1031 if (scan >= base + npages) goto found;
1032 if (test_bit(scan, sun4c_iobuffer_map)) break;
1033 scan++;
1034 }
1035 }
1036 1030
1037found:
1038 high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start; 1031 high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
1039 high = SUN4C_REAL_PGDIR_ALIGN(high); 1032 high = SUN4C_REAL_PGDIR_ALIGN(high);
1040 while (high > sun4c_iobuffer_high) { 1033 while (high > sun4c_iobuffer_high) {
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e14629c87de4..51069245b79a 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/console.h> 7#include <linux/console.h>
8#include <linux/ctype.h> 8#include <linux/ctype.h>
9#include <linux/string.h>
9#include <linux/interrupt.h> 10#include <linux/interrupt.h>
10#include <linux/list.h> 11#include <linux/list.h>
11#include <linux/mm.h> 12#include <linux/mm.h>
@@ -131,7 +132,7 @@ void mconsole_proc(struct mc_request *req)
131 char *ptr = req->request.data, *buf; 132 char *ptr = req->request.data, *buf;
132 133
133 ptr += strlen("proc"); 134 ptr += strlen("proc");
134 while (isspace(*ptr)) ptr++; 135 ptr = skip_spaces(ptr);
135 136
136 proc = get_fs_type("proc"); 137 proc = get_fs_type("proc");
137 if (proc == NULL) { 138 if (proc == NULL) {
@@ -212,8 +213,7 @@ void mconsole_proc(struct mc_request *req)
212 char *ptr = req->request.data; 213 char *ptr = req->request.data;
213 214
214 ptr += strlen("proc"); 215 ptr += strlen("proc");
215 while (isspace(*ptr)) 216 ptr = skip_spaces(ptr);
216 ptr++;
217 snprintf(path, sizeof(path), "/proc/%s", ptr); 217 snprintf(path, sizeof(path), "/proc/%s", ptr);
218 218
219 fd = sys_open(path, 0, 0); 219 fd = sys_open(path, 0, 0);
@@ -560,8 +560,7 @@ void mconsole_config(struct mc_request *req)
560 int err; 560 int err;
561 561
562 ptr += strlen("config"); 562 ptr += strlen("config");
563 while (isspace(*ptr)) 563 ptr = skip_spaces(ptr);
564 ptr++;
565 dev = mconsole_find_dev(ptr); 564 dev = mconsole_find_dev(ptr);
566 if (dev == NULL) { 565 if (dev == NULL) {
567 mconsole_reply(req, "Bad configuration option", 1, 0); 566 mconsole_reply(req, "Bad configuration option", 1, 0);
@@ -588,7 +587,7 @@ void mconsole_remove(struct mc_request *req)
588 int err, start, end, n; 587 int err, start, end, n;
589 588
590 ptr += strlen("remove"); 589 ptr += strlen("remove");
591 while (isspace(*ptr)) ptr++; 590 ptr = skip_spaces(ptr);
592 dev = mconsole_find_dev(ptr); 591 dev = mconsole_find_dev(ptr);
593 if (dev == NULL) { 592 if (dev == NULL) {
594 mconsole_reply(req, "Bad remove option", 1, 0); 593 mconsole_reply(req, "Bad remove option", 1, 0);
@@ -712,7 +711,7 @@ void mconsole_sysrq(struct mc_request *req)
712 char *ptr = req->request.data; 711 char *ptr = req->request.data;
713 712
714 ptr += strlen("sysrq"); 713 ptr += strlen("sysrq");
715 while (isspace(*ptr)) ptr++; 714 ptr = skip_spaces(ptr);
716 715
717 /* 716 /*
718 * With 'b', the system will shut down without a chance to reply, 717 * With 'b', the system will shut down without a chance to reply,
@@ -757,8 +756,7 @@ void mconsole_stack(struct mc_request *req)
757 */ 756 */
758 757
759 ptr += strlen("stack"); 758 ptr += strlen("stack");
760 while (isspace(*ptr)) 759 ptr = skip_spaces(ptr);
761 ptr++;
762 760
763 /* 761 /*
764 * Should really check for multiple pids or reject bad args here 762 * Should really check for multiple pids or reject bad args here
@@ -833,8 +831,8 @@ static int __init mconsole_init(void)
833 831
834__initcall(mconsole_init); 832__initcall(mconsole_init);
835 833
836static int write_proc_mconsole(struct file *file, const char __user *buffer, 834static ssize_t mconsole_proc_write(struct file *file,
837 unsigned long count, void *data) 835 const char __user *buffer, size_t count, loff_t *pos)
838{ 836{
839 char *buf; 837 char *buf;
840 838
@@ -855,6 +853,11 @@ static int write_proc_mconsole(struct file *file, const char __user *buffer,
855 return count; 853 return count;
856} 854}
857 855
856static const struct file_operations mconsole_proc_fops = {
857 .owner = THIS_MODULE,
858 .write = mconsole_proc_write,
859};
860
858static int create_proc_mconsole(void) 861static int create_proc_mconsole(void)
859{ 862{
860 struct proc_dir_entry *ent; 863 struct proc_dir_entry *ent;
@@ -862,15 +865,12 @@ static int create_proc_mconsole(void)
862 if (notify_socket == NULL) 865 if (notify_socket == NULL)
863 return 0; 866 return 0;
864 867
865 ent = create_proc_entry("mconsole", S_IFREG | 0200, NULL); 868 ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_fops);
866 if (ent == NULL) { 869 if (ent == NULL) {
867 printk(KERN_INFO "create_proc_mconsole : create_proc_entry " 870 printk(KERN_INFO "create_proc_mconsole : create_proc_entry "
868 "failed\n"); 871 "failed\n");
869 return 0; 872 return 0;
870 } 873 }
871
872 ent->read_proc = NULL;
873 ent->write_proc = write_proc_mconsole;
874 return 0; 874 return 0;
875} 875}
876 876
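The mconsole conversion above swaps the open-coded isspace() loops for the kernel's skip_spaces() helper from <linux/string.h>, which simply returns a pointer past any leading whitespace. A user-space equivalent, for illustration only:

#include <ctype.h>

/* Behaves like the kernel helper: return a pointer past leading whitespace. */
static const char *skip_spaces(const char *str)
{
	while (isspace((unsigned char)*str))
		str++;
	return str;
}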
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 635d16d90a80..5ff554677f40 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -27,6 +27,7 @@
27#include "linux/init.h" 27#include "linux/init.h"
28#include "linux/cdrom.h" 28#include "linux/cdrom.h"
29#include "linux/proc_fs.h" 29#include "linux/proc_fs.h"
30#include "linux/seq_file.h"
30#include "linux/ctype.h" 31#include "linux/ctype.h"
31#include "linux/capability.h" 32#include "linux/capability.h"
32#include "linux/mm.h" 33#include "linux/mm.h"
@@ -200,23 +201,25 @@ static void make_proc_ide(void)
200 proc_ide = proc_mkdir("ide0", proc_ide_root); 201 proc_ide = proc_mkdir("ide0", proc_ide_root);
201} 202}
202 203
203static int proc_ide_read_media(char *page, char **start, off_t off, int count, 204static int fake_ide_media_proc_show(struct seq_file *m, void *v)
204 int *eof, void *data)
205{ 205{
206 int len; 206 seq_puts(m, "disk\n");
207 207 return 0;
208 strcpy(page, "disk\n"); 208}
209 len = strlen("disk\n"); 209
210 len -= off; 210static int fake_ide_media_proc_open(struct inode *inode, struct file *file)
211 if (len < count){ 211{
212 *eof = 1; 212 return single_open(file, fake_ide_media_proc_show, NULL);
213 if (len <= 0) return 0;
214 }
215 else len = count;
216 *start = page + off;
217 return len;
218} 213}
219 214
215static const struct file_operations fake_ide_media_proc_fops = {
216 .owner = THIS_MODULE,
217 .open = fake_ide_media_proc_open,
218 .read = seq_read,
219 .llseek = seq_lseek,
220 .release = single_release,
221};
222
220static void make_ide_entries(const char *dev_name) 223static void make_ide_entries(const char *dev_name)
221{ 224{
222 struct proc_dir_entry *dir, *ent; 225 struct proc_dir_entry *dir, *ent;
@@ -227,11 +230,8 @@ static void make_ide_entries(const char *dev_name)
227 dir = proc_mkdir(dev_name, proc_ide); 230 dir = proc_mkdir(dev_name, proc_ide);
228 if(!dir) return; 231 if(!dir) return;
229 232
230 ent = create_proc_entry("media", S_IFREG|S_IRUGO, dir); 233 ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops);
231 if(!ent) return; 234 if(!ent) return;
232 ent->data = NULL;
233 ent->read_proc = proc_ide_read_media;
234 ent->write_proc = NULL;
235 snprintf(name, sizeof(name), "ide0/%s", dev_name); 235 snprintf(name, sizeof(name), "ide0/%s", dev_name);
236 proc_symlink(dev_name, proc_ide_root, name); 236 proc_symlink(dev_name, proc_ide_root, name);
237} 237}
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 6540d2c9fbb7..829df49dee99 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -6,7 +6,9 @@
6#include <linux/ctype.h> 6#include <linux/ctype.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/module.h>
9#include <linux/proc_fs.h> 10#include <linux/proc_fs.h>
11#include <linux/seq_file.h>
10#include <linux/types.h> 12#include <linux/types.h>
11#include <asm/uaccess.h> 13#include <asm/uaccess.h>
12 14
@@ -16,30 +18,26 @@
16 */ 18 */
17int uml_exitcode = 0; 19int uml_exitcode = 0;
18 20
19static int read_proc_exitcode(char *page, char **start, off_t off, 21static int exitcode_proc_show(struct seq_file *m, void *v)
20 int count, int *eof, void *data)
21{ 22{
22 int len, val; 23 int val;
23 24
24 /* 25 /*
25 * Save uml_exitcode in a local so that we don't need to guarantee 26 * Save uml_exitcode in a local so that we don't need to guarantee
26 * that sprintf accesses it atomically. 27 * that sprintf accesses it atomically.
27 */ 28 */
28 val = uml_exitcode; 29 val = uml_exitcode;
29 len = sprintf(page, "%d\n", val); 30 seq_printf(m, "%d\n", val);
30 len -= off; 31 return 0;
31 if (len <= off+count) 32}
32 *eof = 1; 33
33 *start = page + off; 34static int exitcode_proc_open(struct inode *inode, struct file *file)
34 if (len > count) 35{
35 len = count; 36 return single_open(file, exitcode_proc_show, NULL);
36 if (len < 0)
37 len = 0;
38 return len;
39} 37}
40 38
41static int write_proc_exitcode(struct file *file, const char __user *buffer, 39static ssize_t exitcode_proc_write(struct file *file,
42 unsigned long count, void *data) 40 const char __user *buffer, size_t count, loff_t *pos)
43{ 41{
44 char *end, buf[sizeof("nnnnn\0")]; 42 char *end, buf[sizeof("nnnnn\0")];
45 int tmp; 43 int tmp;
@@ -55,20 +53,25 @@ static int write_proc_exitcode(struct file *file, const char __user *buffer,
55 return count; 53 return count;
56} 54}
57 55
56static const struct file_operations exitcode_proc_fops = {
57 .owner = THIS_MODULE,
58 .open = exitcode_proc_open,
59 .read = seq_read,
60 .llseek = seq_lseek,
61 .release = single_release,
62 .write = exitcode_proc_write,
63};
64
58static int make_proc_exitcode(void) 65static int make_proc_exitcode(void)
59{ 66{
60 struct proc_dir_entry *ent; 67 struct proc_dir_entry *ent;
61 68
62 ent = create_proc_entry("exitcode", 0600, NULL); 69 ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
63 if (ent == NULL) { 70 if (ent == NULL) {
64 printk(KERN_WARNING "make_proc_exitcode : Failed to register " 71 printk(KERN_WARNING "make_proc_exitcode : Failed to register "
65 "/proc/exitcode\n"); 72 "/proc/exitcode\n");
66 return 0; 73 return 0;
67 } 74 }
68
69 ent->read_proc = read_proc_exitcode;
70 ent->write_proc = write_proc_exitcode;
71
72 return 0; 75 return 0;
73} 76}
74 77
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 039270b9b73b..89474ba0741e 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v)
34 } 34 }
35 35
36 if (i < NR_IRQS) { 36 if (i < NR_IRQS) {
37 spin_lock_irqsave(&irq_desc[i].lock, flags); 37 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
38 action = irq_desc[i].action; 38 action = irq_desc[i].action;
39 if (!action) 39 if (!action)
40 goto skip; 40 goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
53 53
54 seq_putc(p, '\n'); 54 seq_putc(p, '\n');
55skip: 55skip:
56 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 56 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
57 } else if (i == NR_IRQS) 57 } else if (i == NR_IRQS)
58 seq_putc(p, '\n'); 58 seq_putc(p, '\n');
59 59
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 4a28a1568d85..2f910a1b7454 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -9,11 +9,13 @@
9#include <linux/hardirq.h> 9#include <linux/hardirq.h>
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/module.h>
12#include <linux/personality.h> 13#include <linux/personality.h>
13#include <linux/proc_fs.h> 14#include <linux/proc_fs.h>
14#include <linux/ptrace.h> 15#include <linux/ptrace.h>
15#include <linux/random.h> 16#include <linux/random.h>
16#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/seq_file.h>
17#include <linux/tick.h> 19#include <linux/tick.h>
18#include <linux/threads.h> 20#include <linux/threads.h>
19#include <asm/current.h> 21#include <asm/current.h>
@@ -336,16 +338,19 @@ int get_using_sysemu(void)
336 return atomic_read(&using_sysemu); 338 return atomic_read(&using_sysemu);
337} 339}
338 340
339static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data) 341static int sysemu_proc_show(struct seq_file *m, void *v)
340{ 342{
341 if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) 343 seq_printf(m, "%d\n", get_using_sysemu());
342 /* No overflow */ 344 return 0;
343 *eof = 1; 345}
344 346
345 return strlen(buf); 347static int sysemu_proc_open(struct inode *inode, struct file *file)
348{
349 return single_open(file, sysemu_proc_show, NULL);
346} 350}
347 351
348static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data) 352static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
353 size_t count, loff_t *pos)
349{ 354{
350 char tmp[2]; 355 char tmp[2];
351 356
@@ -358,13 +363,22 @@ static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned
358 return count; 363 return count;
359} 364}
360 365
366static const struct file_operations sysemu_proc_fops = {
367 .owner = THIS_MODULE,
368 .open = sysemu_proc_open,
369 .read = seq_read,
370 .llseek = seq_lseek,
371 .release = single_release,
372 .write = sysemu_proc_write,
373};
374
361int __init make_proc_sysemu(void) 375int __init make_proc_sysemu(void)
362{ 376{
363 struct proc_dir_entry *ent; 377 struct proc_dir_entry *ent;
364 if (!sysemu_supported) 378 if (!sysemu_supported)
365 return 0; 379 return 0;
366 380
367 ent = create_proc_entry("sysemu", 0600, NULL); 381 ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
368 382
369 if (ent == NULL) 383 if (ent == NULL)
370 { 384 {
@@ -372,9 +386,6 @@ int __init make_proc_sysemu(void)
372 return 0; 386 return 0;
373 } 387 }
374 388
375 ent->read_proc = proc_read_sysemu;
376 ent->write_proc = proc_write_sysemu;
377
378 return 0; 389 return 0;
379} 390}
380 391
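The UML /proc conversions above (mconsole, the fake ide media node, exitcode, sysemu) all follow the same recipe: drop create_proc_entry() plus the read_proc/write_proc hooks and register a file_operations built on seq_file's single_open() via proc_create(). A minimal sketch of that pattern as a stand-alone module, with illustrative names; it is read-only here, and a .write method can be added exactly as the exitcode and sysemu hunks do:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_value = 42;

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", example_value);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __init example_proc_init(void)
{
	if (!proc_create("example", 0444, NULL, &example_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
MODULE_LICENSE("GPL");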
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
index d0da9d7c5371..770885472ed4 100644
--- a/arch/um/sys-i386/asm/elf.h
+++ b/arch/um/sys-i386/asm/elf.h
@@ -48,7 +48,6 @@ typedef struct user_i387_struct elf_fpregset_t;
48 PT_REGS_EAX(regs) = 0; \ 48 PT_REGS_EAX(regs) = 0; \
49} while (0) 49} while (0)
50 50
51#define USE_ELF_CORE_DUMP
52#define ELF_EXEC_PAGESIZE 4096 51#define ELF_EXEC_PAGESIZE 4096
53 52
54#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) 53#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/arch/um/sys-ppc/asm/elf.h b/arch/um/sys-ppc/asm/elf.h
index af9463cd8ce5..8aacaf56508d 100644
--- a/arch/um/sys-ppc/asm/elf.h
+++ b/arch/um/sys-ppc/asm/elf.h
@@ -17,8 +17,6 @@ extern long elf_aux_hwcap;
17#define ELF_CLASS ELFCLASS32 17#define ELF_CLASS ELFCLASS32
18#endif 18#endif
19 19
20#define USE_ELF_CORE_DUMP
21
22#define R_386_NONE 0 20#define R_386_NONE 0
23#define R_386_32 1 21#define R_386_32 1
24#define R_386_PC32 2 22#define R_386_PC32 2
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index 04b9e87c8dad..49655c83efd2 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -104,7 +104,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
104 clear_thread_flag(TIF_IA32); 104 clear_thread_flag(TIF_IA32);
105#endif 105#endif
106 106
107#define USE_ELF_CORE_DUMP
108#define ELF_EXEC_PAGESIZE 4096 107#define ELF_EXEC_PAGESIZE 4096
109 108
110#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) 109#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 32a1918e1b88..3b2a5aca4edb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2012,18 +2012,9 @@ config SCx200HR_TIMER
2012 processor goes idle (as is done by the scheduler). The 2012 processor goes idle (as is done by the scheduler). The
2013 other workaround is idle=poll boot option. 2013 other workaround is idle=poll boot option.
2014 2014
2015config GEODE_MFGPT_TIMER
2016 def_bool y
2017 prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
2018 depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
2019 ---help---
2020 This driver provides a clock event source based on the MFGPT
2021 timer(s) in the CS5535 and CS5536 companion chip for the geode.
2022 MFGPTs have a better resolution and max interval than the
2023 generic PIT, and are suitable for use as high-res timers.
2024
2025config OLPC 2015config OLPC
2026 bool "One Laptop Per Child support" 2016 bool "One Laptop Per Child support"
2017 select GPIOLIB
2027 default n 2018 default n
2028 ---help--- 2019 ---help---
2029 Add support for detecting the unique features of the OLPC 2020 Add support for detecting the unique features of the OLPC
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 0f6c02f3b7d4..ac91eed21061 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
67 if (!dev->dma_mask) 67 if (!dev->dma_mask)
68 return 0; 68 return 0;
69 69
70 return addr + size <= *dev->dma_mask; 70 return addr + size - 1 <= *dev->dma_mask;
71} 71}
72 72
73static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 73static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
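The dma_capable() change fixes an off-by-one: the last byte a transfer touches is addr + size - 1, so comparing addr + size against the mask both rejected buffers ending exactly at the mask boundary and could wrap for buffers ending at the top of the address space. A tiny stand-alone illustration with a 32-bit mask (not the kernel header itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool capable_old(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size <= mask;
}

static bool capable_new(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size - 1 <= mask;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* DMA_BIT_MASK(32) */
	uint64_t addr = 0xfffff000ULL;	/* last 4 KiB below 4 GiB */
	uint64_t size = 0x1000;

	/* buffer ends exactly at the 4 GiB boundary: old check wrongly
	 * rejects it, new check accepts it */
	printf("old: %d  new: %d\n", capable_old(addr, size, mask),
	       capable_new(addr, size, mask));
	return 0;
}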
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 8a024babe5e6..b4501ee223ad 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -239,7 +239,6 @@ extern int force_personality32;
239#endif /* !CONFIG_X86_32 */ 239#endif /* !CONFIG_X86_32 */
240 240
241#define CORE_DUMP_USE_REGSET 241#define CORE_DUMP_USE_REGSET
242#define USE_ELF_CORE_DUMP
243#define ELF_EXEC_PAGESIZE 4096 242#define ELF_EXEC_PAGESIZE 4096
244 243
245/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 244/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h
index ad3c2ed75481..7cd73552a4e8 100644
--- a/arch/x86/include/asm/geode.h
+++ b/arch/x86/include/asm/geode.h
@@ -12,160 +12,7 @@
12 12
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <linux/io.h> 14#include <linux/io.h>
15 15#include <linux/cs5535.h>
16/* Generic southbridge functions */
17
18#define GEODE_DEV_PMS 0
19#define GEODE_DEV_ACPI 1
20#define GEODE_DEV_GPIO 2
21#define GEODE_DEV_MFGPT 3
22
23extern int geode_get_dev_base(unsigned int dev);
24
25/* Useful macros */
26#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
27#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
28#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
29#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
30
31/* MSRS */
32
33#define MSR_GLIU_P2D_RO0 0x10000029
34
35#define MSR_LX_GLD_MSR_CONFIG 0x48002001
36#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
37 * sheet has the wrong value */
38#define MSR_GLCP_SYS_RSTPLL 0x4C000014
39#define MSR_GLCP_DOTPLL 0x4C000015
40
41#define MSR_LBAR_SMB 0x5140000B
42#define MSR_LBAR_GPIO 0x5140000C
43#define MSR_LBAR_MFGPT 0x5140000D
44#define MSR_LBAR_ACPI 0x5140000E
45#define MSR_LBAR_PMS 0x5140000F
46
47#define MSR_DIVIL_SOFT_RESET 0x51400017
48
49#define MSR_PIC_YSEL_LOW 0x51400020
50#define MSR_PIC_YSEL_HIGH 0x51400021
51#define MSR_PIC_ZSEL_LOW 0x51400022
52#define MSR_PIC_ZSEL_HIGH 0x51400023
53#define MSR_PIC_IRQM_LPC 0x51400025
54
55#define MSR_MFGPT_IRQ 0x51400028
56#define MSR_MFGPT_NR 0x51400029
57#define MSR_MFGPT_SETUP 0x5140002B
58
59#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
60
61#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
62#define MSR_GX_MSR_PADSEL 0xC0002011
63
64/* Resource Sizes */
65
66#define LBAR_GPIO_SIZE 0xFF
67#define LBAR_MFGPT_SIZE 0x40
68#define LBAR_ACPI_SIZE 0x40
69#define LBAR_PMS_SIZE 0x80
70
71/* ACPI registers (PMS block) */
72
73/*
74 * PM1_EN is only valid when VSA is enabled for 16 bit reads.
75 * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
76 * with a 32 bit read at offset 0x0
77 */
78
79#define PM1_STS 0x00
80#define PM1_EN 0x02
81#define PM1_CNT 0x08
82#define PM2_CNT 0x0C
83#define PM_TMR 0x10
84#define PM_GPE0_STS 0x18
85#define PM_GPE0_EN 0x1C
86
87/* PMC registers (PMS block) */
88
89#define PM_SSD 0x00
90#define PM_SCXA 0x04
91#define PM_SCYA 0x08
92#define PM_OUT_SLPCTL 0x0C
93#define PM_SCLK 0x10
94#define PM_SED 0x1
95#define PM_SCXD 0x18
96#define PM_SCYD 0x1C
97#define PM_IN_SLPCTL 0x20
98#define PM_WKD 0x30
99#define PM_WKXD 0x34
100#define PM_RD 0x38
101#define PM_WKXA 0x3C
102#define PM_FSD 0x40
103#define PM_TSD 0x44
104#define PM_PSD 0x48
105#define PM_NWKD 0x4C
106#define PM_AWKD 0x50
107#define PM_SSC 0x54
108
109/* VSA2 magic values */
110
111#define VSA_VRC_INDEX 0xAC1C
112#define VSA_VRC_DATA 0xAC1E
113#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
114#define VSA_VR_SIGNATURE 0x0003
115#define VSA_VR_MEM_SIZE 0x0200
116#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
117#define GSW_VSA_SIG 0x534d /* General Software signature */
118/* GPIO */
119
120#define GPIO_OUTPUT_VAL 0x00
121#define GPIO_OUTPUT_ENABLE 0x04
122#define GPIO_OUTPUT_OPEN_DRAIN 0x08
123#define GPIO_OUTPUT_INVERT 0x0C
124#define GPIO_OUTPUT_AUX1 0x10
125#define GPIO_OUTPUT_AUX2 0x14
126#define GPIO_PULL_UP 0x18
127#define GPIO_PULL_DOWN 0x1C
128#define GPIO_INPUT_ENABLE 0x20
129#define GPIO_INPUT_INVERT 0x24
130#define GPIO_INPUT_FILTER 0x28
131#define GPIO_INPUT_EVENT_COUNT 0x2C
132#define GPIO_READ_BACK 0x30
133#define GPIO_INPUT_AUX1 0x34
134#define GPIO_EVENTS_ENABLE 0x38
135#define GPIO_LOCK_ENABLE 0x3C
136#define GPIO_POSITIVE_EDGE_EN 0x40
137#define GPIO_NEGATIVE_EDGE_EN 0x44
138#define GPIO_POSITIVE_EDGE_STS 0x48
139#define GPIO_NEGATIVE_EDGE_STS 0x4C
140
141#define GPIO_MAP_X 0xE0
142#define GPIO_MAP_Y 0xE4
143#define GPIO_MAP_Z 0xE8
144#define GPIO_MAP_W 0xEC
145
146static inline u32 geode_gpio(unsigned int nr)
147{
148 BUG_ON(nr > 28);
149 return 1 << nr;
150}
151
152extern void geode_gpio_set(u32, unsigned int);
153extern void geode_gpio_clear(u32, unsigned int);
154extern int geode_gpio_isset(u32, unsigned int);
155extern void geode_gpio_setup_event(unsigned int, int, int);
156extern void geode_gpio_set_irq(unsigned int, unsigned int);
157
158static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
159{
160 geode_gpio_setup_event(gpio, pair, 0);
161}
162
163static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
164{
165 geode_gpio_setup_event(gpio, pair, 1);
166}
167
168/* Specific geode tests */
169 16
170static inline int is_geode_gx(void) 17static inline int is_geode_gx(void)
171{ 18{
@@ -186,68 +33,4 @@ static inline int is_geode(void)
186 return (is_geode_gx() || is_geode_lx()); 33 return (is_geode_gx() || is_geode_lx());
187} 34}
188 35
189#ifdef CONFIG_MGEODE_LX
190extern int geode_has_vsa2(void);
191#else
192static inline int geode_has_vsa2(void)
193{
194 return 0;
195}
196#endif
197
198/* MFGPTs */
199
200#define MFGPT_MAX_TIMERS 8
201#define MFGPT_TIMER_ANY (-1)
202
203#define MFGPT_DOMAIN_WORKING 1
204#define MFGPT_DOMAIN_STANDBY 2
205#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
206
207#define MFGPT_CMP1 0
208#define MFGPT_CMP2 1
209
210#define MFGPT_EVENT_IRQ 0
211#define MFGPT_EVENT_NMI 1
212#define MFGPT_EVENT_RESET 3
213
214#define MFGPT_REG_CMP1 0
215#define MFGPT_REG_CMP2 2
216#define MFGPT_REG_COUNTER 4
217#define MFGPT_REG_SETUP 6
218
219#define MFGPT_SETUP_CNTEN (1 << 15)
220#define MFGPT_SETUP_CMP2 (1 << 14)
221#define MFGPT_SETUP_CMP1 (1 << 13)
222#define MFGPT_SETUP_SETUP (1 << 12)
223#define MFGPT_SETUP_STOPEN (1 << 11)
224#define MFGPT_SETUP_EXTEN (1 << 10)
225#define MFGPT_SETUP_REVEN (1 << 5)
226#define MFGPT_SETUP_CLKSEL (1 << 4)
227
228static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
229{
230 u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
231 outw(value, base + reg + (timer * 8));
232}
233
234static inline u16 geode_mfgpt_read(int timer, u16 reg)
235{
236 u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
237 return inw(base + reg + (timer * 8));
238}
239
240extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
241extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
242extern int geode_mfgpt_alloc_timer(int timer, int domain);
243
244#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
245#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
246
247#ifdef CONFIG_GEODE_MFGPT_TIMER
248extern int __init mfgpt_timer_setup(void);
249#else
250static inline int mfgpt_timer_setup(void) { return 0; }
251#endif
252
253#endif /* _ASM_X86_GEODE_H */ 36#endif /* _ASM_X86_GEODE_H */
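
The geode.h hunk above drops the southbridge MSRs, GPIO register offsets, VSA probing and MFGPT helpers in favour of a single #include <linux/cs5535.h>, so the CS5535/CS5536 definitions are shared rather than living under arch/x86; only the CPU-detection helpers (is_geode_gx()/is_geode_lx()/is_geode()) stay in the arch header. A rough sketch of a consumer after the move, using cs5535_has_vsa2() since that is the helper this patch itself switches to in olpc.c further down (my_geode_probe() is a made-up example):

/* Illustrative only: shared CS5535/CS5536 bits now come from the
 * generic header, while asm/geode.h keeps CPU detection. */
#include <linux/cs5535.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/geode.h>

static int my_geode_probe(void)
{
	if (!is_geode())
		return -ENODEV;
	if (cs5535_has_vsa2())
		pr_info("VSA2 firmware detected\n");
	return 0;
}
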
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 834a30295fab..3a57385d9fa7 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -120,7 +120,7 @@ extern int olpc_ec_mask_unset(uint8_t bits);
120 120
121/* GPIO assignments */ 121/* GPIO assignments */
122 122
123#define OLPC_GPIO_MIC_AC geode_gpio(1) 123#define OLPC_GPIO_MIC_AC 1
124#define OLPC_GPIO_DCON_IRQ geode_gpio(7) 124#define OLPC_GPIO_DCON_IRQ geode_gpio(7)
125#define OLPC_GPIO_THRM_ALRM geode_gpio(10) 125#define OLPC_GPIO_THRM_ALRM geode_gpio(10)
126#define OLPC_GPIO_SMB_CLK geode_gpio(14) 126#define OLPC_GPIO_SMB_CLK geode_gpio(14)
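
OLPC_GPIO_MIC_AC becomes a plain pin number rather than a geode_gpio() bitmask, which lines up with the new "select GPIOLIB" added to the OLPC Kconfig entry earlier in this patch: with the CS5535 GPIOs presumably exposed through gpiolib, board code can use the generic GPIO calls. A hedged sketch of that style of access (the label string and init function below are made up for illustration):

/* Illustrative only: driving the now-numeric OLPC pin via gpiolib. */
#include <linux/gpio.h>
#include <asm/olpc.h>

static int olpc_mic_ac_init(void)
{
	int err;

	err = gpio_request(OLPC_GPIO_MIC_AC, "olpc-mic-ac");
	if (err)
		return err;
	return gpio_direction_output(OLPC_GPIO_MIC_AC, 0);
}
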
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859c..dd59a85a918f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
731 731
732#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) 732#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
733 733
734static inline int __raw_spin_is_locked(struct raw_spinlock *lock) 734static inline int arch_spin_is_locked(struct arch_spinlock *lock)
735{ 735{
736 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); 736 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
737} 737}
738 738
739static inline int __raw_spin_is_contended(struct raw_spinlock *lock) 739static inline int arch_spin_is_contended(struct arch_spinlock *lock)
740{ 740{
741 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); 741 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
742} 742}
743#define __raw_spin_is_contended __raw_spin_is_contended 743#define arch_spin_is_contended arch_spin_is_contended
744 744
745static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) 745static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
746{ 746{
747 PVOP_VCALL1(pv_lock_ops.spin_lock, lock); 747 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
748} 748}
749 749
750static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, 750static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
751 unsigned long flags) 751 unsigned long flags)
752{ 752{
753 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); 753 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
754} 754}
755 755
756static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) 756static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
757{ 757{
758 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); 758 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
759} 759}
760 760
761static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) 761static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
762{ 762{
763 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); 763 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
764} 764}
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9357473c8da0..b1e70d51e40c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
318 phys_addr_t phys, pgprot_t flags); 318 phys_addr_t phys, pgprot_t flags);
319}; 319};
320 320
321struct raw_spinlock; 321struct arch_spinlock;
322struct pv_lock_ops { 322struct pv_lock_ops {
323 int (*spin_is_locked)(struct raw_spinlock *lock); 323 int (*spin_is_locked)(struct arch_spinlock *lock);
324 int (*spin_is_contended)(struct raw_spinlock *lock); 324 int (*spin_is_contended)(struct arch_spinlock *lock);
325 void (*spin_lock)(struct raw_spinlock *lock); 325 void (*spin_lock)(struct arch_spinlock *lock);
326 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); 326 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
327 int (*spin_trylock)(struct raw_spinlock *lock); 327 int (*spin_trylock)(struct arch_spinlock *lock);
328 void (*spin_unlock)(struct raw_spinlock *lock); 328 void (*spin_unlock)(struct arch_spinlock *lock);
329}; 329};
330 330
331/* This contains all the paravirt structures: we get a convenient 331/* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 3d11fd0f44c5..9d369f680321 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -292,6 +292,8 @@ extern void user_enable_block_step(struct task_struct *);
292#define arch_has_block_step() (boot_cpu_data.x86 >= 6) 292#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
293#endif 293#endif
294 294
295#define ARCH_HAS_USER_SINGLE_STEP_INFO
296
295struct user_desc; 297struct user_desc;
296extern int do_get_thread_area(struct task_struct *p, int idx, 298extern int do_get_thread_area(struct task_struct *p, int idx,
297 struct user_desc __user *info); 299 struct user_desc __user *info);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4e77853321db..3089f70c0c52 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
58#if (NR_CPUS < 256) 58#if (NR_CPUS < 256)
59#define TICKET_SHIFT 8 59#define TICKET_SHIFT 8
60 60
61static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 61static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
62{ 62{
63 short inc = 0x0100; 63 short inc = 0x0100;
64 64
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
77 : "memory", "cc"); 77 : "memory", "cc");
78} 78}
79 79
80static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) 80static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
81{ 81{
82 int tmp, new; 82 int tmp, new;
83 83
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
96 return tmp; 96 return tmp;
97} 97}
98 98
99static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) 99static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
100{ 100{
101 asm volatile(UNLOCK_LOCK_PREFIX "incb %0" 101 asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
102 : "+m" (lock->slock) 102 : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
106#else 106#else
107#define TICKET_SHIFT 16 107#define TICKET_SHIFT 16
108 108
109static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 109static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
110{ 110{
111 int inc = 0x00010000; 111 int inc = 0x00010000;
112 int tmp; 112 int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
127 : "memory", "cc"); 127 : "memory", "cc");
128} 128}
129 129
130static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) 130static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
131{ 131{
132 int tmp; 132 int tmp;
133 int new; 133 int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
149 return tmp; 149 return tmp;
150} 150}
151 151
152static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) 152static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
153{ 153{
154 asm volatile(UNLOCK_LOCK_PREFIX "incw %0" 154 asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
155 : "+m" (lock->slock) 155 : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
158} 158}
159#endif 159#endif
160 160
161static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) 161static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
162{ 162{
163 int tmp = ACCESS_ONCE(lock->slock); 163 int tmp = ACCESS_ONCE(lock->slock);
164 164
165 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); 165 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
166} 166}
167 167
168static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) 168static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
169{ 169{
170 int tmp = ACCESS_ONCE(lock->slock); 170 int tmp = ACCESS_ONCE(lock->slock);
171 171
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
174 174
175#ifndef CONFIG_PARAVIRT_SPINLOCKS 175#ifndef CONFIG_PARAVIRT_SPINLOCKS
176 176
177static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 177static inline int arch_spin_is_locked(arch_spinlock_t *lock)
178{ 178{
179 return __ticket_spin_is_locked(lock); 179 return __ticket_spin_is_locked(lock);
180} 180}
181 181
182static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 182static inline int arch_spin_is_contended(arch_spinlock_t *lock)
183{ 183{
184 return __ticket_spin_is_contended(lock); 184 return __ticket_spin_is_contended(lock);
185} 185}
186#define __raw_spin_is_contended __raw_spin_is_contended 186#define arch_spin_is_contended arch_spin_is_contended
187 187
188static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 188static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
189{ 189{
190 __ticket_spin_lock(lock); 190 __ticket_spin_lock(lock);
191} 191}
192 192
193static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) 193static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
194{ 194{
195 return __ticket_spin_trylock(lock); 195 return __ticket_spin_trylock(lock);
196} 196}
197 197
198static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 198static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
199{ 199{
200 __ticket_spin_unlock(lock); 200 __ticket_spin_unlock(lock);
201} 201}
202 202
203static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, 203static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
204 unsigned long flags) 204 unsigned long flags)
205{ 205{
206 __raw_spin_lock(lock); 206 arch_spin_lock(lock);
207} 207}
208 208
209#endif /* CONFIG_PARAVIRT_SPINLOCKS */ 209#endif /* CONFIG_PARAVIRT_SPINLOCKS */
210 210
211static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 211static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
212{ 212{
213 while (__raw_spin_is_locked(lock)) 213 while (arch_spin_is_locked(lock))
214 cpu_relax(); 214 cpu_relax();
215} 215}
216 216
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
232 * read_can_lock - would read_trylock() succeed? 232 * read_can_lock - would read_trylock() succeed?
233 * @lock: the rwlock in question. 233 * @lock: the rwlock in question.
234 */ 234 */
235static inline int __raw_read_can_lock(raw_rwlock_t *lock) 235static inline int arch_read_can_lock(arch_rwlock_t *lock)
236{ 236{
237 return (int)(lock)->lock > 0; 237 return (int)(lock)->lock > 0;
238} 238}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
241 * write_can_lock - would write_trylock() succeed? 241 * write_can_lock - would write_trylock() succeed?
242 * @lock: the rwlock in question. 242 * @lock: the rwlock in question.
243 */ 243 */
244static inline int __raw_write_can_lock(raw_rwlock_t *lock) 244static inline int arch_write_can_lock(arch_rwlock_t *lock)
245{ 245{
246 return (lock)->lock == RW_LOCK_BIAS; 246 return (lock)->lock == RW_LOCK_BIAS;
247} 247}
248 248
249static inline void __raw_read_lock(raw_rwlock_t *rw) 249static inline void arch_read_lock(arch_rwlock_t *rw)
250{ 250{
251 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" 251 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
252 "jns 1f\n" 252 "jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
255 ::LOCK_PTR_REG (rw) : "memory"); 255 ::LOCK_PTR_REG (rw) : "memory");
256} 256}
257 257
258static inline void __raw_write_lock(raw_rwlock_t *rw) 258static inline void arch_write_lock(arch_rwlock_t *rw)
259{ 259{
260 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" 260 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
261 "jz 1f\n" 261 "jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
264 ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); 264 ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
265} 265}
266 266
267static inline int __raw_read_trylock(raw_rwlock_t *lock) 267static inline int arch_read_trylock(arch_rwlock_t *lock)
268{ 268{
269 atomic_t *count = (atomic_t *)lock; 269 atomic_t *count = (atomic_t *)lock;
270 270
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
274 return 0; 274 return 0;
275} 275}
276 276
277static inline int __raw_write_trylock(raw_rwlock_t *lock) 277static inline int arch_write_trylock(arch_rwlock_t *lock)
278{ 278{
279 atomic_t *count = (atomic_t *)lock; 279 atomic_t *count = (atomic_t *)lock;
280 280
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
284 return 0; 284 return 0;
285} 285}
286 286
287static inline void __raw_read_unlock(raw_rwlock_t *rw) 287static inline void arch_read_unlock(arch_rwlock_t *rw)
288{ 288{
289 asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); 289 asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
290} 290}
291 291
292static inline void __raw_write_unlock(raw_rwlock_t *rw) 292static inline void arch_write_unlock(arch_rwlock_t *rw)
293{ 293{
294 asm volatile(LOCK_PREFIX "addl %1, %0" 294 asm volatile(LOCK_PREFIX "addl %1, %0"
295 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); 295 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
296} 296}
297 297
298#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 298#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
299#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 299#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
300 300
301#define _raw_spin_relax(lock) cpu_relax() 301#define arch_spin_relax(lock) cpu_relax()
302#define _raw_read_relax(lock) cpu_relax() 302#define arch_read_relax(lock) cpu_relax()
303#define _raw_write_relax(lock) cpu_relax() 303#define arch_write_relax(lock) cpu_relax()
304 304
305/* The {read|write|spin}_lock() on x86 are full memory barriers. */ 305/* The {read|write|spin}_lock() on x86 are full memory barriers. */
306static inline void smp_mb__after_lock(void) { } 306static inline void smp_mb__after_lock(void) { }
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c87091..dcb48b2edc11 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,16 +5,16 @@
5# error "please don't include this file directly" 5# error "please don't include this file directly"
6#endif 6#endif
7 7
8typedef struct raw_spinlock { 8typedef struct arch_spinlock {
9 unsigned int slock; 9 unsigned int slock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 unsigned int lock; 15 unsigned int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 18#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19 19
20#endif /* _ASM_X86_SPINLOCK_TYPES_H */ 20#endif /* _ASM_X86_SPINLOCK_TYPES_H */
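
The rename running through the last few files, raw_spinlock_t/__raw_spin_* becoming arch_spinlock_t/arch_spin_* and raw_rwlock_t becoming arch_rwlock_t, makes the arch-level primitives explicit and frees the raw_spin naming for locks that must always stay spinning, as the later conversions of irq_desc->lock to raw_spin_lock() in this same patch show. A sketch of the renamed low-level primitives, mirroring the die_lock pattern in dumpstack.c further down (the names example_lock and example_critical_section are invented for this sketch; ordinary code keeps using the spinlock_t wrappers rather than touching arch_spinlock_t directly):

#include <linux/spinlock.h>

static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
	arch_spin_lock(&example_lock);
	/* ... data protected by example_lock ... */
	arch_spin_unlock(&example_lock);
}
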
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 40e37b10c6c0..c5087d796587 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -35,11 +35,16 @@
35# endif 35# endif
36#endif 36#endif
37 37
38/* Node not present */ 38/*
39#define NUMA_NO_NODE (-1) 39 * to preserve the visibility of NUMA_NO_NODE definition,
40 * moved to there from here. May be used independent of
41 * CONFIG_NUMA.
42 */
43#include <linux/numa.h>
40 44
41#ifdef CONFIG_NUMA 45#ifdef CONFIG_NUMA
42#include <linux/cpumask.h> 46#include <linux/cpumask.h>
47
43#include <asm/mpspec.h> 48#include <asm/mpspec.h>
44 49
45#ifdef CONFIG_X86_32 50#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 7ed17ff502b9..2751f3075d8b 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -76,15 +76,6 @@ union partition_info_u {
76 }; 76 };
77}; 77};
78 78
79union uv_watchlist_u {
80 u64 val;
81 struct {
82 u64 blade : 16,
83 size : 32,
84 filler : 16;
85 };
86};
87
88enum uv_memprotect { 79enum uv_memprotect {
89 UV_MEMPROT_RESTRICT_ACCESS, 80 UV_MEMPROT_RESTRICT_ACCESS,
90 UV_MEMPROT_ALLOW_AMO, 81 UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
100 91
101extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *); 92extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
102extern s64 uv_bios_freq_base(u64, u64 *); 93extern s64 uv_bios_freq_base(u64, u64 *);
103extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int, 94extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
104 unsigned long *); 95 unsigned long *);
105extern int uv_bios_mq_watchlist_free(int, int); 96extern int uv_bios_mq_watchlist_free(int, int);
106extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect); 97extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index d1414af98559..811bfabc80b7 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -172,6 +172,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
172#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024) 172#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
173#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024) 173#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
174 174
175#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
176
175#define UV_GLOBAL_MMR32_PNODE_SHIFT 15 177#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
176#define UV_GLOBAL_MMR64_PNODE_SHIFT 26 178#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
177 179
@@ -232,6 +234,26 @@ static inline unsigned long uv_gpa(void *v)
232 return uv_soc_phys_ram_to_gpa(__pa(v)); 234 return uv_soc_phys_ram_to_gpa(__pa(v));
233} 235}
234 236
237/* Top two bits indicate the requested address is in MMR space. */
238static inline int
239uv_gpa_in_mmr_space(unsigned long gpa)
240{
241 return (gpa >> 62) == 0x3UL;
242}
243
244/* UV global physical address --> socket phys RAM */
245static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
246{
247 unsigned long paddr = gpa & uv_hub_info->gpa_mask;
248 unsigned long remap_base = uv_hub_info->lowmem_remap_base;
249 unsigned long remap_top = uv_hub_info->lowmem_remap_top;
250
251 if (paddr >= remap_base && paddr < remap_base + remap_top)
252 paddr -= remap_base;
253 return paddr;
254}
255
256
235/* gnode -> pnode */ 257/* gnode -> pnode */
236static inline unsigned long uv_gpa_to_gnode(unsigned long gpa) 258static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
237{ 259{
@@ -308,6 +330,15 @@ static inline unsigned long uv_read_global_mmr64(int pnode,
308} 330}
309 331
310/* 332/*
333 * Global MMR space addresses when referenced by the GRU. (GRU does
334 * NOT use socket addressing).
335 */
336static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
337{
338 return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
339}
340
341/*
311 * Access hub local MMRs. Faster than using global space but only local MMRs 342 * Access hub local MMRs. Faster than using global space but only local MMRs
312 * are accessible. 343 * are accessible.
313 */ 344 */
@@ -434,6 +465,14 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
434 } 465 }
435} 466}
436 467
468static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
469{
470 return (1UL << UVH_IPI_INT_SEND_SHFT) |
471 ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
472 (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
473 (vector << UVH_IPI_INT_VECTOR_SHFT);
474}
475
437static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) 476static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
438{ 477{
439 unsigned long val; 478 unsigned long val;
@@ -442,10 +481,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
442 if (vector == NMI_VECTOR) 481 if (vector == NMI_VECTOR)
443 dmode = dest_NMI; 482 dmode = dest_NMI;
444 483
445 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 484 val = uv_hub_ipi_value(apicid, vector, dmode);
446 ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
447 (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
448 (vector << UVH_IPI_INT_VECTOR_SHFT);
449 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 485 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
450} 486}
451 487
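
uv_hub.h gains several helpers above: uv_gpa_in_mmr_space() tests the top two bits of a global address, uv_gpa_to_soc_phys_ram() undoes the low-memory remap, uv_global_gru_mmr_address() builds GRU-visible MMR addresses, and uv_hub_ipi_value() factors the IPI word composition out of uv_hub_send_ipi(). The MMR-space test is easy to check by hand; a stand-alone sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of uv_gpa_in_mmr_space(): the top two bits of the
 * 64-bit global address mark MMR space. */
static int gpa_in_mmr_space(uint64_t gpa)
{
	return (gpa >> 62) == 0x3ULL;
}

int main(void)
{
	uint64_t mmr_gpa = 0xc000000012345678ULL;	/* hypothetical MMR address */
	uint64_t ram_gpa = 0x0000001234567000ULL;	/* hypothetical RAM address */

	printf("mmr: %d  ram: %d\n",
	       gpa_in_mmr_space(mmr_gpa), gpa_in_mmr_space(ram_gpa));
	return 0;
}
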
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4f2e66e29ecc..d87f09bc5a52 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
89obj-$(CONFIG_HPET_TIMER) += hpet.o 89obj-$(CONFIG_HPET_TIMER) += hpet.o
90 90
91obj-$(CONFIG_K8_NB) += k8.o 91obj-$(CONFIG_K8_NB) += k8.o
92obj-$(CONFIG_MGEODE_LX) += geode_32.o mfgpt_32.o
93obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o 92obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
94obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o 93obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
95 94
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index b990b5cc9541..23824fef789c 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -19,7 +19,7 @@
19 19
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/gfp.h> 21#include <linux/gfp.h>
22#include <linux/bitops.h> 22#include <linux/bitmap.h>
23#include <linux/debugfs.h> 23#include <linux/debugfs.h>
24#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
@@ -1162,7 +1162,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1162 1162
1163 address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; 1163 address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
1164 1164
1165 iommu_area_free(range->bitmap, address, pages); 1165 bitmap_clear(range->bitmap, address, pages);
1166 1166
1167} 1167}
1168 1168
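
In the hunk above (and in pci-calgary_64.c and pci-gart_64.c below) the iommu-helper wrappers iommu_area_reserve()/iommu_area_free() are replaced by the generic bitmap_set()/bitmap_clear() from <linux/bitmap.h>, which mark or clear a run of bits directly. A small sketch of the two calls, with an arbitrarily sized bitmap invented for illustration:

#include <linux/bitmap.h>
#include <linux/types.h>

#define EXAMPLE_PAGES 64	/* hypothetical aperture size in pages */

static DECLARE_BITMAP(example_map, EXAMPLE_PAGES);

static void example_reserve_and_free(void)
{
	/* Mark pages 8..15 as allocated, then release them again. */
	bitmap_set(example_map, 8, 8);
	bitmap_clear(example_map, 8, 8);
}
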
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d5d498fbee4b..11a5851f1f50 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2431 continue; 2431 continue;
2432 2432
2433 cfg = irq_cfg(irq); 2433 cfg = irq_cfg(irq);
2434 spin_lock(&desc->lock); 2434 raw_spin_lock(&desc->lock);
2435 2435
2436 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2436 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2437 goto unlock; 2437 goto unlock;
@@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2450 } 2450 }
2451 __get_cpu_var(vector_irq)[vector] = -1; 2451 __get_cpu_var(vector_irq)[vector] = -1;
2452unlock: 2452unlock:
2453 spin_unlock(&desc->lock); 2453 raw_spin_unlock(&desc->lock);
2454 } 2454 }
2455 2455
2456 irq_exit(); 2456 irq_exit();
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index 63a88e1f987d..b0206a211b09 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
101} 101}
102 102
103int 103int
104uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size, 104uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
105 unsigned long *intr_mmr_offset) 105 unsigned long *intr_mmr_offset)
106{ 106{
107 union uv_watchlist_u size_blade;
108 u64 watchlist; 107 u64 watchlist;
109 s64 ret; 108 s64 ret;
110 109
111 size_blade.size = mq_size;
112 size_blade.blade = blade;
113
114 /* 110 /*
115 * bios returns watchlist number or negative error number. 111 * bios returns watchlist number or negative error number.
116 */ 112 */
117 ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr, 113 ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
118 size_blade.val, (u64)intr_mmr_offset, 114 mq_size, (u64)intr_mmr_offset,
119 (u64)&watchlist, 0); 115 (u64)&watchlist, 0);
120 if (ret < BIOS_STATUS_SUCCESS) 116 if (ret < BIOS_STATUS_SUCCESS)
121 return ret; 117 return ret;
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 3c1b12d461d1..e006e56f699c 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -4,6 +4,7 @@
4#include <linux/proc_fs.h> 4#include <linux/proc_fs.h>
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/ctype.h> 6#include <linux/ctype.h>
7#include <linux/string.h>
7#include <linux/init.h> 8#include <linux/init.h>
8 9
9#define LINE_SIZE 80 10#define LINE_SIZE 80
@@ -133,8 +134,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
133 return -EINVAL; 134 return -EINVAL;
134 135
135 base = simple_strtoull(line + 5, &ptr, 0); 136 base = simple_strtoull(line + 5, &ptr, 0);
136 while (isspace(*ptr)) 137 ptr = skip_spaces(ptr);
137 ptr++;
138 138
139 if (strncmp(ptr, "size=", 5)) 139 if (strncmp(ptr, "size=", 5))
140 return -EINVAL; 140 return -EINVAL;
@@ -142,14 +142,11 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
142 size = simple_strtoull(ptr + 5, &ptr, 0); 142 size = simple_strtoull(ptr + 5, &ptr, 0);
143 if ((base & 0xfff) || (size & 0xfff)) 143 if ((base & 0xfff) || (size & 0xfff))
144 return -EINVAL; 144 return -EINVAL;
145 while (isspace(*ptr)) 145 ptr = skip_spaces(ptr);
146 ptr++;
147 146
148 if (strncmp(ptr, "type=", 5)) 147 if (strncmp(ptr, "type=", 5))
149 return -EINVAL; 148 return -EINVAL;
150 ptr += 5; 149 ptr = skip_spaces(ptr + 5);
151 while (isspace(*ptr))
152 ptr++;
153 150
154 for (i = 0; i < MTRR_NUM_TYPES; ++i) { 151 for (i = 0; i < MTRR_NUM_TYPES; ++i) {
155 if (strcmp(ptr, mtrr_strings[i])) 152 if (strcmp(ptr, mtrr_strings[i]))
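
The three open-coded "while (isspace(*ptr)) ptr++;" loops in the MTRR /proc write handler collapse above into skip_spaces() from <linux/string.h>, which returns a pointer to the first non-whitespace character. A hypothetical parser fragment showing the idiom (parse_size_field() is invented for this sketch; the "size=" keyword matches the handler above):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int parse_size_field(char *ptr, unsigned long long *size)
{
	ptr = skip_spaces(ptr);			/* advance past leading blanks */
	if (strncmp(ptr, "size=", 5))
		return -EINVAL;
	*size = simple_strtoull(ptr + 5, &ptr, 0);
	return 0;
}
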
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b8ce165dde5d..0a0aa1cec8f1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
188} 188}
189EXPORT_SYMBOL(dump_stack); 189EXPORT_SYMBOL(dump_stack);
190 190
191static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; 191static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
192static int die_owner = -1; 192static int die_owner = -1;
193static unsigned int die_nest_count; 193static unsigned int die_nest_count;
194 194
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
207 /* racy, but better than risking deadlock. */ 207 /* racy, but better than risking deadlock. */
208 raw_local_irq_save(flags); 208 raw_local_irq_save(flags);
209 cpu = smp_processor_id(); 209 cpu = smp_processor_id();
210 if (!__raw_spin_trylock(&die_lock)) { 210 if (!arch_spin_trylock(&die_lock)) {
211 if (cpu == die_owner) 211 if (cpu == die_owner)
212 /* nested oops. should stop eventually */; 212 /* nested oops. should stop eventually */;
213 else 213 else
214 __raw_spin_lock(&die_lock); 214 arch_spin_lock(&die_lock);
215 } 215 }
216 die_nest_count++; 216 die_nest_count++;
217 die_owner = cpu; 217 die_owner = cpu;
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
231 die_nest_count--; 231 die_nest_count--;
232 if (!die_nest_count) 232 if (!die_nest_count)
233 /* Nest count reaches zero, release the lock. */ 233 /* Nest count reaches zero, release the lock. */
234 __raw_spin_unlock(&die_lock); 234 arch_spin_unlock(&die_lock);
235 raw_local_irq_restore(flags); 235 raw_local_irq_restore(flags);
236 oops_exit(); 236 oops_exit();
237 237
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
deleted file mode 100644
index 9b08e852fd1a..000000000000
--- a/arch/x86/kernel/geode_32.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * AMD Geode southbridge support code
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/ioport.h>
14#include <linux/io.h>
15#include <asm/msr.h>
16#include <asm/geode.h>
17
18static struct {
19 char *name;
20 u32 msr;
21 int size;
22 u32 base;
23} lbars[] = {
24 { "geode-pms", MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 },
25 { "geode-acpi", MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 },
26 { "geode-gpio", MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 },
27 { "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 }
28};
29
30static void __init init_lbars(void)
31{
32 u32 lo, hi;
33 int i;
34
35 for (i = 0; i < ARRAY_SIZE(lbars); i++) {
36 rdmsr(lbars[i].msr, lo, hi);
37 if (hi & 0x01)
38 lbars[i].base = lo & 0x0000ffff;
39
40 if (lbars[i].base == 0)
41 printk(KERN_ERR "geode: Couldn't initialize '%s'\n",
42 lbars[i].name);
43 }
44}
45
46int geode_get_dev_base(unsigned int dev)
47{
48 BUG_ON(dev >= ARRAY_SIZE(lbars));
49 return lbars[dev].base;
50}
51EXPORT_SYMBOL_GPL(geode_get_dev_base);
52
53/* === GPIO API === */
54
55void geode_gpio_set(u32 gpio, unsigned int reg)
56{
57 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
58
59 if (!base)
60 return;
61
62 /* low bank register */
63 if (gpio & 0xFFFF)
64 outl(gpio & 0xFFFF, base + reg);
65 /* high bank register */
66 gpio >>= 16;
67 if (gpio)
68 outl(gpio, base + 0x80 + reg);
69}
70EXPORT_SYMBOL_GPL(geode_gpio_set);
71
72void geode_gpio_clear(u32 gpio, unsigned int reg)
73{
74 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
75
76 if (!base)
77 return;
78
79 /* low bank register */
80 if (gpio & 0xFFFF)
81 outl((gpio & 0xFFFF) << 16, base + reg);
82 /* high bank register */
83 gpio &= (0xFFFF << 16);
84 if (gpio)
85 outl(gpio, base + 0x80 + reg);
86}
87EXPORT_SYMBOL_GPL(geode_gpio_clear);
88
89int geode_gpio_isset(u32 gpio, unsigned int reg)
90{
91 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
92 u32 val;
93
94 if (!base)
95 return 0;
96
97 /* low bank register */
98 if (gpio & 0xFFFF) {
99 val = inl(base + reg) & (gpio & 0xFFFF);
100 if ((gpio & 0xFFFF) == val)
101 return 1;
102 }
103 /* high bank register */
104 gpio >>= 16;
105 if (gpio) {
106 val = inl(base + 0x80 + reg) & gpio;
107 if (gpio == val)
108 return 1;
109 }
110 return 0;
111}
112EXPORT_SYMBOL_GPL(geode_gpio_isset);
113
114void geode_gpio_set_irq(unsigned int group, unsigned int irq)
115{
116 u32 lo, hi;
117
118 if (group > 7 || irq > 15)
119 return;
120
121 rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
122
123 lo &= ~(0xF << (group * 4));
124 lo |= (irq & 0xF) << (group * 4);
125
126 wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
127}
128EXPORT_SYMBOL_GPL(geode_gpio_set_irq);
129
130void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
131{
132 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
133 u32 offset, shift, val;
134
135 if (gpio >= 24)
136 offset = GPIO_MAP_W;
137 else if (gpio >= 16)
138 offset = GPIO_MAP_Z;
139 else if (gpio >= 8)
140 offset = GPIO_MAP_Y;
141 else
142 offset = GPIO_MAP_X;
143
144 shift = (gpio % 8) * 4;
145
146 val = inl(base + offset);
147
148 /* Clear whatever was there before */
149 val &= ~(0xF << shift);
150
151 /* And set the new value */
152
153 val |= ((pair & 7) << shift);
154
155 /* Set the PME bit if this is a PME event */
156
157 if (pme)
158 val |= (1 << (shift + 3));
159
160 outl(val, base + offset);
161}
162EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
163
164int geode_has_vsa2(void)
165{
166 static int has_vsa2 = -1;
167
168 if (has_vsa2 == -1) {
169 u16 val;
170
171 /*
172 * The VSA has virtual registers that we can query for a
173 * signature.
174 */
175 outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
176 outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
177
178 val = inw(VSA_VRC_DATA);
179 has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
180 }
181
182 return has_vsa2;
183}
184EXPORT_SYMBOL_GPL(geode_has_vsa2);
185
186static int __init geode_southbridge_init(void)
187{
188 if (!is_geode())
189 return -ENODEV;
190
191 init_lbars();
192 (void) mfgpt_timer_setup();
193 return 0;
194}
195
196postcore_initcall(geode_southbridge_init);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 664bcb7384ac..91fd0c70a18a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
149 if (!desc) 149 if (!desc)
150 return 0; 150 return 0;
151 151
152 spin_lock_irqsave(&desc->lock, flags); 152 raw_spin_lock_irqsave(&desc->lock, flags);
153 for_each_online_cpu(j) 153 for_each_online_cpu(j)
154 any_count |= kstat_irqs_cpu(i, j); 154 any_count |= kstat_irqs_cpu(i, j);
155 action = desc->action; 155 action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)
170 170
171 seq_putc(p, '\n'); 171 seq_putc(p, '\n');
172out: 172out:
173 spin_unlock_irqrestore(&desc->lock, flags); 173 raw_spin_unlock_irqrestore(&desc->lock, flags);
174 return 0; 174 return 0;
175} 175}
176 176
@@ -294,12 +294,12 @@ void fixup_irqs(void)
294 continue; 294 continue;
295 295
296 /* interrupt's are disabled at this point */ 296 /* interrupt's are disabled at this point */
297 spin_lock(&desc->lock); 297 raw_spin_lock(&desc->lock);
298 298
299 affinity = desc->affinity; 299 affinity = desc->affinity;
300 if (!irq_has_action(irq) || 300 if (!irq_has_action(irq) ||
301 cpumask_equal(affinity, cpu_online_mask)) { 301 cpumask_equal(affinity, cpu_online_mask)) {
302 spin_unlock(&desc->lock); 302 raw_spin_unlock(&desc->lock);
303 continue; 303 continue;
304 } 304 }
305 305
@@ -326,7 +326,7 @@ void fixup_irqs(void)
326 if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) 326 if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
327 desc->chip->unmask(irq); 327 desc->chip->unmask(irq);
328 328
329 spin_unlock(&desc->lock); 329 raw_spin_unlock(&desc->lock);
330 330
331 if (break_affinity && set_affinity) 331 if (break_affinity && set_affinity)
332 printk("Broke affinity for irq %i\n", irq); 332 printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
356 irq = __get_cpu_var(vector_irq)[vector]; 356 irq = __get_cpu_var(vector_irq)[vector];
357 357
358 desc = irq_to_desc(irq); 358 desc = irq_to_desc(irq);
359 spin_lock(&desc->lock); 359 raw_spin_lock(&desc->lock);
360 if (desc->chip->retrigger) 360 if (desc->chip->retrigger)
361 desc->chip->retrigger(irq); 361 desc->chip->retrigger(irq);
362 spin_unlock(&desc->lock); 362 raw_spin_unlock(&desc->lock);
363 } 363 }
364 } 364 }
365} 365}
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
deleted file mode 100644
index 2a62d843f015..000000000000
--- a/arch/x86/kernel/mfgpt_32.c
+++ /dev/null
@@ -1,410 +0,0 @@
1/*
2 * Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT)
3 *
4 * Copyright (C) 2006, Advanced Micro Devices, Inc.
5 * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 *
11 * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
12 */
13
14/*
15 * We are using the 32.768kHz input clock - it's the only one that has the
16 * ranges we find desirable. The following table lists the suitable
17 * divisors and the associated Hz, minimum interval and the maximum interval:
18 *
19 * Divisor Hz Min Delta (s) Max Delta (s)
20 * 1 32768 .00048828125 2.000
21 * 2 16384 .0009765625 4.000
22 * 4 8192 .001953125 8.000
23 * 8 4096 .00390625 16.000
24 * 16 2048 .0078125 32.000
25 * 32 1024 .015625 64.000
26 * 64 512 .03125 128.000
27 * 128 256 .0625 256.000
28 * 256 128 .125 512.000
29 */
30
31#include <linux/kernel.h>
32#include <linux/interrupt.h>
33#include <linux/module.h>
34#include <asm/geode.h>
35
36#define MFGPT_DEFAULT_IRQ 7
37
38static struct mfgpt_timer_t {
39 unsigned int avail:1;
40} mfgpt_timers[MFGPT_MAX_TIMERS];
41
42/* Selected from the table above */
43
44#define MFGPT_DIVISOR 16
45#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
46#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
47#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
48
49/* Allow for disabling of MFGPTs */
50static int disable;
51static int __init mfgpt_disable(char *s)
52{
53 disable = 1;
54 return 1;
55}
56__setup("nomfgpt", mfgpt_disable);
57
58/* Reset the MFGPT timers. This is required by some broken BIOSes which already
59 * do the same and leave the system in an unstable state. TinyBIOS 0.98 is
60 * affected at least (0.99 is OK with MFGPT workaround left to off).
61 */
62static int __init mfgpt_fix(char *s)
63{
64 u32 val, dummy;
65
66 /* The following udocumented bit resets the MFGPT timers */
67 val = 0xFF; dummy = 0;
68 wrmsr(MSR_MFGPT_SETUP, val, dummy);
69 return 1;
70}
71__setup("mfgptfix", mfgpt_fix);
72
73/*
74 * Check whether any MFGPTs are available for the kernel to use. In most
75 * cases, firmware that uses AMD's VSA code will claim all timers during
76 * bootup; we certainly don't want to take them if they're already in use.
77 * In other cases (such as with VSAless OpenFirmware), the system firmware
78 * leaves timers available for us to use.
79 */
80
81
82static int timers = -1;
83
84static void geode_mfgpt_detect(void)
85{
86 int i;
87 u16 val;
88
89 timers = 0;
90
91 if (disable) {
92 printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n");
93 goto done;
94 }
95
96 if (!geode_get_dev_base(GEODE_DEV_MFGPT)) {
97 printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n");
98 goto done;
99 }
100
101 for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
102 val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
103 if (!(val & MFGPT_SETUP_SETUP)) {
104 mfgpt_timers[i].avail = 1;
105 timers++;
106 }
107 }
108
109done:
110 printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers);
111}
112
113int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
114{
115 u32 msr, mask, value, dummy;
116 int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
117
118 if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
119 return -EIO;
120
121 /*
122 * The register maps for these are described in sections 6.17.1.x of
123 * the AMD Geode CS5536 Companion Device Data Book.
124 */
125 switch (event) {
126 case MFGPT_EVENT_RESET:
127 /*
128 * XXX: According to the docs, we cannot reset timers above
129 * 6; that is, resets for 7 and 8 will be ignored. Is this
130 * a problem? -dilinger
131 */
132 msr = MSR_MFGPT_NR;
133 mask = 1 << (timer + 24);
134 break;
135
136 case MFGPT_EVENT_NMI:
137 msr = MSR_MFGPT_NR;
138 mask = 1 << (timer + shift);
139 break;
140
141 case MFGPT_EVENT_IRQ:
142 msr = MSR_MFGPT_IRQ;
143 mask = 1 << (timer + shift);
144 break;
145
146 default:
147 return -EIO;
148 }
149
150 rdmsr(msr, value, dummy);
151
152 if (enable)
153 value |= mask;
154 else
155 value &= ~mask;
156
157 wrmsr(msr, value, dummy);
158 return 0;
159}
160EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
161
162int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
163{
164 u32 zsel, lpc, dummy;
165 int shift;
166
167 if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
168 return -EIO;
169
170 /*
171 * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
172 * is using the same CMP of the timer's Siamese twin, the IRQ is set to
173 * 2, and we mustn't use nor change it.
174 * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
175 * IRQ of the 1st. This can only happen if forcing an IRQ, calling this
176 * with *irq==0 is safe. Currently there _are_ no 2 drivers.
177 */
178 rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
179 shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
180 if (((zsel >> shift) & 0xF) == 2)
181 return -EIO;
182
183 /* Choose IRQ: if none supplied, keep IRQ already set or use default */
184 if (!*irq)
185 *irq = (zsel >> shift) & 0xF;
186 if (!*irq)
187 *irq = MFGPT_DEFAULT_IRQ;
188
189 /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
190 if (*irq < 1 || *irq == 2 || *irq > 15)
191 return -EIO;
192 rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
193 if (lpc & (1 << *irq))
194 return -EIO;
195
196 /* All chosen and checked - go for it */
197 if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
198 return -EIO;
199 if (enable) {
200 zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
201 wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
202 }
203
204 return 0;
205}
206
207static int mfgpt_get(int timer)
208{
209 mfgpt_timers[timer].avail = 0;
210 printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
211 return timer;
212}
213
214int geode_mfgpt_alloc_timer(int timer, int domain)
215{
216 int i;
217
218 if (timers == -1) {
219 /* timers haven't been detected yet */
220 geode_mfgpt_detect();
221 }
222
223 if (!timers)
224 return -1;
225
226 if (timer >= MFGPT_MAX_TIMERS)
227 return -1;
228
229 if (timer < 0) {
230 /* Try to find an available timer */
231 for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
232 if (mfgpt_timers[i].avail)
233 return mfgpt_get(i);
234
235 if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
236 break;
237 }
238 } else {
239 /* If they requested a specific timer, try to honor that */
240 if (mfgpt_timers[timer].avail)
241 return mfgpt_get(timer);
242 }
243
244 /* No timers available - too bad */
245 return -1;
246}
247EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
248
249
250#ifdef CONFIG_GEODE_MFGPT_TIMER
251
252/*
253 * The MFPGT timers on the CS5536 provide us with suitable timers to use
254 * as clock event sources - not as good as a HPET or APIC, but certainly
255 * better than the PIT. This isn't a general purpose MFGPT driver, but
256 * a simplified one designed specifically to act as a clock event source.
257 * For full details about the MFGPT, please consult the CS5536 data sheet.
258 */
259
260#include <linux/clocksource.h>
261#include <linux/clockchips.h>
262
263static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
264static u16 mfgpt_event_clock;
265
266static int irq;
267static int __init mfgpt_setup(char *str)
268{
269 get_option(&str, &irq);
270 return 1;
271}
272__setup("mfgpt_irq=", mfgpt_setup);
273
274static void mfgpt_disable_timer(u16 clock)
275{
276 /* avoid races by clearing CMP1 and CMP2 unconditionally */
277 geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN |
278 MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2);
279}
280
281static int mfgpt_next_event(unsigned long, struct clock_event_device *);
282static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *);
283
284static struct clock_event_device mfgpt_clockevent = {
285 .name = "mfgpt-timer",
286 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
287 .set_mode = mfgpt_set_mode,
288 .set_next_event = mfgpt_next_event,
289 .rating = 250,
290 .cpumask = cpu_all_mask,
291 .shift = 32
292};
293
294static void mfgpt_start_timer(u16 delta)
295{
296 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
297 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
298
299 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
300 MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
301}
302
303static void mfgpt_set_mode(enum clock_event_mode mode,
304 struct clock_event_device *evt)
305{
306 mfgpt_disable_timer(mfgpt_event_clock);
307
308 if (mode == CLOCK_EVT_MODE_PERIODIC)
309 mfgpt_start_timer(MFGPT_PERIODIC);
310
311 mfgpt_tick_mode = mode;
312}
313
314static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
315{
316 mfgpt_start_timer(delta);
317 return 0;
318}
319
320static irqreturn_t mfgpt_tick(int irq, void *dev_id)
321{
322 u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP);
323
324 /* See if the interrupt was for us */
325 if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
326 return IRQ_NONE;
327
328 /* Turn off the clock (and clear the event) */
329 mfgpt_disable_timer(mfgpt_event_clock);
330
331 if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
332 return IRQ_HANDLED;
333
334 /* Clear the counter */
335 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
336
337 /* Restart the clock in periodic mode */
338
339 if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) {
340 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
341 MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
342 }
343
344 mfgpt_clockevent.event_handler(&mfgpt_clockevent);
345 return IRQ_HANDLED;
346}
347
348static struct irqaction mfgptirq = {
349 .handler = mfgpt_tick,
350 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
351 .name = "mfgpt-timer"
352};
353
354int __init mfgpt_timer_setup(void)
355{
356 int timer, ret;
357 u16 val;
358
359 timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
360 if (timer < 0) {
361 printk(KERN_ERR
362 "mfgpt-timer: Could not allocate a MFPGT timer\n");
363 return -ENODEV;
364 }
365
366 mfgpt_event_clock = timer;
367
368 /* Set up the IRQ on the MFGPT side */
369 if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
370 printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
371 return -EIO;
372 }
373
374 /* And register it with the kernel */
375 ret = setup_irq(irq, &mfgptirq);
376
377 if (ret) {
378 printk(KERN_ERR
379 "mfgpt-timer: Unable to set up the interrupt.\n");
380 goto err;
381 }
382
383 /* Set the clock scale and enable the event mode for CMP2 */
384 val = MFGPT_SCALE | (3 << 8);
385
386 geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
387
388 /* Set up the clock event */
389 mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
390 mfgpt_clockevent.shift);
391 mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
392 &mfgpt_clockevent);
393 mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
394 &mfgpt_clockevent);
395
396 printk(KERN_INFO
397 "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n",
398 timer, irq);
399 clockevents_register_device(&mfgpt_clockevent);
400
401 return 0;
402
403err:
404 geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
405 printk(KERN_ERR
406 "mfgpt-timer: Unable to set up the MFGPT clock source\n");
407 return -EIO;
408}
409
410#endif
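
The divisor table at the top of the deleted mfgpt_32.c is worth checking against the driver's constants: with the fixed 32.768 kHz input and MFGPT_DIVISOR of 16, the tick rate is 32768 / 16 = 2048 Hz; the 16-bit comparator then gives a maximum interval of 65536 / 2048 = 32 s, and the table's minimum of 0.0078125 s is 16 ticks at that rate (the clockevent itself uses 0xF, i.e. 15 ticks, as its minimum delta). The arch/x86 copy of the driver is removed here, presumably in favour of the shared CS5535/CS5536 support that the new <linux/cs5535.h> include in asm/geode.h points at. A stand-alone sketch of the arithmetic:

#include <stdio.h>

/* Worked numbers from the divisor table in the deleted mfgpt_32.c:
 * 32.768 kHz input, divisor 16, 16-bit comparator. */
int main(void)
{
	const double input_hz = 32768.0, divisor = 16.0;
	const double hz = input_hz / divisor;		/* 2048 Hz */

	printf("tick rate   : %.0f Hz\n", hz);
	printf("min interval: %.7f s\n", 16.0 / hz);	/* 0.0078125 s */
	printf("max interval: %.3f s\n", 65536.0 / hz);	/* 32.000 s */
	return 0;
}
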
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 4006c522adc7..9d1d263f786f 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -212,7 +212,7 @@ static int __init olpc_init(void)
212 unsigned char *romsig; 212 unsigned char *romsig;
213 213
214 /* The ioremap check is dangerous; limit what we run it on */ 214 /* The ioremap check is dangerous; limit what we run it on */
215 if (!is_geode() || geode_has_vsa2()) 215 if (!is_geode() || cs5535_has_vsa2())
216 return 0; 216 return 0;
217 217
218 spin_lock_init(&ec_lock); 218 spin_lock_init(&ec_lock);
@@ -244,7 +244,7 @@ static int __init olpc_init(void)
244 (unsigned char *) &olpc_platform_info.ecver, 1); 244 (unsigned char *) &olpc_platform_info.ecver, 1);
245 245
246 /* check to see if the VSA exists */ 246 /* check to see if the VSA exists */
247 if (geode_has_vsa2()) 247 if (cs5535_has_vsa2())
248 olpc_platform_info.flags |= OLPC_F_VSA; 248 olpc_platform_info.flags |= OLPC_F_VSA;
249 249
250 printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", 250 printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 3a7c5a44082e..676b8c77a976 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,9 +8,9 @@
8#include <asm/paravirt.h> 8#include <asm/paravirt.h>
9 9
10static inline void 10static inline void
11default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 11default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
12{ 12{
13 __raw_spin_lock(lock); 13 arch_spin_lock(lock);
14} 14}
15 15
16struct pv_lock_ops pv_lock_ops = { 16struct pv_lock_ops pv_lock_ops = {
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index c563e4c8ff39..2bbde6078143 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -31,7 +31,7 @@
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/crash_dump.h> 32#include <linux/crash_dump.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/bitops.h> 34#include <linux/bitmap.h>
35#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
@@ -212,7 +212,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
212 212
213 spin_lock_irqsave(&tbl->it_lock, flags); 213 spin_lock_irqsave(&tbl->it_lock, flags);
214 214
215 iommu_area_reserve(tbl->it_map, index, npages); 215 bitmap_set(tbl->it_map, index, npages);
216 216
217 spin_unlock_irqrestore(&tbl->it_lock, flags); 217 spin_unlock_irqrestore(&tbl->it_lock, flags);
218} 218}
@@ -303,7 +303,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
303 303
304 spin_lock_irqsave(&tbl->it_lock, flags); 304 spin_lock_irqsave(&tbl->it_lock, flags);
305 305
306 iommu_area_free(tbl->it_map, entry, npages); 306 bitmap_clear(tbl->it_map, entry, npages);
307 307
308 spin_unlock_irqrestore(&tbl->it_lock, flags); 308 spin_unlock_irqrestore(&tbl->it_lock, flags);
309} 309}
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 56c0e730d3fe..34de53b46f87 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -23,7 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/topology.h> 24#include <linux/topology.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/bitops.h> 26#include <linux/bitmap.h>
27#include <linux/kdebug.h> 27#include <linux/kdebug.h>
28#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
29#include <linux/iommu-helper.h> 29#include <linux/iommu-helper.h>
@@ -126,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
126 unsigned long flags; 126 unsigned long flags;
127 127
128 spin_lock_irqsave(&iommu_bitmap_lock, flags); 128 spin_lock_irqsave(&iommu_bitmap_lock, flags);
129 iommu_area_free(iommu_gart_bitmap, offset, size); 129 bitmap_clear(iommu_gart_bitmap, offset, size);
130 if (offset >= next_bit) 130 if (offset >= next_bit)
131 next_bit = offset + size; 131 next_bit = offset + size;
132 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 132 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -792,7 +792,7 @@ int __init gart_iommu_init(void)
792 * Out of IOMMU space handling. 792 * Out of IOMMU space handling.
793 * Reserve some invalid pages at the beginning of the GART. 793 * Reserve some invalid pages at the beginning of the GART.
794 */ 794 */
795 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 795 bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
796 796
797 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", 797 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
798 iommu_size >> 20); 798 iommu_size >> 20);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 7079ddaf0731..2779321046bd 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1676,21 +1676,33 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1676#endif 1676#endif
1677} 1677}
1678 1678
1679void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 1679static void fill_sigtrap_info(struct task_struct *tsk,
1680 int error_code, int si_code) 1680 struct pt_regs *regs,
1681 int error_code, int si_code,
1682 struct siginfo *info)
1681{ 1683{
1682 struct siginfo info;
1683
1684 tsk->thread.trap_no = 1; 1684 tsk->thread.trap_no = 1;
1685 tsk->thread.error_code = error_code; 1685 tsk->thread.error_code = error_code;
1686 1686
1687 memset(&info, 0, sizeof(info)); 1687 memset(info, 0, sizeof(*info));
1688 info.si_signo = SIGTRAP; 1688 info->si_signo = SIGTRAP;
1689 info.si_code = si_code; 1689 info->si_code = si_code;
1690 info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
1691}
1690 1692
1691 /* User-mode ip? */ 1693void user_single_step_siginfo(struct task_struct *tsk,
1692 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL; 1694 struct pt_regs *regs,
1695 struct siginfo *info)
1696{
1697 fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
1698}
1693 1699
1700void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1701 int error_code, int si_code)
1702{
1703 struct siginfo info;
1704
1705 fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
1694 /* Send us the fake SIGTRAP */ 1706 /* Send us the fake SIGTRAP */
1695 force_sig_info(SIGTRAP, &info, tsk); 1707 force_sig_info(SIGTRAP, &info, tsk);
1696} 1708}
@@ -1755,29 +1767,22 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1755 1767
1756asmregparm void syscall_trace_leave(struct pt_regs *regs) 1768asmregparm void syscall_trace_leave(struct pt_regs *regs)
1757{ 1769{
1770 bool step;
1771
1758 if (unlikely(current->audit_context)) 1772 if (unlikely(current->audit_context))
1759 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1773 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1760 1774
1761 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1775 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1762 trace_sys_exit(regs, regs->ax); 1776 trace_sys_exit(regs, regs->ax);
1763 1777
1764 if (test_thread_flag(TIF_SYSCALL_TRACE))
1765 tracehook_report_syscall_exit(regs, 0);
1766
1767 /* 1778 /*
1768 * If TIF_SYSCALL_EMU is set, we only get here because of 1779 * If TIF_SYSCALL_EMU is set, we only get here because of
1769 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). 1780 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
1770 * We already reported this syscall instruction in 1781 * We already reported this syscall instruction in
1771 * syscall_trace_enter(), so don't do any more now. 1782 * syscall_trace_enter().
1772 */
1773 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1774 return;
1775
1776 /*
1777 * If we are single-stepping, synthesize a trap to follow the
1778 * system call instruction.
1779 */ 1783 */
1780 if (test_thread_flag(TIF_SINGLESTEP) && 1784 step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
1781 tracehook_consider_fatal_signal(current, SIGTRAP)) 1785 !test_thread_flag(TIF_SYSCALL_EMU);
1782 send_sigtrap(current, regs, 0, TRAP_BRKPT); 1786 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1787 tracehook_report_syscall_exit(regs, step);
1783} 1788}
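The syscall_trace_leave() rework above folds the single-step SIGTRAP into a single tracehook_report_syscall_exit(regs, step) call instead of sending it by hand. A toy truth table of the new flag handling; plain booleans stand in for the TIF_* thread flags here, and this sketch only illustrates the logic, not the real exit path:

    #include <stdio.h>
    #include <stdbool.h>

    static void exit_path(bool singlestep, bool sysemu, bool trace)
    {
        /* SYSEMU single-step was already reported at syscall entry,
         * so it must not produce a second trap here */
        bool step = singlestep && !sysemu;

        printf("singlestep=%d sysemu=%d trace=%d -> report=%d step=%d\n",
               singlestep, sysemu, trace, step || trace, step);
    }

    int main(void)
    {
        exit_path(false, false, true);  /* PTRACE_SYSCALL: report, no trap */
        exit_path(true,  false, false); /* PTRACE_SINGLESTEP: report with trap */
        exit_path(true,  true,  false); /* PTRACE_SYSEMU_SINGLESTEP: nothing here */
        return 0;
    }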
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 201eab63b05f..fda313ebbb03 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -12,7 +12,7 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <asm/reboot_fixups.h> 13#include <asm/reboot_fixups.h>
14#include <asm/msr.h> 14#include <asm/msr.h>
15#include <asm/geode.h> 15#include <linux/cs5535.h>
16 16
17static void cs5530a_warm_reset(struct pci_dev *dev) 17static void cs5530a_warm_reset(struct pci_dev *dev)
18{ 18{
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index eed156851f5d..0aa5fed8b9e6 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
33 * we want to have the fastest, inlined, non-debug version 33 * we want to have the fastest, inlined, non-debug version
34 * of a critical section, to be able to prove TSC time-warps: 34 * of a critical section, to be able to prove TSC time-warps:
35 */ 35 */
36static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; 36static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
37 37
38static __cpuinitdata cycles_t last_tsc; 38static __cpuinitdata cycles_t last_tsc;
39static __cpuinitdata cycles_t max_warp; 39static __cpuinitdata cycles_t max_warp;
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
62 * previous TSC that was measured (possibly on 62 * previous TSC that was measured (possibly on
63 * another CPU) and update the previous TSC timestamp. 63 * another CPU) and update the previous TSC timestamp.
64 */ 64 */
65 __raw_spin_lock(&sync_lock); 65 arch_spin_lock(&sync_lock);
66 prev = last_tsc; 66 prev = last_tsc;
67 rdtsc_barrier(); 67 rdtsc_barrier();
68 now = get_cycles(); 68 now = get_cycles();
69 rdtsc_barrier(); 69 rdtsc_barrier();
70 last_tsc = now; 70 last_tsc = now;
71 __raw_spin_unlock(&sync_lock); 71 arch_spin_unlock(&sync_lock);
72 72
73 /* 73 /*
74 * Be nice every now and then (and also check whether 74 * Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
87 * we saw a time-warp of the TSC going backwards: 87 * we saw a time-warp of the TSC going backwards:
88 */ 88 */
89 if (unlikely(prev > now)) { 89 if (unlikely(prev > now)) {
90 __raw_spin_lock(&sync_lock); 90 arch_spin_lock(&sync_lock);
91 max_warp = max(max_warp, prev - now); 91 max_warp = max(max_warp, prev - now);
92 nr_warps++; 92 nr_warps++;
93 __raw_spin_unlock(&sync_lock); 93 arch_spin_unlock(&sync_lock);
94 } 94 }
95 } 95 }
96 WARN(!(now-start), 96 WARN(!(now-start),
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141108df..24ded31b5aec 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -120,14 +120,14 @@ struct xen_spinlock {
120 unsigned short spinners; /* count of waiting cpus */ 120 unsigned short spinners; /* count of waiting cpus */
121}; 121};
122 122
123static int xen_spin_is_locked(struct raw_spinlock *lock) 123static int xen_spin_is_locked(struct arch_spinlock *lock)
124{ 124{
125 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 125 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
126 126
127 return xl->lock != 0; 127 return xl->lock != 0;
128} 128}
129 129
130static int xen_spin_is_contended(struct raw_spinlock *lock) 130static int xen_spin_is_contended(struct arch_spinlock *lock)
131{ 131{
132 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 132 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
133 133
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
136 return xl->spinners != 0; 136 return xl->spinners != 0;
137} 137}
138 138
139static int xen_spin_trylock(struct raw_spinlock *lock) 139static int xen_spin_trylock(struct arch_spinlock *lock)
140{ 140{
141 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 141 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
142 u8 old = 1; 142 u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
181 __get_cpu_var(lock_spinners) = prev; 181 __get_cpu_var(lock_spinners) = prev;
182} 182}
183 183
184static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) 184static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
185{ 185{
186 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 186 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
187 struct xen_spinlock *prev; 187 struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
254 return ret; 254 return ret;
255} 255}
256 256
257static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) 257static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
258{ 258{
259 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 259 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
260 unsigned timeout; 260 unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
291 spin_time_accum_total(start_spin); 291 spin_time_accum_total(start_spin);
292} 292}
293 293
294static void xen_spin_lock(struct raw_spinlock *lock) 294static void xen_spin_lock(struct arch_spinlock *lock)
295{ 295{
296 __xen_spin_lock(lock, false); 296 __xen_spin_lock(lock, false);
297} 297}
298 298
299static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) 299static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
300{ 300{
301 __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); 301 __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
302} 302}
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
317 } 317 }
318} 318}
319 319
320static void xen_spin_unlock(struct raw_spinlock *lock) 320static void xen_spin_unlock(struct arch_spinlock *lock)
321{ 321{
322 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 322 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
323 323
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index c3f53e755ca5..5eb6d695e987 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -123,7 +123,6 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
123#define ELF_CLASS ELFCLASS32 123#define ELF_CLASS ELFCLASS32
124#define ELF_ARCH EM_XTENSA 124#define ELF_ARCH EM_XTENSA
125 125
126#define USE_ELF_CORE_DUMP
127#define ELF_EXEC_PAGESIZE PAGE_SIZE 126#define ELF_EXEC_PAGESIZE PAGE_SIZE
128 127
129/* 128/*
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a1badb32fcda..8cd38484e130 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
90 } 90 }
91 91
92 if (i < NR_IRQS) { 92 if (i < NR_IRQS) {
93 spin_lock_irqsave(&irq_desc[i].lock, flags); 93 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
94 action = irq_desc[i].action; 94 action = irq_desc[i].action;
95 if (!action) 95 if (!action)
96 goto skip; 96 goto skip;
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v)
109 109
110 seq_putc(p, '\n'); 110 seq_putc(p, '\n');
111skip: 111skip:
112 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 112 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
113 } else if (i == NR_IRQS) { 113 } else if (i == NR_IRQS) {
114 seq_printf(p, "NMI: "); 114 seq_printf(p, "NMI: ");
115 for_each_online_cpu(j) 115 for_each_online_cpu(j)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd1f1e0e196f..6ae118d6e193 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -554,11 +554,18 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
554 ret = -1; 554 ret = -1;
555 } 555 }
556 556
557 /*
558 * Temporarily disable discard granularity. It's currently buggy
559 * since we default to 0 for discard_granularity, hence this
560 * "failure" will always trigger for non-zero offsets.
561 */
562#if 0
557 if (offset && 563 if (offset &&
558 (offset & (b->discard_granularity - 1)) != b->discard_alignment) { 564 (offset & (b->discard_granularity - 1)) != b->discard_alignment) {
559 t->discard_misaligned = 1; 565 t->discard_misaligned = 1;
560 ret = -1; 566 ret = -1;
561 } 567 }
568#endif
562 569
563 /* If top has no alignment offset, inherit from bottom */ 570 /* If top has no alignment offset, inherit from bottom */
564 if (!t->alignment_offset) 571 if (!t->alignment_offset)
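The #if 0 above disables the discard-alignment check because discard_granularity currently defaults to 0, and with a zero granularity the bitmask test misfires. A small standalone demonstration of that arithmetic (the values are made up; only the expression matches the disabled check):

    #include <stdio.h>

    static int discard_misaligned(unsigned long long offset,
                                  unsigned int granularity,
                                  unsigned int alignment)
    {
        /* same test as the code inside the #if 0 block */
        return (offset & (granularity - 1)) != alignment;
    }

    int main(void)
    {
        /* granularity 0: (0 - 1) is all ones, so the test collapses to
         * "offset != alignment" and fires for any non-zero offset */
        printf("%d\n", discard_misaligned(4096, 0, 0));          /* 1 (false positive) */

        /* with a real 1 MiB granularity it behaves as intended */
        printf("%d\n", discard_misaligned(1 << 20, 1 << 20, 0)); /* 0 */
        printf("%d\n", discard_misaligned(4096,    1 << 20, 0)); /* 1 */
        return 0;
    }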
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cfb0b2f5f63d..e2f80463ed0d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
283 */ 283 */
284 struct cfq_queue oom_cfqq; 284 struct cfq_queue oom_cfqq;
285 285
286 unsigned long last_end_sync_rq; 286 unsigned long last_delayed_sync;
287 287
288 /* List of cfq groups being managed on this device*/ 288 /* List of cfq groups being managed on this device*/
289 struct hlist_head cfqg_list; 289 struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
319 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ 319 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
320 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ 320 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
321 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ 321 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
322 CFQ_CFQQ_FLAG_wait_busy_done, /* Got new request. Expire the queue */
323}; 322};
324 323
325#define CFQ_CFQQ_FNS(name) \ 324#define CFQ_CFQQ_FNS(name) \
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
348CFQ_CFQQ_FNS(coop); 347CFQ_CFQQ_FNS(coop);
349CFQ_CFQQ_FNS(deep); 348CFQ_CFQQ_FNS(deep);
350CFQ_CFQQ_FNS(wait_busy); 349CFQ_CFQQ_FNS(wait_busy);
351CFQ_CFQQ_FNS(wait_busy_done);
352#undef CFQ_CFQQ_FNS 350#undef CFQ_CFQQ_FNS
353 351
354#ifdef CONFIG_DEBUG_CFQ_IOSCHED 352#ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1574 1572
1575 cfq_clear_cfqq_wait_request(cfqq); 1573 cfq_clear_cfqq_wait_request(cfqq);
1576 cfq_clear_cfqq_wait_busy(cfqq); 1574 cfq_clear_cfqq_wait_busy(cfqq);
1577 cfq_clear_cfqq_wait_busy_done(cfqq);
1578 1575
1579 /* 1576 /*
1580 * store what was left of this slice, if the queue idled/timed out 1577 * store what was left of this slice, if the queue idled/timed out
@@ -1750,6 +1747,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1750 return NULL; 1747 return NULL;
1751 1748
1752 /* 1749 /*
1750 * Don't search priority tree if it's the only queue in the group.
1751 */
1752 if (cur_cfqq->cfqg->nr_cfqq == 1)
1753 return NULL;
1754
1755 /*
1753 * We should notice if some of the queues are cooperating, eg 1756 * We should notice if some of the queues are cooperating, eg
1754 * working closely on the same area of the disk. In that case, 1757 * working closely on the same area of the disk. In that case,
1755 * we can group them together and don't waste time idling. 1758 * we can group them together and don't waste time idling.
@@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
2110 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice; 2113 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2111 cfqd->serving_type = cfqg->saved_workload; 2114 cfqd->serving_type = cfqg->saved_workload;
2112 cfqd->serving_prio = cfqg->saved_serving_prio; 2115 cfqd->serving_prio = cfqg->saved_serving_prio;
2113 } 2116 } else
2117 cfqd->workload_expires = jiffies - 1;
2118
2114 choose_service_tree(cfqd, cfqg); 2119 choose_service_tree(cfqd, cfqg);
2115} 2120}
2116 2121
@@ -2128,14 +2133,35 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2128 2133
2129 if (!cfqd->rq_queued) 2134 if (!cfqd->rq_queued)
2130 return NULL; 2135 return NULL;
2136
2131 /* 2137 /*
2132 * The active queue has run out of time, expire it and select new. 2138 * We were waiting for group to get backlogged. Expire the queue
2133 */ 2139 */
2134 if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq)) 2140 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2135 && !cfq_cfqq_must_dispatch(cfqq))
2136 goto expire; 2141 goto expire;
2137 2142
2138 /* 2143 /*
2144 * The active queue has run out of time, expire it and select new.
2145 */
2146 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2147 /*
2148 * If slice had not expired at the completion of last request
2149 * we might not have turned on wait_busy flag. Don't expire
2150 * the queue yet. Allow the group to get backlogged.
2151 *
2152 * The very fact that we have used the slice, that means we
2153 * have been idling all along on this queue and it should be
2154 * ok to wait for this request to complete.
2155 */
2156 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2157 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2158 cfqq = NULL;
2159 goto keep_queue;
2160 } else
2161 goto expire;
2162 }
2163
2164 /*
2139 * The active queue has requests and isn't expired, allow it to 2165 * The active queue has requests and isn't expired, allow it to
2140 * dispatch. 2166 * dispatch.
2141 */ 2167 */
@@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2264 * based on the last sync IO we serviced 2290 * based on the last sync IO we serviced
2265 */ 2291 */
2266 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { 2292 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2267 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq; 2293 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2268 unsigned int depth; 2294 unsigned int depth;
2269 2295
2270 depth = last_sync / cfqd->cfq_slice[1]; 2296 depth = last_sync / cfqd->cfq_slice[1];
@@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3165 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 3191 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3166 3192
3167 if (cfqq == cfqd->active_queue) { 3193 if (cfqq == cfqd->active_queue) {
3168 if (cfq_cfqq_wait_busy(cfqq)) {
3169 cfq_clear_cfqq_wait_busy(cfqq);
3170 cfq_mark_cfqq_wait_busy_done(cfqq);
3171 }
3172 /* 3194 /*
3173 * Remember that we saw a request from this process, but 3195 * Remember that we saw a request from this process, but
3174 * don't start queuing just yet. Otherwise we risk seeing lots 3196 * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3183 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 3205 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3184 cfqd->busy_queues > 1) { 3206 cfqd->busy_queues > 1) {
3185 del_timer(&cfqd->idle_slice_timer); 3207 del_timer(&cfqd->idle_slice_timer);
3208 cfq_clear_cfqq_wait_request(cfqq);
3186 __blk_run_queue(cfqd->queue); 3209 __blk_run_queue(cfqd->queue);
3187 } else 3210 } else
3188 cfq_mark_cfqq_must_dispatch(cfqq); 3211 cfq_mark_cfqq_must_dispatch(cfqq);
@@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
3251 cfqd->hw_tag = 0; 3274 cfqd->hw_tag = 0;
3252} 3275}
3253 3276
3277static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3278{
3279 struct cfq_io_context *cic = cfqd->active_cic;
3280
3281 /* If there are other queues in the group, don't wait */
3282 if (cfqq->cfqg->nr_cfqq > 1)
3283 return false;
3284
3285 if (cfq_slice_used(cfqq))
3286 return true;
3287
3288 /* if slice left is less than think time, wait busy */
3289 if (cic && sample_valid(cic->ttime_samples)
3290 && (cfqq->slice_end - jiffies < cic->ttime_mean))
3291 return true;
3292
3293 /*
3294 * If think times is less than a jiffy than ttime_mean=0 and above
3295 * will not be true. It might happen that slice has not expired yet
3296 * but will expire soon (4-5 ns) during select_queue(). To cover the
3297 * case where think time is less than a jiffy, mark the queue wait
3298 * busy if only 1 jiffy is left in the slice.
3299 */
3300 if (cfqq->slice_end - jiffies == 1)
3301 return true;
3302
3303 return false;
3304}
3305
3254static void cfq_completed_request(struct request_queue *q, struct request *rq) 3306static void cfq_completed_request(struct request_queue *q, struct request *rq)
3255{ 3307{
3256 struct cfq_queue *cfqq = RQ_CFQQ(rq); 3308 struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3273 3325
3274 if (sync) { 3326 if (sync) {
3275 RQ_CIC(rq)->last_end_request = now; 3327 RQ_CIC(rq)->last_end_request = now;
3276 cfqd->last_end_sync_rq = now; 3328 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3329 cfqd->last_delayed_sync = now;
3277 } 3330 }
3278 3331
3279 /* 3332 /*
@@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3289 } 3342 }
3290 3343
3291 /* 3344 /*
3292 * If this queue consumed its slice and this is last queue 3345 * Should we wait for next request to come in before we expire
3293 * in the group, wait for next request before we expire 3346 * the queue.
3294 * the queue
3295 */ 3347 */
3296 if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) { 3348 if (cfq_should_wait_busy(cfqd, cfqq)) {
3297 cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; 3349 cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
3298 cfq_mark_cfqq_wait_busy(cfqq); 3350 cfq_mark_cfqq_wait_busy(cfqq);
3299 } 3351 }
@@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q)
3711 cfqd->cfq_latency = 1; 3763 cfqd->cfq_latency = 1;
3712 cfqd->cfq_group_isolation = 0; 3764 cfqd->cfq_group_isolation = 0;
3713 cfqd->hw_tag = -1; 3765 cfqd->hw_tag = -1;
3714 cfqd->last_end_sync_rq = jiffies; 3766 /*
3767 * we optimistically start assuming sync ops weren't delayed in last
3768 * second, in order to have larger depth for async operations.
3769 */
3770 cfqd->last_delayed_sync = jiffies - HZ;
3715 INIT_RCU_HEAD(&cfqd->rcu); 3771 INIT_RCU_HEAD(&cfqd->rcu);
3716 return cfqd; 3772 return cfqd;
3717} 3773}
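The new cfq_should_wait_busy() above decides whether a queue's slice is kept alive a little longer so its group does not lose its fair share. The sketch below mirrors that decision with plain parameters; it is illustrative only, and the inputs (queue count, slice state, think time) are simplifications of what cfqq and the active io-context actually provide.

    #include <stdio.h>
    #include <stdbool.h>

    static bool should_wait_busy(int queues_in_group, bool slice_used,
                                 long slice_left, long think_time, bool ttime_valid)
    {
        if (queues_in_group > 1)      /* other queues keep the group busy */
            return false;
        if (slice_used)               /* slice fully consumed: wait */
            return true;
        if (ttime_valid && slice_left < think_time)
            return true;              /* slice ends before the next request */
        if (slice_left == 1)          /* sub-jiffy think time corner case */
            return true;
        return false;
    }

    int main(void)
    {
        printf("%d\n", should_wait_busy(1, true,  0,  0, false)); /* 1 */
        printf("%d\n", should_wait_busy(2, true,  0,  0, false)); /* 0 */
        printf("%d\n", should_wait_busy(1, false, 3,  8, true));  /* 1 */
        printf("%d\n", should_wait_busy(1, false, 10, 2, true));  /* 0 */
        return 0;
    }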
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 26e434ad373c..8a07363417ed 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -96,6 +96,8 @@ source "drivers/edac/Kconfig"
96 96
97source "drivers/rtc/Kconfig" 97source "drivers/rtc/Kconfig"
98 98
99source "drivers/clocksource/Kconfig"
100
99source "drivers/dma/Kconfig" 101source "drivers/dma/Kconfig"
100 102
101source "drivers/dca/Kconfig" 103source "drivers/dca/Kconfig"
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index f734b345ac71..25a4c86f839b 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -557,7 +557,7 @@ static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
557 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 557 memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
558 } /* while */ 558 } /* while */
559 // Move this VCI number into this location of the CBR Sched table. 559 // Move this VCI number into this location of the CBR Sched table.
560 memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl)); 560 memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
561 dev->CbrRemEntries--; 561 dev->CbrRemEntries--;
562 toBeAssigned--; 562 toBeAssigned--;
563 } /* while */ 563 } /* while */
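The iphase fix above is the classic sizeof-on-a-pointer bug: sizeof(TstSchedTbl) is the size of the pointer itself, not of the schedule entry it points at, so the old memcpy() clobbered neighbouring CBR table entries. A quick standalone illustration; the 16-bit entry type is an assumption made for the example:

    #include <stdio.h>

    int main(void)
    {
        unsigned short sched_tbl[64];
        unsigned short *TstSchedTbl = sched_tbl;

        printf("sizeof(TstSchedTbl)  = %zu\n", sizeof(TstSchedTbl));  /* 4 or 8 */
        printf("sizeof(*TstSchedTbl) = %zu\n", sizeof(*TstSchedTbl)); /* 2 */
        return 0;
    }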
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1fe5536d404f..70122791683d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -173,6 +173,47 @@ static ssize_t node_read_distance(struct sys_device * dev,
173} 173}
174static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); 174static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
175 175
176#ifdef CONFIG_HUGETLBFS
177/*
178 * hugetlbfs per node attributes registration interface:
179 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
180 * it will register its per node attributes for all online nodes with
181 * memory. It will also call register_hugetlbfs_with_node(), below, to
182 * register its attribute registration functions with this node driver.
183 * Once these hooks have been initialized, the node driver will call into
184 * the hugetlb module to [un]register attributes for hot-plugged nodes.
185 */
186static node_registration_func_t __hugetlb_register_node;
187static node_registration_func_t __hugetlb_unregister_node;
188
189static inline bool hugetlb_register_node(struct node *node)
190{
191 if (__hugetlb_register_node &&
192 node_state(node->sysdev.id, N_HIGH_MEMORY)) {
193 __hugetlb_register_node(node);
194 return true;
195 }
196 return false;
197}
198
199static inline void hugetlb_unregister_node(struct node *node)
200{
201 if (__hugetlb_unregister_node)
202 __hugetlb_unregister_node(node);
203}
204
205void register_hugetlbfs_with_node(node_registration_func_t doregister,
206 node_registration_func_t unregister)
207{
208 __hugetlb_register_node = doregister;
209 __hugetlb_unregister_node = unregister;
210}
211#else
212static inline void hugetlb_register_node(struct node *node) {}
213
214static inline void hugetlb_unregister_node(struct node *node) {}
215#endif
216
176 217
177/* 218/*
178 * register_node - Setup a sysfs device for a node. 219 * register_node - Setup a sysfs device for a node.
@@ -196,6 +237,8 @@ int register_node(struct node *node, int num, struct node *parent)
196 sysdev_create_file(&node->sysdev, &attr_distance); 237 sysdev_create_file(&node->sysdev, &attr_distance);
197 238
198 scan_unevictable_register_node(node); 239 scan_unevictable_register_node(node);
240
241 hugetlb_register_node(node);
199 } 242 }
200 return error; 243 return error;
201} 244}
@@ -216,6 +259,7 @@ void unregister_node(struct node *node)
216 sysdev_remove_file(&node->sysdev, &attr_distance); 259 sysdev_remove_file(&node->sysdev, &attr_distance);
217 260
218 scan_unevictable_unregister_node(node); 261 scan_unevictable_unregister_node(node);
262 hugetlb_unregister_node(node); /* no-op, if memoryless node */
219 263
220 sysdev_unregister(&node->sysdev); 264 sysdev_unregister(&node->sysdev);
221} 265}
@@ -227,26 +271,43 @@ struct node node_devices[MAX_NUMNODES];
227 */ 271 */
228int register_cpu_under_node(unsigned int cpu, unsigned int nid) 272int register_cpu_under_node(unsigned int cpu, unsigned int nid)
229{ 273{
230 if (node_online(nid)) { 274 int ret;
231 struct sys_device *obj = get_cpu_sysdev(cpu); 275 struct sys_device *obj;
232 if (!obj)
233 return 0;
234 return sysfs_create_link(&node_devices[nid].sysdev.kobj,
235 &obj->kobj,
236 kobject_name(&obj->kobj));
237 }
238 276
239 return 0; 277 if (!node_online(nid))
278 return 0;
279
280 obj = get_cpu_sysdev(cpu);
281 if (!obj)
282 return 0;
283
284 ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
285 &obj->kobj,
286 kobject_name(&obj->kobj));
287 if (ret)
288 return ret;
289
290 return sysfs_create_link(&obj->kobj,
291 &node_devices[nid].sysdev.kobj,
292 kobject_name(&node_devices[nid].sysdev.kobj));
240} 293}
241 294
242int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) 295int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
243{ 296{
244 if (node_online(nid)) { 297 struct sys_device *obj;
245 struct sys_device *obj = get_cpu_sysdev(cpu); 298
246 if (obj) 299 if (!node_online(nid))
247 sysfs_remove_link(&node_devices[nid].sysdev.kobj, 300 return 0;
248 kobject_name(&obj->kobj)); 301
249 } 302 obj = get_cpu_sysdev(cpu);
303 if (!obj)
304 return 0;
305
306 sysfs_remove_link(&node_devices[nid].sysdev.kobj,
307 kobject_name(&obj->kobj));
308 sysfs_remove_link(&obj->kobj,
309 kobject_name(&node_devices[nid].sysdev.kobj));
310
250 return 0; 311 return 0;
251} 312}
252 313
@@ -268,6 +329,7 @@ static int get_nid_for_pfn(unsigned long pfn)
268/* register memory section under specified node if it spans that node */ 329/* register memory section under specified node if it spans that node */
269int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) 330int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
270{ 331{
332 int ret;
271 unsigned long pfn, sect_start_pfn, sect_end_pfn; 333 unsigned long pfn, sect_start_pfn, sect_end_pfn;
272 334
273 if (!mem_blk) 335 if (!mem_blk)
@@ -284,9 +346,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
284 continue; 346 continue;
285 if (page_nid != nid) 347 if (page_nid != nid)
286 continue; 348 continue;
287 return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, 349 ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
288 &mem_blk->sysdev.kobj, 350 &mem_blk->sysdev.kobj,
289 kobject_name(&mem_blk->sysdev.kobj)); 351 kobject_name(&mem_blk->sysdev.kobj));
352 if (ret)
353 return ret;
354
355 return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
356 &node_devices[nid].sysdev.kobj,
357 kobject_name(&node_devices[nid].sysdev.kobj));
290 } 358 }
291 /* mem section does not span the specified node */ 359 /* mem section does not span the specified node */
292 return 0; 360 return 0;
@@ -295,12 +363,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
295/* unregister memory section under all nodes that it spans */ 363/* unregister memory section under all nodes that it spans */
296int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) 364int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
297{ 365{
298 nodemask_t unlinked_nodes; 366 NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
299 unsigned long pfn, sect_start_pfn, sect_end_pfn; 367 unsigned long pfn, sect_start_pfn, sect_end_pfn;
300 368
301 if (!mem_blk) 369 if (!mem_blk) {
370 NODEMASK_FREE(unlinked_nodes);
302 return -EFAULT; 371 return -EFAULT;
303 nodes_clear(unlinked_nodes); 372 }
373 if (!unlinked_nodes)
374 return -ENOMEM;
375 nodes_clear(*unlinked_nodes);
304 sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); 376 sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
305 sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; 377 sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
306 for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { 378 for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
@@ -311,11 +383,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
311 continue; 383 continue;
312 if (!node_online(nid)) 384 if (!node_online(nid))
313 continue; 385 continue;
314 if (node_test_and_set(nid, unlinked_nodes)) 386 if (node_test_and_set(nid, *unlinked_nodes))
315 continue; 387 continue;
316 sysfs_remove_link(&node_devices[nid].sysdev.kobj, 388 sysfs_remove_link(&node_devices[nid].sysdev.kobj,
317 kobject_name(&mem_blk->sysdev.kobj)); 389 kobject_name(&mem_blk->sysdev.kobj));
390 sysfs_remove_link(&mem_blk->sysdev.kobj,
391 kobject_name(&node_devices[nid].sysdev.kobj));
318 } 392 }
393 NODEMASK_FREE(unlinked_nodes);
319 return 0; 394 return 0;
320} 395}
321 396
@@ -345,9 +420,77 @@ static int link_mem_sections(int nid)
345 } 420 }
346 return err; 421 return err;
347} 422}
348#else 423
424#ifdef CONFIG_HUGETLBFS
425/*
 426 * Handle per node hstate attribute [un]registration on transitions
427 * to/from memoryless state.
428 */
429static void node_hugetlb_work(struct work_struct *work)
430{
431 struct node *node = container_of(work, struct node, node_work);
432
433 /*
434 * We only get here when a node transitions to/from memoryless state.
435 * We can detect which transition occurred by examining whether the
 436 * node has memory now. hugetlb_register_node() already checks this
437 * so we try to register the attributes. If that fails, then the
438 * node has transitioned to memoryless, try to unregister the
439 * attributes.
440 */
441 if (!hugetlb_register_node(node))
442 hugetlb_unregister_node(node);
443}
444
445static void init_node_hugetlb_work(int nid)
446{
447 INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
448}
449
450static int node_memory_callback(struct notifier_block *self,
451 unsigned long action, void *arg)
452{
453 struct memory_notify *mnb = arg;
454 int nid = mnb->status_change_nid;
455
456 switch (action) {
457 case MEM_ONLINE:
458 case MEM_OFFLINE:
459 /*
460 * offload per node hstate [un]registration to a work thread
461 * when transitioning to/from memoryless state.
462 */
463 if (nid != NUMA_NO_NODE)
464 schedule_work(&node_devices[nid].node_work);
465 break;
466
467 case MEM_GOING_ONLINE:
468 case MEM_GOING_OFFLINE:
469 case MEM_CANCEL_ONLINE:
470 case MEM_CANCEL_OFFLINE:
471 default:
472 break;
473 }
474
475 return NOTIFY_OK;
476}
477#endif /* CONFIG_HUGETLBFS */
478#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
479
349static int link_mem_sections(int nid) { return 0; } 480static int link_mem_sections(int nid) { return 0; }
350#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 481#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
482
483#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
484 !defined(CONFIG_HUGETLBFS)
485static inline int node_memory_callback(struct notifier_block *self,
486 unsigned long action, void *arg)
487{
488 return NOTIFY_OK;
489}
490
491static void init_node_hugetlb_work(int nid) { }
492
493#endif
351 494
352int register_one_node(int nid) 495int register_one_node(int nid)
353{ 496{
@@ -371,6 +514,9 @@ int register_one_node(int nid)
371 514
372 /* link memory sections under this node */ 515 /* link memory sections under this node */
373 error = link_mem_sections(nid); 516 error = link_mem_sections(nid);
517
518 /* initialize work queue for memory hot plug */
519 init_node_hugetlb_work(nid);
374 } 520 }
375 521
376 return error; 522 return error;
@@ -460,13 +606,17 @@ static int node_states_init(void)
460 return err; 606 return err;
461} 607}
462 608
609#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
463static int __init register_node_type(void) 610static int __init register_node_type(void)
464{ 611{
465 int ret; 612 int ret;
466 613
467 ret = sysdev_class_register(&node_class); 614 ret = sysdev_class_register(&node_class);
468 if (!ret) 615 if (!ret) {
469 ret = node_states_init(); 616 ret = node_states_init();
617 hotplug_memory_notifier(node_memory_callback,
618 NODE_CALLBACK_PRI);
619 }
470 620
471 /* 621 /*
472 * Note: we're not going to unregister the node class if we fail 622 * Note: we're not going to unregister the node class if we fail
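The node.c changes above let hugetlbfs, which initialises later, hand the node driver two callbacks for its per-node hstate attributes; until those callbacks are installed the node driver's calls are no-ops. A minimal sketch of that hand-shake pattern (simplified: the hooks here take a node id instead of a struct node *, and the demo names are only illustrative):

    #include <stdio.h>

    typedef void (*node_registration_func_t)(int nid);

    static node_registration_func_t register_hook;
    static node_registration_func_t unregister_hook;

    /* the later-initialising subsystem installs its hooks here */
    static void register_hugetlbfs_with_node(node_registration_func_t reg,
                                             node_registration_func_t unreg)
    {
        register_hook = reg;
        unregister_hook = unreg;
    }

    /* called by the node driver on hot-plug; harmless before registration */
    static void node_online(int nid)
    {
        if (register_hook)
            register_hook(nid);
    }

    static void node_offline(int nid)
    {
        if (unregister_hook)
            unregister_hook(nid);
    }

    static void demo_register(int nid)   { printf("register attrs, node %d\n", nid); }
    static void demo_unregister(int nid) { printf("remove attrs, node %d\n", nid); }

    int main(void)
    {
        node_online(0);                 /* too early: silently skipped */
        register_hugetlbfs_with_node(demo_register, demo_unregister);
        node_online(1);                 /* now the hook fires */
        node_offline(1);
        return 0;
    }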
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 436a090b532b..4e0726aa53b0 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1271,8 +1271,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1271 goto fail; 1271 goto fail;
1272 } 1272 }
1273 1273
1274 if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) 1274 if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
1275 != CRYPTO_ALG_TYPE_HASH) {
1276 retcode = ERR_AUTH_ALG_ND; 1275 retcode = ERR_AUTH_ALG_ND;
1277 goto fail; 1276 goto fail;
1278 } 1277 }
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 5c01f747571b..3266b4f65daa 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3497,6 +3497,9 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
3497 ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))) 3497 ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
3498 return -EPERM; 3498 return -EPERM;
3499 3499
3500 if (WARN_ON(size < 0 || size > sizeof(inparam)))
3501 return -EINVAL;
3502
3500 /* copyin */ 3503 /* copyin */
3501 CLEARSTRUCT(&inparam); 3504 CLEARSTRUCT(&inparam);
3502 if (_IOC_DIR(cmd) & _IOC_WRITE) 3505 if (_IOC_DIR(cmd) & _IOC_WRITE)
@@ -4162,7 +4165,7 @@ static int floppy_resume(struct device *dev)
4162 return 0; 4165 return 0;
4163} 4166}
4164 4167
4165static struct dev_pm_ops floppy_pm_ops = { 4168static const struct dev_pm_ops floppy_pm_ops = {
4166 .resume = floppy_resume, 4169 .resume = floppy_resume,
4167 .restore = floppy_resume, 4170 .restore = floppy_resume,
4168}; 4171};
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0877d3628fda..d1fd032e7514 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -169,13 +169,6 @@ static int __init xd_init(void)
169 169
170 init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog; 170 init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
171 171
172 if (!xd_dma_buffer)
173 xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
174 if (!xd_dma_buffer) {
175 printk(KERN_ERR "xd: Out of memory.\n");
176 return -ENOMEM;
177 }
178
179 err = -EBUSY; 172 err = -EBUSY;
180 if (register_blkdev(XT_DISK_MAJOR, "xd")) 173 if (register_blkdev(XT_DISK_MAJOR, "xd"))
181 goto out1; 174 goto out1;
@@ -202,6 +195,19 @@ static int __init xd_init(void)
202 xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma); 195 xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
203 } 196 }
204 197
198 /*
199 * With the drive detected, xd_maxsectors should now be known.
200 * If xd_maxsectors is 0, nothing was detected and we fall through
201 * to return -ENODEV
202 */
203 if (!xd_dma_buffer && xd_maxsectors) {
204 xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
205 if (!xd_dma_buffer) {
206 printk(KERN_ERR "xd: Out of memory.\n");
207 goto out3;
208 }
209 }
210
205 err = -ENODEV; 211 err = -ENODEV;
206 if (!xd_drives) 212 if (!xd_drives)
207 goto out3; 213 goto out3;
@@ -249,15 +255,17 @@ out4:
249 for (i = 0; i < xd_drives; i++) 255 for (i = 0; i < xd_drives; i++)
250 put_disk(xd_gendisk[i]); 256 put_disk(xd_gendisk[i]);
251out3: 257out3:
252 release_region(xd_iobase,4); 258 if (xd_maxsectors)
259 release_region(xd_iobase,4);
260
261 if (xd_dma_buffer)
262 xd_dma_mem_free((unsigned long)xd_dma_buffer,
263 xd_maxsectors * 0x200);
253out2: 264out2:
254 blk_cleanup_queue(xd_queue); 265 blk_cleanup_queue(xd_queue);
255out1a: 266out1a:
256 unregister_blkdev(XT_DISK_MAJOR, "xd"); 267 unregister_blkdev(XT_DISK_MAJOR, "xd");
257out1: 268out1:
258 if (xd_dma_buffer)
259 xd_dma_mem_free((unsigned long)xd_dma_buffer,
260 xd_maxsectors * 0x200);
261 return err; 269 return err;
262Enomem: 270Enomem:
263 err = -ENOMEM; 271 err = -ENOMEM;
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 26a47dc88f61..53c524e7b829 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -285,6 +285,7 @@ static const struct file_operations efi_rtc_fops = {
285 .unlocked_ioctl = efi_rtc_ioctl, 285 .unlocked_ioctl = efi_rtc_ioctl,
286 .open = efi_rtc_open, 286 .open = efi_rtc_open,
287 .release = efi_rtc_close, 287 .release = efi_rtc_close,
288 .llseek = no_llseek,
288}; 289};
289 290
290static struct miscdevice efi_rtc_dev= { 291static struct miscdevice efi_rtc_dev= {
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index b8a5d654d3d0..fe62bd0e17b7 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -931,7 +931,7 @@ static struct hv_ops hvc_iucv_ops = {
931}; 931};
932 932
933/* Suspend / resume device operations */ 933/* Suspend / resume device operations */
934static struct dev_pm_ops hvc_iucv_pm_ops = { 934static const struct dev_pm_ops hvc_iucv_pm_ops = {
935 .freeze = hvc_iucv_pm_freeze, 935 .freeze = hvc_iucv_pm_freeze,
936 .thaw = hvc_iucv_pm_restore_thaw, 936 .thaw = hvc_iucv_pm_restore_thaw,
937 .restore = hvc_iucv_pm_restore_thaw, 937 .restore = hvc_iucv_pm_restore_thaw,
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 80704875794c..cf82fedae099 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -370,7 +370,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
370 return SI_SM_IDLE; 370 return SI_SM_IDLE;
371 371
372 case KCS_START_OP: 372 case KCS_START_OP:
373 if (state != KCS_IDLE) { 373 if (state != KCS_IDLE_STATE) {
374 start_error_recovery(kcs, 374 start_error_recovery(kcs,
375 "State machine not idle at start"); 375 "State machine not idle at start");
376 break; 376 break;
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 5619007e7e05..f706b1dffdb3 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -233,7 +233,8 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
233} 233}
234 234
235/* 235/*
236 * Making beeps and bells. 236 * Making beeps and bells. Note that we prefer beeps to bells, but when
237 * shutting the sound off we do both.
237 */ 238 */
238 239
239static int kd_sound_helper(struct input_handle *handle, void *data) 240static int kd_sound_helper(struct input_handle *handle, void *data)
@@ -242,9 +243,12 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
242 struct input_dev *dev = handle->dev; 243 struct input_dev *dev = handle->dev;
243 244
244 if (test_bit(EV_SND, dev->evbit)) { 245 if (test_bit(EV_SND, dev->evbit)) {
245 if (test_bit(SND_TONE, dev->sndbit)) 246 if (test_bit(SND_TONE, dev->sndbit)) {
246 input_inject_event(handle, EV_SND, SND_TONE, *hz); 247 input_inject_event(handle, EV_SND, SND_TONE, *hz);
247 if (test_bit(SND_BELL, handle->dev->sndbit)) 248 if (*hz)
249 return 0;
250 }
251 if (test_bit(SND_BELL, dev->sndbit))
248 input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0); 252 input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0);
249 } 253 }
250 254
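The keyboard.c hunk above reworks kd_sound_helper() so a tone-capable device gets only SND_TONE while the beep is audible, but a "sound off" request (hz == 0) is sent as both tone-off and bell-off. A plain-C sketch of that decision, with the capability bits reduced to booleans for illustration:

    #include <stdio.h>
    #include <stdbool.h>

    static void sound_helper(bool has_tone, bool has_bell, unsigned int hz)
    {
        if (has_tone) {
            printf("SND_TONE %u\n", hz);
            if (hz)
                return;          /* audible tone: the bell is not needed */
        }
        if (has_bell)
            printf("SND_BELL %u\n", hz ? 1 : 0);
    }

    int main(void)
    {
        sound_helper(true,  true, 440);  /* beep: tone only */
        sound_helper(true,  true, 0);    /* off: silence tone and bell */
        sound_helper(false, true, 440);  /* no tone capability: ring the bell */
        return 0;
    }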
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index fba76fb55abf..be832b6f8279 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -34,6 +34,16 @@
34# include <linux/efi.h> 34# include <linux/efi.h>
35#endif 35#endif
36 36
37static inline unsigned long size_inside_page(unsigned long start,
38 unsigned long size)
39{
40 unsigned long sz;
41
42 sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
43
44 return min(sz, size);
45}
46
37/* 47/*
38 * Architectures vary in how they handle caching for addresses 48 * Architectures vary in how they handle caching for addresses
39 * outside of main memory. 49 * outside of main memory.
@@ -126,9 +136,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
126#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 136#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
127 /* we don't have page 0 mapped on sparc and m68k.. */ 137 /* we don't have page 0 mapped on sparc and m68k.. */
128 if (p < PAGE_SIZE) { 138 if (p < PAGE_SIZE) {
129 sz = PAGE_SIZE - p; 139 sz = size_inside_page(p, count);
130 if (sz > count)
131 sz = count;
132 if (sz > 0) { 140 if (sz > 0) {
133 if (clear_user(buf, sz)) 141 if (clear_user(buf, sz))
134 return -EFAULT; 142 return -EFAULT;
@@ -141,15 +149,9 @@ static ssize_t read_mem(struct file * file, char __user * buf,
141#endif 149#endif
142 150
143 while (count > 0) { 151 while (count > 0) {
144 /* 152 unsigned long remaining;
145 * Handle first page in case it's not aligned
146 */
147 if (-p & (PAGE_SIZE - 1))
148 sz = -p & (PAGE_SIZE - 1);
149 else
150 sz = PAGE_SIZE;
151 153
152 sz = min_t(unsigned long, sz, count); 154 sz = size_inside_page(p, count);
153 155
154 if (!range_is_allowed(p >> PAGE_SHIFT, count)) 156 if (!range_is_allowed(p >> PAGE_SHIFT, count))
155 return -EPERM; 157 return -EPERM;
@@ -163,12 +165,10 @@ static ssize_t read_mem(struct file * file, char __user * buf,
163 if (!ptr) 165 if (!ptr)
164 return -EFAULT; 166 return -EFAULT;
165 167
166 if (copy_to_user(buf, ptr, sz)) { 168 remaining = copy_to_user(buf, ptr, sz);
167 unxlate_dev_mem_ptr(p, ptr);
168 return -EFAULT;
169 }
170
171 unxlate_dev_mem_ptr(p, ptr); 169 unxlate_dev_mem_ptr(p, ptr);
170 if (remaining)
171 return -EFAULT;
172 172
173 buf += sz; 173 buf += sz;
174 p += sz; 174 p += sz;
@@ -196,9 +196,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
196#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 196#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
197 /* we don't have page 0 mapped on sparc and m68k.. */ 197 /* we don't have page 0 mapped on sparc and m68k.. */
198 if (p < PAGE_SIZE) { 198 if (p < PAGE_SIZE) {
199 unsigned long sz = PAGE_SIZE - p; 199 sz = size_inside_page(p, count);
200 if (sz > count)
201 sz = count;
202 /* Hmm. Do something? */ 200 /* Hmm. Do something? */
203 buf += sz; 201 buf += sz;
204 p += sz; 202 p += sz;
@@ -208,15 +206,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
208#endif 206#endif
209 207
210 while (count > 0) { 208 while (count > 0) {
211 /* 209 sz = size_inside_page(p, count);
212 * Handle first page in case it's not aligned
213 */
214 if (-p & (PAGE_SIZE - 1))
215 sz = -p & (PAGE_SIZE - 1);
216 else
217 sz = PAGE_SIZE;
218
219 sz = min_t(unsigned long, sz, count);
220 210
221 if (!range_is_allowed(p >> PAGE_SHIFT, sz)) 211 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
222 return -EPERM; 212 return -EPERM;
@@ -234,16 +224,14 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
234 } 224 }
235 225
236 copied = copy_from_user(ptr, buf, sz); 226 copied = copy_from_user(ptr, buf, sz);
227 unxlate_dev_mem_ptr(p, ptr);
237 if (copied) { 228 if (copied) {
238 written += sz - copied; 229 written += sz - copied;
239 unxlate_dev_mem_ptr(p, ptr);
240 if (written) 230 if (written)
241 break; 231 break;
242 return -EFAULT; 232 return -EFAULT;
243 } 233 }
244 234
245 unxlate_dev_mem_ptr(p, ptr);
246
247 buf += sz; 235 buf += sz;
248 p += sz; 236 p += sz;
249 count -= sz; 237 count -= sz;
@@ -417,27 +405,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
417#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 405#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
418 /* we don't have page 0 mapped on sparc and m68k.. */ 406 /* we don't have page 0 mapped on sparc and m68k.. */
419 if (p < PAGE_SIZE && low_count > 0) { 407 if (p < PAGE_SIZE && low_count > 0) {
420 size_t tmp = PAGE_SIZE - p; 408 sz = size_inside_page(p, low_count);
421 if (tmp > low_count) tmp = low_count; 409 if (clear_user(buf, sz))
422 if (clear_user(buf, tmp))
423 return -EFAULT; 410 return -EFAULT;
424 buf += tmp; 411 buf += sz;
425 p += tmp; 412 p += sz;
426 read += tmp; 413 read += sz;
427 low_count -= tmp; 414 low_count -= sz;
428 count -= tmp; 415 count -= sz;
429 } 416 }
430#endif 417#endif
431 while (low_count > 0) { 418 while (low_count > 0) {
432 /* 419 sz = size_inside_page(p, low_count);
433 * Handle first page in case it's not aligned
434 */
435 if (-p & (PAGE_SIZE - 1))
436 sz = -p & (PAGE_SIZE - 1);
437 else
438 sz = PAGE_SIZE;
439
440 sz = min_t(unsigned long, sz, low_count);
441 420
442 /* 421 /*
443 * On ia64 if a page has been mapped somewhere as 422 * On ia64 if a page has been mapped somewhere as
@@ -461,21 +440,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
461 if (!kbuf) 440 if (!kbuf)
462 return -ENOMEM; 441 return -ENOMEM;
463 while (count > 0) { 442 while (count > 0) {
464 int len = count; 443 sz = size_inside_page(p, count);
465 444 sz = vread(kbuf, (char *)p, sz);
466 if (len > PAGE_SIZE) 445 if (!sz)
467 len = PAGE_SIZE;
468 len = vread(kbuf, (char *)p, len);
469 if (!len)
470 break; 446 break;
471 if (copy_to_user(buf, kbuf, len)) { 447 if (copy_to_user(buf, kbuf, sz)) {
472 free_page((unsigned long)kbuf); 448 free_page((unsigned long)kbuf);
473 return -EFAULT; 449 return -EFAULT;
474 } 450 }
475 count -= len; 451 count -= sz;
476 buf += len; 452 buf += sz;
477 read += len; 453 read += sz;
478 p += len; 454 p += sz;
479 } 455 }
480 free_page((unsigned long)kbuf); 456 free_page((unsigned long)kbuf);
481 } 457 }
@@ -485,7 +461,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
485 461
486 462
487static inline ssize_t 463static inline ssize_t
488do_write_kmem(void *p, unsigned long realp, const char __user * buf, 464do_write_kmem(unsigned long p, const char __user *buf,
489 size_t count, loff_t *ppos) 465 size_t count, loff_t *ppos)
490{ 466{
491 ssize_t written, sz; 467 ssize_t written, sz;
@@ -494,14 +470,11 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
494 written = 0; 470 written = 0;
495#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 471#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
496 /* we don't have page 0 mapped on sparc and m68k.. */ 472 /* we don't have page 0 mapped on sparc and m68k.. */
497 if (realp < PAGE_SIZE) { 473 if (p < PAGE_SIZE) {
498 unsigned long sz = PAGE_SIZE - realp; 474 sz = size_inside_page(p, count);
499 if (sz > count)
500 sz = count;
501 /* Hmm. Do something? */ 475 /* Hmm. Do something? */
502 buf += sz; 476 buf += sz;
503 p += sz; 477 p += sz;
504 realp += sz;
505 count -= sz; 478 count -= sz;
506 written += sz; 479 written += sz;
507 } 480 }
@@ -509,22 +482,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
509 482
510 while (count > 0) { 483 while (count > 0) {
511 char *ptr; 484 char *ptr;
512 /*
513 * Handle first page in case it's not aligned
514 */
515 if (-realp & (PAGE_SIZE - 1))
516 sz = -realp & (PAGE_SIZE - 1);
517 else
518 sz = PAGE_SIZE;
519 485
520 sz = min_t(unsigned long, sz, count); 486 sz = size_inside_page(p, count);
521 487
522 /* 488 /*
523 * On ia64 if a page has been mapped somewhere as 489 * On ia64 if a page has been mapped somewhere as
524 * uncached, then it must also be accessed uncached 490 * uncached, then it must also be accessed uncached
525 * by the kernel or data corruption may occur 491 * by the kernel or data corruption may occur
526 */ 492 */
527 ptr = xlate_dev_kmem_ptr(p); 493 ptr = xlate_dev_kmem_ptr((char *)p);
528 494
529 copied = copy_from_user(ptr, buf, sz); 495 copied = copy_from_user(ptr, buf, sz);
530 if (copied) { 496 if (copied) {
@@ -535,7 +501,6 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
535 } 501 }
536 buf += sz; 502 buf += sz;
537 p += sz; 503 p += sz;
538 realp += sz;
539 count -= sz; 504 count -= sz;
540 written += sz; 505 written += sz;
541 } 506 }
@@ -554,19 +519,14 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
554 unsigned long p = *ppos; 519 unsigned long p = *ppos;
555 ssize_t wrote = 0; 520 ssize_t wrote = 0;
556 ssize_t virtr = 0; 521 ssize_t virtr = 0;
557 ssize_t written;
558 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ 522 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
559 523
560 if (p < (unsigned long) high_memory) { 524 if (p < (unsigned long) high_memory) {
561 525 unsigned long to_write = min_t(unsigned long, count,
562 wrote = count; 526 (unsigned long)high_memory - p);
563 if (count > (unsigned long) high_memory - p) 527 wrote = do_write_kmem(p, buf, to_write, ppos);
564 wrote = (unsigned long) high_memory - p; 528 if (wrote != to_write)
565 529 return wrote;
566 written = do_write_kmem((void*)p, p, buf, wrote, ppos);
567 if (written != wrote)
568 return written;
569 wrote = written;
570 p += wrote; 530 p += wrote;
571 buf += wrote; 531 buf += wrote;
572 count -= wrote; 532 count -= wrote;
@@ -577,24 +537,21 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
577 if (!kbuf) 537 if (!kbuf)
578 return wrote ? wrote : -ENOMEM; 538 return wrote ? wrote : -ENOMEM;
579 while (count > 0) { 539 while (count > 0) {
580 int len = count; 540 unsigned long sz = size_inside_page(p, count);
581 541 unsigned long n;
582 if (len > PAGE_SIZE) 542
583 len = PAGE_SIZE; 543 n = copy_from_user(kbuf, buf, sz);
584 if (len) { 544 if (n) {
585 written = copy_from_user(kbuf, buf, len); 545 if (wrote + virtr)
586 if (written) { 546 break;
587 if (wrote + virtr) 547 free_page((unsigned long)kbuf);
588 break; 548 return -EFAULT;
589 free_page((unsigned long)kbuf);
590 return -EFAULT;
591 }
592 } 549 }
593 len = vwrite(kbuf, (char *)p, len); 550 sz = vwrite(kbuf, (char *)p, sz);
594 count -= len; 551 count -= sz;
595 buf += len; 552 buf += sz;
596 virtr += len; 553 virtr += sz;
597 p += len; 554 p += sz;
598 } 555 }
599 free_page((unsigned long)kbuf); 556 free_page((unsigned long)kbuf);
600 } 557 }
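The repeated open-coded "how much fits in the current page" arithmetic in mem.c is collapsed into the new size_inside_page() helper above. Its behaviour, shown standalone with an assumed 4 KiB page size:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* bytes of 'size' that can be handled before crossing a page boundary */
    static unsigned long size_inside_page(unsigned long start, unsigned long size)
    {
        unsigned long sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return sz < size ? sz : size;
    }

    int main(void)
    {
        printf("%lu\n", size_inside_page(0, 10000));    /* 4096: whole first page */
        printf("%lu\n", size_inside_page(4000, 10000)); /* 96: up to the boundary */
        printf("%lu\n", size_inside_page(4000, 50));    /* 50: request fits as is */
        return 0;
    }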
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 96f1cd086dd2..94a136e96c06 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -60,9 +60,7 @@ static DEFINE_MUTEX(misc_mtx);
60 * Assigned numbers, used for dynamic minors 60 * Assigned numbers, used for dynamic minors
61 */ 61 */
62#define DYNAMIC_MINORS 64 /* like dynamic majors */ 62#define DYNAMIC_MINORS 64 /* like dynamic majors */
63static unsigned char misc_minors[DYNAMIC_MINORS / 8]; 63static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
64
65extern int pmu_device_init(void);
66 64
67#ifdef CONFIG_PROC_FS 65#ifdef CONFIG_PROC_FS
68static void *misc_seq_start(struct seq_file *seq, loff_t *pos) 66static void *misc_seq_start(struct seq_file *seq, loff_t *pos)
@@ -198,24 +196,23 @@ int misc_register(struct miscdevice * misc)
198 } 196 }
199 197
200 if (misc->minor == MISC_DYNAMIC_MINOR) { 198 if (misc->minor == MISC_DYNAMIC_MINOR) {
201 int i = DYNAMIC_MINORS; 199 int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
202 while (--i >= 0) 200 if (i >= DYNAMIC_MINORS) {
203 if ( (misc_minors[i>>3] & (1 << (i&7))) == 0)
204 break;
205 if (i<0) {
206 mutex_unlock(&misc_mtx); 201 mutex_unlock(&misc_mtx);
207 return -EBUSY; 202 return -EBUSY;
208 } 203 }
209 misc->minor = i; 204 misc->minor = DYNAMIC_MINORS - i - 1;
205 set_bit(i, misc_minors);
210 } 206 }
211 207
212 if (misc->minor < DYNAMIC_MINORS)
213 misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
214 dev = MKDEV(MISC_MAJOR, misc->minor); 208 dev = MKDEV(MISC_MAJOR, misc->minor);
215 209
216 misc->this_device = device_create(misc_class, misc->parent, dev, 210 misc->this_device = device_create(misc_class, misc->parent, dev,
217 misc, "%s", misc->name); 211 misc, "%s", misc->name);
218 if (IS_ERR(misc->this_device)) { 212 if (IS_ERR(misc->this_device)) {
213 int i = DYNAMIC_MINORS - misc->minor - 1;
214 if (i < DYNAMIC_MINORS && i >= 0)
215 clear_bit(i, misc_minors);
219 err = PTR_ERR(misc->this_device); 216 err = PTR_ERR(misc->this_device);
220 goto out; 217 goto out;
221 } 218 }
@@ -242,7 +239,7 @@ int misc_register(struct miscdevice * misc)
242 239
243int misc_deregister(struct miscdevice *misc) 240int misc_deregister(struct miscdevice *misc)
244{ 241{
245 int i = misc->minor; 242 int i = DYNAMIC_MINORS - misc->minor - 1;
246 243
247 if (list_empty(&misc->list)) 244 if (list_empty(&misc->list))
248 return -EINVAL; 245 return -EINVAL;
@@ -250,9 +247,8 @@ int misc_deregister(struct miscdevice *misc)
250 mutex_lock(&misc_mtx); 247 mutex_lock(&misc_mtx);
251 list_del(&misc->list); 248 list_del(&misc->list);
252 device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor)); 249 device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
253 if (i < DYNAMIC_MINORS && i>0) { 250 if (i < DYNAMIC_MINORS && i >= 0)
254 misc_minors[i>>3] &= ~(1 << (misc->minor & 7)); 251 clear_bit(i, misc_minors);
255 }
256 mutex_unlock(&misc_mtx); 252 mutex_unlock(&misc_mtx);
257 return 0; 253 return 0;
258} 254}
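The misc.c conversion above moves dynamic minor allocation onto a DECLARE_BITMAP() plus find_first_zero_bit(), keeping the old top-down numbering via minor = DYNAMIC_MINORS - i - 1. A self-contained sketch of that allocation scheme; a linear scan stands in for the kernel's find_first_zero_bit(), but the bookkeeping is the same:

    #include <stdio.h>
    #include <limits.h>

    #define DYNAMIC_MINORS 64
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long misc_minors[DYNAMIC_MINORS / BITS_PER_LONG + 1];

    static int first_zero_bit(void)
    {
        for (int i = 0; i < DYNAMIC_MINORS; i++)
            if (!(misc_minors[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
                return i;
        return DYNAMIC_MINORS;
    }

    static int alloc_minor(void)
    {
        int i = first_zero_bit();

        if (i >= DYNAMIC_MINORS)
            return -1;                          /* -EBUSY in the driver */
        misc_minors[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
        return DYNAMIC_MINORS - i - 1;          /* bit 0 <-> minor 63, as before */
    }

    static void free_minor(int minor)
    {
        int i = DYNAMIC_MINORS - minor - 1;

        misc_minors[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
    }

    int main(void)
    {
        int a = alloc_minor(), b = alloc_minor();

        printf("%d %d\n", a, b);       /* 63 62 */
        free_minor(a);
        printf("%d\n", alloc_minor()); /* 63 is reused */
        return 0;
    }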
diff --git a/drivers/char/random.c b/drivers/char/random.c
index dcd08635cf1b..8258982b49ec 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1245,12 +1245,8 @@ static int proc_do_uuid(ctl_table *table, int write,
1245 if (uuid[8] == 0) 1245 if (uuid[8] == 0)
1246 generate_random_uuid(uuid); 1246 generate_random_uuid(uuid);
1247 1247
1248 sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" 1248 sprintf(buf, "%pU", uuid);
1249 "%02x%02x%02x%02x%02x%02x", 1249
1250 uuid[0], uuid[1], uuid[2], uuid[3],
1251 uuid[4], uuid[5], uuid[6], uuid[7],
1252 uuid[8], uuid[9], uuid[10], uuid[11],
1253 uuid[12], uuid[13], uuid[14], uuid[15]);
1254 fake_table.data = buf; 1250 fake_table.data = buf;
1255 fake_table.maxlen = sizeof(buf); 1251 fake_table.maxlen = sizeof(buf);
1256 1252
@@ -1310,7 +1306,7 @@ ctl_table random_table[] = {
1310 1306
1311/******************************************************************** 1307/********************************************************************
1312 * 1308 *
1313 * Random funtions for networking 1309 * Random functions for networking
1314 * 1310 *
1315 ********************************************************************/ 1311 ********************************************************************/
1316 1312
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 44203ff599da..1ae2de7d8b4f 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -339,7 +339,7 @@ static struct sysrq_key_op sysrq_term_op = {
339 339
340static void moom_callback(struct work_struct *ignored) 340static void moom_callback(struct work_struct *ignored)
341{ 341{
342 out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0); 342 out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
343} 343}
344 344
345static DECLARE_WORK(moom_work, moom_callback); 345static DECLARE_WORK(moom_work, moom_callback);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1e3d728dbf7e..50faa1fb0f06 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -164,6 +164,9 @@ module_param(default_utf8, int, S_IRUGO | S_IWUSR);
164int global_cursor_default = -1; 164int global_cursor_default = -1;
165module_param(global_cursor_default, int, S_IRUGO | S_IWUSR); 165module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
166 166
167static int cur_default = CUR_DEFAULT;
168module_param(cur_default, int, S_IRUGO | S_IWUSR);
169
167/* 170/*
168 * ignore_poke: don't unblank the screen when things are typed. This is 171 * ignore_poke: don't unblank the screen when things are typed. This is
169 * mainly for the privacy of braille terminal users. 172 * mainly for the privacy of braille terminal users.
@@ -184,12 +187,10 @@ static DECLARE_WORK(console_work, console_callback);
184 * fg_console is the current virtual console, 187 * fg_console is the current virtual console,
185 * last_console is the last used one, 188 * last_console is the last used one,
186 * want_console is the console we want to switch to, 189 * want_console is the console we want to switch to,
187 * kmsg_redirect is the console for kernel messages,
188 */ 190 */
189int fg_console; 191int fg_console;
190int last_console; 192int last_console;
191int want_console = -1; 193int want_console = -1;
192int kmsg_redirect;
193 194
194/* 195/*
195 * For each existing display, we have a pointer to console currently visible 196 * For each existing display, we have a pointer to console currently visible
@@ -1638,7 +1639,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
1638 /* do not do set_leds here because this causes an endless tasklet loop 1639 /* do not do set_leds here because this causes an endless tasklet loop
1639 when the keyboard hasn't been initialized yet */ 1640 when the keyboard hasn't been initialized yet */
1640 1641
1641 vc->vc_cursor_type = CUR_DEFAULT; 1642 vc->vc_cursor_type = cur_default;
1642 vc->vc_complement_mask = vc->vc_s_complement_mask; 1643 vc->vc_complement_mask = vc->vc_s_complement_mask;
1643 1644
1644 default_attr(vc); 1645 default_attr(vc);
@@ -1840,7 +1841,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1840 if (vc->vc_par[0]) 1841 if (vc->vc_par[0])
1841 vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16); 1842 vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
1842 else 1843 else
1843 vc->vc_cursor_type = CUR_DEFAULT; 1844 vc->vc_cursor_type = cur_default;
1844 return; 1845 return;
1845 } 1846 }
1846 break; 1847 break;
@@ -2434,6 +2435,37 @@ struct tty_driver *console_driver;
2434 2435
2435#ifdef CONFIG_VT_CONSOLE 2436#ifdef CONFIG_VT_CONSOLE
2436 2437
2438/**
2439 * vt_kmsg_redirect() - Sets/gets the kernel message console
2440 * @new: The new virtual terminal number or -1 if the console should stay
2441 * unchanged
2442 *
2443 * By default, the kernel messages are always printed on the current virtual
2444 * console. However, the user may modify that default with the
2445 * TIOCL_SETKMSGREDIRECT ioctl call.
2446 *
2447 * This function sets the kernel message console to be @new. It returns the old
2448 * virtual console number. The virtual terminal number 0 (both as parameter and
2449 * return value) means no redirection (i.e. always printed on the currently
2450 * active console).
2451 *
2452 * The parameter -1 means that only the current console is returned, but the
2453 * value is not modified. You may use the macro vt_get_kmsg_redirect() in that
2454 * case to make the code more understandable.
2455 *
2456 * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
2457 * the parameter and always returns 0.
2458 */
2459int vt_kmsg_redirect(int new)
2460{
2461 static int kmsg_con;
2462
2463 if (new != -1)
2464 return xchg(&kmsg_con, new);
2465 else
2466 return kmsg_con;
2467}
2468
2437/* 2469/*
2438 * Console on virtual terminal 2470 * Console on virtual terminal
2439 * 2471 *
@@ -2448,6 +2480,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2448 const ushort *start; 2480 const ushort *start;
2449 ushort cnt = 0; 2481 ushort cnt = 0;
2450 ushort myx; 2482 ushort myx;
2483 int kmsg_console;
2451 2484
2452 /* console busy or not yet initialized */ 2485 /* console busy or not yet initialized */
2453 if (!printable) 2486 if (!printable)
@@ -2455,8 +2488,9 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2455 if (!spin_trylock(&printing_lock)) 2488 if (!spin_trylock(&printing_lock))
2456 return; 2489 return;
2457 2490
2458 if (kmsg_redirect && vc_cons_allocated(kmsg_redirect - 1)) 2491 kmsg_console = vt_get_kmsg_redirect();
2459 vc = vc_cons[kmsg_redirect - 1].d; 2492 if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
2493 vc = vc_cons[kmsg_console - 1].d;
2460 2494
2461 /* read `x' only after setting currcons properly (otherwise 2495 /* read `x' only after setting currcons properly (otherwise
2462 the `x' macro will read the x of the foreground console). */ 2496 the `x' macro will read the x of the foreground console). */
@@ -2613,7 +2647,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2613 ret = set_vesa_blanking(p); 2647 ret = set_vesa_blanking(p);
2614 break; 2648 break;
2615 case TIOCL_GETKMSGREDIRECT: 2649 case TIOCL_GETKMSGREDIRECT:
2616 data = kmsg_redirect; 2650 data = vt_get_kmsg_redirect();
2617 ret = __put_user(data, p); 2651 ret = __put_user(data, p);
2618 break; 2652 break;
2619 case TIOCL_SETKMSGREDIRECT: 2653 case TIOCL_SETKMSGREDIRECT:
@@ -2623,7 +2657,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2623 if (get_user(data, p+1)) 2657 if (get_user(data, p+1))
2624 ret = -EFAULT; 2658 ret = -EFAULT;
2625 else 2659 else
2626 kmsg_redirect = data; 2660 vt_kmsg_redirect(data);
2627 } 2661 }
2628 break; 2662 break;
2629 case TIOCL_GETFGCONSOLE: 2663 case TIOCL_GETFGCONSOLE:
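The vt.c changes above move the kmsg redirect state behind vt_kmsg_redirect()/vt_get_kmsg_redirect(), but the user-visible interface is unchanged: byte 0 of the TIOCLINUX argument selects the subcode and byte 1 the target VT (0 disables redirection). A minimal userspace sketch, assuming /dev/tty0 is available and the caller has sufficient privileges:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tiocl.h>

int main(void)
{
	char arg[2] = { TIOCL_SETKMSGREDIRECT, 4 };	/* send kernel messages to VT 4 */
	int fd = open("/dev/tty0", O_RDWR);

	if (fd < 0 || ioctl(fd, TIOCLINUX, arg) < 0) {
		perror("TIOCL_SETKMSGREDIRECT");
		return 1;
	}
	close(fd);
	return 0;
}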
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
new file mode 100644
index 000000000000..08f726c5fee5
--- /dev/null
+++ b/drivers/clocksource/Kconfig
@@ -0,0 +1,9 @@
1config CS5535_CLOCK_EVENT_SRC
2 tristate "CS5535/CS5536 high-res timer (MFGPT) events"
3 depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT
4 help
5 This driver provides a clock event source based on the MFGPT
6 timer(s) in the CS5535 and CS5536 companion chips.
7 MFGPTs have a better resolution and max interval than the
8 generic PIT, and are suitable for use as high-res timers.
9
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index eef216f7f61d..be61ece6330b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
2obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o 2obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
3obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o 3obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
4obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o 4obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
5obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
5obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o 6obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
6obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o 7obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
7obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o 8obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
new file mode 100644
index 000000000000..27d20fac19d1
--- /dev/null
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -0,0 +1,197 @@
1/*
2 * Clock event driver for the CS5535/CS5536
3 *
4 * Copyright (C) 2006, Advanced Micro Devices, Inc.
5 * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
6 * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public License
10 * as published by the Free Software Foundation.
11 *
12 * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
13 */
14
15#include <linux/kernel.h>
16#include <linux/irq.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19#include <linux/cs5535.h>
20#include <linux/clockchips.h>
21
22#define DRV_NAME "cs5535-clockevt"
23
24static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
25module_param_named(irq, timer_irq, int, 0644);
26MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
27
28/*
29 * We are using the 32.768kHz input clock - it's the only one that has the
30 * ranges we find desirable. The following table lists the suitable
31 * divisors and the associated Hz, minimum interval and the maximum interval:
32 *
33 * Divisor Hz Min Delta (s) Max Delta (s)
34 * 1 32768 .00048828125 2.000
35 * 2 16384 .0009765625 4.000
36 * 4 8192 .001953125 8.000
37 * 8 4096 .00390625 16.000
38 * 16 2048 .0078125 32.000
39 * 32 1024 .015625 64.000
40 * 64 512 .03125 128.000
41 * 128 256 .0625 256.000
42 * 256 128 .125 512.000
43 */
44
45static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
46static struct cs5535_mfgpt_timer *cs5535_event_clock;
47
48/* Selected from the table above */
49
50#define MFGPT_DIVISOR 16
51#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
52#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
53#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
54
55/*
 56 * The MFGPT timers on the CS5536 provide us with suitable timers to use
 57 * as clock event sources - not as good as an HPET or APIC, but certainly
58 * better than the PIT. This isn't a general purpose MFGPT driver, but
59 * a simplified one designed specifically to act as a clock event source.
60 * For full details about the MFGPT, please consult the CS5536 data sheet.
61 */
62
63static void disable_timer(struct cs5535_mfgpt_timer *timer)
64{
65 /* avoid races by clearing CMP1 and CMP2 unconditionally */
66 cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
67 (uint16_t) ~MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP1 |
68 MFGPT_SETUP_CMP2);
69}
70
71static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta)
72{
73 cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, delta);
74 cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
75
76 cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
77 MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
78}
79
80static void mfgpt_set_mode(enum clock_event_mode mode,
81 struct clock_event_device *evt)
82{
83 disable_timer(cs5535_event_clock);
84
85 if (mode == CLOCK_EVT_MODE_PERIODIC)
86 start_timer(cs5535_event_clock, MFGPT_PERIODIC);
87
88 cs5535_tick_mode = mode;
89}
90
91static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
92{
93 start_timer(cs5535_event_clock, delta);
94 return 0;
95}
96
97static struct clock_event_device cs5535_clockevent = {
98 .name = DRV_NAME,
99 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
100 .set_mode = mfgpt_set_mode,
101 .set_next_event = mfgpt_next_event,
102 .rating = 250,
103 .cpumask = cpu_all_mask,
104 .shift = 32
105};
106
107static irqreturn_t mfgpt_tick(int irq, void *dev_id)
108{
109 uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP);
110
111 /* See if the interrupt was for us */
112 if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
113 return IRQ_NONE;
114
115 /* Turn off the clock (and clear the event) */
116 disable_timer(cs5535_event_clock);
117
118 if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
119 return IRQ_HANDLED;
120
121 /* Clear the counter */
122 cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0);
123
124 /* Restart the clock in periodic mode */
125
126 if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC)
127 cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP,
128 MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
129
130 cs5535_clockevent.event_handler(&cs5535_clockevent);
131 return IRQ_HANDLED;
132}
133
134static struct irqaction mfgptirq = {
135 .handler = mfgpt_tick,
136 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
137 .name = DRV_NAME,
138};
139
140static int __init cs5535_mfgpt_init(void)
141{
142 struct cs5535_mfgpt_timer *timer;
143 int ret;
144 uint16_t val;
145
146 timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
147 if (!timer) {
148 printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n");
149 return -ENODEV;
150 }
151 cs5535_event_clock = timer;
152
153 /* Set up the IRQ on the MFGPT side */
154 if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
155 printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
156 timer_irq);
157 return -EIO;
158 }
159
160 /* And register it with the kernel */
161 ret = setup_irq(timer_irq, &mfgptirq);
162 if (ret) {
163 printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
164 goto err;
165 }
166
167 /* Set the clock scale and enable the event mode for CMP2 */
168 val = MFGPT_SCALE | (3 << 8);
169
170 cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
171
172 /* Set up the clock event */
173 cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
174 cs5535_clockevent.shift);
175 cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
176 &cs5535_clockevent);
177 cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
178 &cs5535_clockevent);
179
180 printk(KERN_INFO DRV_NAME
181 ": Registering MFGPT timer as a clock event, using IRQ %d\n",
182 timer_irq);
183 clockevents_register_device(&cs5535_clockevent);
184
185 return 0;
186
187err:
188 cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
189 printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
190 return -EIO;
191}
192
193module_init(cs5535_mfgpt_init);
194
195MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
196MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver");
197MODULE_LICENSE("GPL");
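To make the mult/shift setup in cs5535_mfgpt_init() above concrete: with the divisor-16 clock (MFGPT_HZ = 2048) and shift = 32, div_sc() computes mult as roughly (HZ << shift) / NSEC_PER_SEC, and set_next_event() is then handed a tick count of roughly (delta_ns * mult) >> shift. A standalone arithmetic sketch; the values are illustrative, not read from hardware:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t calc_mult(uint32_t hz, uint32_t shift)
{
	/* roughly what div_sc() computes in the kernel */
	return (uint32_t)(((uint64_t)hz << shift) / NSEC_PER_SEC);
}

int main(void)
{
	uint32_t shift = 32;
	uint32_t mult = calc_mult(2048, shift);	/* 32768 Hz input, divisor 16 */
	uint64_t delta_ns = 10000000;		/* request an event 10 ms out */
	uint64_t ticks = (delta_ns * mult) >> shift;

	/* ~20 ticks at 2048 Hz, i.e. one tick roughly every 488 us */
	printf("mult=%u, 10 ms ~= %llu ticks\n", mult, (unsigned long long)ticks);
	return 0;
}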
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index a4bec3f919aa..1c1ceb4f218f 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -69,9 +69,6 @@ static int ladder_select_state(struct cpuidle_device *dev)
69 int last_residency, last_idx = ldev->last_state_idx; 69 int last_residency, last_idx = ldev->last_state_idx;
70 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY); 70 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
71 71
72 if (unlikely(!ldev))
73 return 0;
74
75 /* Special case when user has set very strict latency requirement */ 72 /* Special case when user has set very strict latency requirement */
76 if (unlikely(latency_req == 0)) { 73 if (unlikely(latency_req == 0)) {
77 ladder_do_selection(ldev, last_idx, 0); 74 ladder_do_selection(ldev, last_idx, 0);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index eb140ff38c27..e02d74b1e892 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -111,6 +111,24 @@ config SH_DMAE
111 help 111 help
112 Enable support for the Renesas SuperH DMA controllers. 112 Enable support for the Renesas SuperH DMA controllers.
113 113
114config COH901318
115 bool "ST-Ericsson COH901318 DMA support"
116 select DMA_ENGINE
117 depends on ARCH_U300
118 help
119 Enable support for ST-Ericsson COH 901 318 DMA.
120
121config AMCC_PPC440SPE_ADMA
122 tristate "AMCC PPC440SPe ADMA support"
123 depends on 440SPe || 440SP
124 select DMA_ENGINE
125 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
126 help
127 Enable support for the AMCC PPC440SPe RAID engines.
128
129config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
130 bool
131
114config DMA_ENGINE 132config DMA_ENGINE
115 bool 133 bool
116 134
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index eca71ba78ae9..807053d48232 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
10obj-$(CONFIG_MX3_IPU) += ipu/ 10obj-$(CONFIG_MX3_IPU) += ipu/
11obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o 11obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
12obj-$(CONFIG_SH_DMAE) += shdma.o 12obj-$(CONFIG_SH_DMAE) += shdma.o
13obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
14obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c52ac9efd0bf..f15112569c1d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1188,7 +1188,7 @@ static int at_dma_resume_noirq(struct device *dev)
1188 return 0; 1188 return 0;
1189} 1189}
1190 1190
1191static struct dev_pm_ops at_dma_dev_pm_ops = { 1191static const struct dev_pm_ops at_dma_dev_pm_ops = {
1192 .suspend_noirq = at_dma_suspend_noirq, 1192 .suspend_noirq = at_dma_suspend_noirq,
1193 .resume_noirq = at_dma_resume_noirq, 1193 .resume_noirq = at_dma_resume_noirq,
1194}; 1194};
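The one-line at_hdmac change above simply marks the dev_pm_ops table const, since it is never written at runtime. A sketch of the pattern with hypothetical foo_* callbacks:

#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend_noirq(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static int foo_resume_noirq(struct device *dev)
{
	return 0;	/* restore the hardware here */
}

static const struct dev_pm_ops foo_dev_pm_ops = {
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};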
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
new file mode 100644
index 000000000000..4a99cd94536b
--- /dev/null
+++ b/drivers/dma/coh901318.c
@@ -0,0 +1,1325 @@
1/*
 2 * drivers/dma/coh901318.c
3 *
4 * Copyright (C) 2007-2009 ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 * DMA driver for COH 901 318
7 * Author: Per Friden <per.friden@stericsson.com>
8 */
9
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/kernel.h> /* printk() */
13#include <linux/fs.h> /* everything... */
14#include <linux/slab.h> /* kmalloc() */
15#include <linux/dmaengine.h>
16#include <linux/platform_device.h>
17#include <linux/device.h>
18#include <linux/irqreturn.h>
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/uaccess.h>
22#include <linux/debugfs.h>
23#include <mach/coh901318.h>
24
25#include "coh901318_lli.h"
26
27#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
28
29#ifdef VERBOSE_DEBUG
30#define COH_DBG(x) ({ if (1) x; 0; })
31#else
32#define COH_DBG(x) ({ if (0) x; 0; })
33#endif
34
35struct coh901318_desc {
36 struct dma_async_tx_descriptor desc;
37 struct list_head node;
38 struct scatterlist *sg;
39 unsigned int sg_len;
40 struct coh901318_lli *data;
41 enum dma_data_direction dir;
42 int pending_irqs;
43 unsigned long flags;
44};
45
46struct coh901318_base {
47 struct device *dev;
48 void __iomem *virtbase;
49 struct coh901318_pool pool;
50 struct powersave pm;
51 struct dma_device dma_slave;
52 struct dma_device dma_memcpy;
53 struct coh901318_chan *chans;
54 struct coh901318_platform *platform;
55};
56
57struct coh901318_chan {
58 spinlock_t lock;
59 int allocated;
60 int completed;
61 int id;
62 int stopped;
63
64 struct work_struct free_work;
65 struct dma_chan chan;
66
67 struct tasklet_struct tasklet;
68
69 struct list_head active;
70 struct list_head queue;
71 struct list_head free;
72
73 unsigned long nbr_active_done;
74 unsigned long busy;
75 int pending_irqs;
76
77 struct coh901318_base *base;
78};
79
80static void coh901318_list_print(struct coh901318_chan *cohc,
81 struct coh901318_lli *lli)
82{
83 struct coh901318_lli *l;
84 dma_addr_t addr = virt_to_phys(lli);
85 int i = 0;
86
87 while (addr) {
88 l = phys_to_virt(addr);
89 dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
90 ", dst 0x%x, link 0x%x link_virt 0x%p\n",
91 i, l, l->control, l->src_addr, l->dst_addr,
92 l->link_addr, phys_to_virt(l->link_addr));
93 i++;
94 addr = l->link_addr;
95 }
96}
97
98#ifdef CONFIG_DEBUG_FS
99
100#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
101
102static struct coh901318_base *debugfs_dma_base;
103static struct dentry *dma_dentry;
104
105static int coh901318_debugfs_open(struct inode *inode, struct file *file)
106{
107
108 file->private_data = inode->i_private;
109 return 0;
110}
111
112static int coh901318_debugfs_read(struct file *file, char __user *buf,
113 size_t count, loff_t *f_pos)
114{
115 u64 started_channels = debugfs_dma_base->pm.started_channels;
116 int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
117 int i;
118 int ret = 0;
119 char *dev_buf;
120 char *tmp;
121 int dev_size;
122
123 dev_buf = kmalloc(4*1024, GFP_KERNEL);
124 if (dev_buf == NULL)
125 goto err_kmalloc;
126 tmp = dev_buf;
127
128 tmp += sprintf(tmp, "DMA -- enable dma channels\n");
129
130 for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
131 if (started_channels & (1 << i))
132 tmp += sprintf(tmp, "channel %d\n", i);
133
134 tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
135 dev_size = tmp - dev_buf;
136
137 /* No more to read if offset != 0 */
138 if (*f_pos > dev_size)
139 goto out;
140
141 if (count > dev_size - *f_pos)
142 count = dev_size - *f_pos;
143
 144	if (copy_to_user(buf, dev_buf + *f_pos, count))
 145		ret = -EINVAL;
 146	else ret = count;
147 *f_pos += count;
148
149 out:
150 kfree(dev_buf);
151 return ret;
152
153 err_kmalloc:
154 return 0;
155}
156
157static const struct file_operations coh901318_debugfs_status_operations = {
158 .owner = THIS_MODULE,
159 .open = coh901318_debugfs_open,
160 .read = coh901318_debugfs_read,
161};
162
163
164static int __init init_coh901318_debugfs(void)
165{
166
167 dma_dentry = debugfs_create_dir("dma", NULL);
168
169 (void) debugfs_create_file("status",
170 S_IFREG | S_IRUGO,
171 dma_dentry, NULL,
172 &coh901318_debugfs_status_operations);
173 return 0;
174}
175
176static void __exit exit_coh901318_debugfs(void)
177{
178 debugfs_remove_recursive(dma_dentry);
179}
180
181module_init(init_coh901318_debugfs);
182module_exit(exit_coh901318_debugfs);
183#else
184
185#define COH901318_DEBUGFS_ASSIGN(x, y)
186
187#endif /* CONFIG_DEBUG_FS */
188
189static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
190{
191 return container_of(chan, struct coh901318_chan, chan);
192}
193
194static inline dma_addr_t
195cohc_dev_addr(struct coh901318_chan *cohc)
196{
197 return cohc->base->platform->chan_conf[cohc->id].dev_addr;
198}
199
200static inline const struct coh901318_params *
201cohc_chan_param(struct coh901318_chan *cohc)
202{
203 return &cohc->base->platform->chan_conf[cohc->id].param;
204}
205
206static inline const struct coh_dma_channel *
207cohc_chan_conf(struct coh901318_chan *cohc)
208{
209 return &cohc->base->platform->chan_conf[cohc->id];
210}
211
212static void enable_powersave(struct coh901318_chan *cohc)
213{
214 unsigned long flags;
215 struct powersave *pm = &cohc->base->pm;
216
217 spin_lock_irqsave(&pm->lock, flags);
218
219 pm->started_channels &= ~(1ULL << cohc->id);
220
221 if (!pm->started_channels) {
222 /* DMA no longer intends to access memory */
223 cohc->base->platform->access_memory_state(cohc->base->dev,
224 false);
225 }
226
227 spin_unlock_irqrestore(&pm->lock, flags);
228}
229static void disable_powersave(struct coh901318_chan *cohc)
230{
231 unsigned long flags;
232 struct powersave *pm = &cohc->base->pm;
233
234 spin_lock_irqsave(&pm->lock, flags);
235
236 if (!pm->started_channels) {
237 /* DMA intends to access memory */
238 cohc->base->platform->access_memory_state(cohc->base->dev,
239 true);
240 }
241
242 pm->started_channels |= (1ULL << cohc->id);
243
244 spin_unlock_irqrestore(&pm->lock, flags);
245}
246
247static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
248{
249 int channel = cohc->id;
250 void __iomem *virtbase = cohc->base->virtbase;
251
252 writel(control,
253 virtbase + COH901318_CX_CTRL +
254 COH901318_CX_CTRL_SPACING * channel);
255 return 0;
256}
257
258static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
259{
260 int channel = cohc->id;
261 void __iomem *virtbase = cohc->base->virtbase;
262
263 writel(conf,
264 virtbase + COH901318_CX_CFG +
265 COH901318_CX_CFG_SPACING*channel);
266 return 0;
267}
268
269
270static int coh901318_start(struct coh901318_chan *cohc)
271{
272 u32 val;
273 int channel = cohc->id;
274 void __iomem *virtbase = cohc->base->virtbase;
275
276 disable_powersave(cohc);
277
278 val = readl(virtbase + COH901318_CX_CFG +
279 COH901318_CX_CFG_SPACING * channel);
280
281 /* Enable channel */
282 val |= COH901318_CX_CFG_CH_ENABLE;
283 writel(val, virtbase + COH901318_CX_CFG +
284 COH901318_CX_CFG_SPACING * channel);
285
286 return 0;
287}
288
289static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
290 struct coh901318_lli *data)
291{
292 int channel = cohc->id;
293 void __iomem *virtbase = cohc->base->virtbase;
294
295 BUG_ON(readl(virtbase + COH901318_CX_STAT +
296 COH901318_CX_STAT_SPACING*channel) &
297 COH901318_CX_STAT_ACTIVE);
298
299 writel(data->src_addr,
300 virtbase + COH901318_CX_SRC_ADDR +
301 COH901318_CX_SRC_ADDR_SPACING * channel);
302
303 writel(data->dst_addr, virtbase +
304 COH901318_CX_DST_ADDR +
305 COH901318_CX_DST_ADDR_SPACING * channel);
306
307 writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
308 COH901318_CX_LNK_ADDR_SPACING * channel);
309
310 writel(data->control, virtbase + COH901318_CX_CTRL +
311 COH901318_CX_CTRL_SPACING * channel);
312
313 return 0;
314}
315static dma_cookie_t
316coh901318_assign_cookie(struct coh901318_chan *cohc,
317 struct coh901318_desc *cohd)
318{
319 dma_cookie_t cookie = cohc->chan.cookie;
320
321 if (++cookie < 0)
322 cookie = 1;
323
324 cohc->chan.cookie = cookie;
325 cohd->desc.cookie = cookie;
326
327 return cookie;
328}
329
330static struct coh901318_desc *
331coh901318_desc_get(struct coh901318_chan *cohc)
332{
333 struct coh901318_desc *desc;
334
335 if (list_empty(&cohc->free)) {
336 /* alloc new desc because we're out of used ones
337 * TODO: alloc a pile of descs instead of just one,
338 * avoid many small allocations.
339 */
340 desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
341 if (desc == NULL)
342 goto out;
343 INIT_LIST_HEAD(&desc->node);
344 } else {
345 /* Reuse an old desc. */
346 desc = list_first_entry(&cohc->free,
347 struct coh901318_desc,
348 node);
349 list_del(&desc->node);
350 }
351
352 out:
353 return desc;
354}
355
356static void
357coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
358{
359 list_add_tail(&cohd->node, &cohc->free);
360}
361
362/* call with irq lock held */
363static void
364coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
365{
366 list_add_tail(&desc->node, &cohc->active);
367
368 BUG_ON(cohc->pending_irqs != 0);
369
370 cohc->pending_irqs = desc->pending_irqs;
371}
372
373static struct coh901318_desc *
374coh901318_first_active_get(struct coh901318_chan *cohc)
375{
376 struct coh901318_desc *d;
377
378 if (list_empty(&cohc->active))
379 return NULL;
380
381 d = list_first_entry(&cohc->active,
382 struct coh901318_desc,
383 node);
384 return d;
385}
386
387static void
388coh901318_desc_remove(struct coh901318_desc *cohd)
389{
390 list_del(&cohd->node);
391}
392
393static void
394coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
395{
396 list_add_tail(&desc->node, &cohc->queue);
397}
398
399static struct coh901318_desc *
400coh901318_first_queued(struct coh901318_chan *cohc)
401{
402 struct coh901318_desc *d;
403
404 if (list_empty(&cohc->queue))
405 return NULL;
406
407 d = list_first_entry(&cohc->queue,
408 struct coh901318_desc,
409 node);
410 return d;
411}
412
413/*
414 * DMA start/stop controls
415 */
416u32 coh901318_get_bytes_left(struct dma_chan *chan)
417{
418 unsigned long flags;
419 u32 ret;
420 struct coh901318_chan *cohc = to_coh901318_chan(chan);
421
422 spin_lock_irqsave(&cohc->lock, flags);
423
424 /* Read transfer count value */
425 ret = readl(cohc->base->virtbase +
426 COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
427 cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
428
429 spin_unlock_irqrestore(&cohc->lock, flags);
430
431 return ret;
432}
433EXPORT_SYMBOL(coh901318_get_bytes_left);
434
435
436/* Stops a transfer without losing data. Enables power save.
437 Use this function in conjunction with coh901318_continue(..)
438*/
439void coh901318_stop(struct dma_chan *chan)
440{
441 u32 val;
442 unsigned long flags;
443 struct coh901318_chan *cohc = to_coh901318_chan(chan);
444 int channel = cohc->id;
445 void __iomem *virtbase = cohc->base->virtbase;
446
447 spin_lock_irqsave(&cohc->lock, flags);
448
449 /* Disable channel in HW */
450 val = readl(virtbase + COH901318_CX_CFG +
451 COH901318_CX_CFG_SPACING * channel);
452
 453	/* Stopping an infinite transfer */
454 if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
455 (val & COH901318_CX_CFG_CH_ENABLE))
456 cohc->stopped = 1;
457
458
459 val &= ~COH901318_CX_CFG_CH_ENABLE;
 460	/* Write the disable twice, HW bug workaround */
461 writel(val, virtbase + COH901318_CX_CFG +
462 COH901318_CX_CFG_SPACING * channel);
463 writel(val, virtbase + COH901318_CX_CFG +
464 COH901318_CX_CFG_SPACING * channel);
465
466 /* Spin-wait for it to actually go inactive */
467 while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
468 channel) & COH901318_CX_STAT_ACTIVE)
469 cpu_relax();
470
471 /* Check if we stopped an active job */
472 if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
473 channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
474 cohc->stopped = 1;
475
476 enable_powersave(cohc);
477
478 spin_unlock_irqrestore(&cohc->lock, flags);
479}
480EXPORT_SYMBOL(coh901318_stop);
481
482/* Continues a transfer that has been stopped via coh901318_stop(..).
483 Power save is handled.
484*/
485void coh901318_continue(struct dma_chan *chan)
486{
487 u32 val;
488 unsigned long flags;
489 struct coh901318_chan *cohc = to_coh901318_chan(chan);
490 int channel = cohc->id;
491
492 spin_lock_irqsave(&cohc->lock, flags);
493
494 disable_powersave(cohc);
495
496 if (cohc->stopped) {
497 /* Enable channel in HW */
498 val = readl(cohc->base->virtbase + COH901318_CX_CFG +
499 COH901318_CX_CFG_SPACING * channel);
500
501 val |= COH901318_CX_CFG_CH_ENABLE;
502
503 writel(val, cohc->base->virtbase + COH901318_CX_CFG +
504 COH901318_CX_CFG_SPACING*channel);
505
506 cohc->stopped = 0;
507 }
508
509 spin_unlock_irqrestore(&cohc->lock, flags);
510}
511EXPORT_SYMBOL(coh901318_continue);
512
513bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
514{
515 unsigned int ch_nr = (unsigned int) chan_id;
516
517 if (ch_nr == to_coh901318_chan(chan)->id)
518 return true;
519
520 return false;
521}
522EXPORT_SYMBOL(coh901318_filter_id);
523
524/*
525 * DMA channel allocation
526 */
527static int coh901318_config(struct coh901318_chan *cohc,
528 struct coh901318_params *param)
529{
530 unsigned long flags;
531 const struct coh901318_params *p;
532 int channel = cohc->id;
533 void __iomem *virtbase = cohc->base->virtbase;
534
535 spin_lock_irqsave(&cohc->lock, flags);
536
537 if (param)
538 p = param;
539 else
540 p = &cohc->base->platform->chan_conf[channel].param;
541
542 /* Clear any pending BE or TC interrupt */
543 if (channel < 32) {
544 writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
545 writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
546 } else {
547 writel(1 << (channel - 32), virtbase +
548 COH901318_BE_INT_CLEAR2);
549 writel(1 << (channel - 32), virtbase +
550 COH901318_TC_INT_CLEAR2);
551 }
552
553 coh901318_set_conf(cohc, p->config);
554 coh901318_set_ctrl(cohc, p->ctrl_lli_last);
555
556 spin_unlock_irqrestore(&cohc->lock, flags);
557
558 return 0;
559}
560
561/* must lock when calling this function
562 * start queued jobs, if any
563 * TODO: start all queued jobs in one go
564 *
565 * Returns descriptor if queued job is started otherwise NULL.
566 * If the queue is empty NULL is returned.
567 */
568static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
569{
570 struct coh901318_desc *cohd_que;
571
572 /* start queued jobs, if any
573 * TODO: transmit all queued jobs in one go
574 */
575 cohd_que = coh901318_first_queued(cohc);
576
577 if (cohd_que != NULL) {
578 /* Remove from queue */
579 coh901318_desc_remove(cohd_que);
580 /* initiate DMA job */
581 cohc->busy = 1;
582
583 coh901318_desc_submit(cohc, cohd_que);
584
585 coh901318_prep_linked_list(cohc, cohd_que->data);
586
587 /* start dma job */
588 coh901318_start(cohc);
589
590 }
591
592 return cohd_que;
593}
594
595static void dma_tasklet(unsigned long data)
596{
597 struct coh901318_chan *cohc = (struct coh901318_chan *) data;
598 struct coh901318_desc *cohd_fin;
599 unsigned long flags;
600 dma_async_tx_callback callback;
601 void *callback_param;
602
603 spin_lock_irqsave(&cohc->lock, flags);
604
605 /* get first active entry from list */
606 cohd_fin = coh901318_first_active_get(cohc);
607
 608	if (cohd_fin == NULL)
 609		goto err;
 610
 611	BUG_ON(cohd_fin->pending_irqs == 0);
612
613 cohd_fin->pending_irqs--;
614 cohc->completed = cohd_fin->desc.cookie;
615
616 BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
617
618 if (cohc->nbr_active_done == 0)
619 return;
620
621 if (!cohd_fin->pending_irqs) {
622 /* release the lli allocation*/
623 coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
624 }
625
626 dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
627 " nbr_active_done %ld\n", __func__,
628 cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
629
630 /* callback to client */
631 callback = cohd_fin->desc.callback;
632 callback_param = cohd_fin->desc.callback_param;
633
634 if (!cohd_fin->pending_irqs) {
635 coh901318_desc_remove(cohd_fin);
636
637 /* return desc to free-list */
638 coh901318_desc_free(cohc, cohd_fin);
639 }
640
641 if (cohc->nbr_active_done)
642 cohc->nbr_active_done--;
643
644 if (cohc->nbr_active_done) {
645 if (cohc_chan_conf(cohc)->priority_high)
646 tasklet_hi_schedule(&cohc->tasklet);
647 else
648 tasklet_schedule(&cohc->tasklet);
649 }
650 spin_unlock_irqrestore(&cohc->lock, flags);
651
652 if (callback)
653 callback(callback_param);
654
655 return;
656
657 err:
658 spin_unlock_irqrestore(&cohc->lock, flags);
659 dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
660}
661
662
663/* called from interrupt context */
664static void dma_tc_handle(struct coh901318_chan *cohc)
665{
666 BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
667 list_empty(&cohc->queue)));
668
669 if (!cohc->allocated)
670 return;
671
672 BUG_ON(cohc->pending_irqs == 0);
673
674 cohc->pending_irqs--;
675 cohc->nbr_active_done++;
676
677 if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
678 cohc->busy = 0;
679
680 BUG_ON(list_empty(&cohc->active));
681
682 if (cohc_chan_conf(cohc)->priority_high)
683 tasklet_hi_schedule(&cohc->tasklet);
684 else
685 tasklet_schedule(&cohc->tasklet);
686}
687
688
689static irqreturn_t dma_irq_handler(int irq, void *dev_id)
690{
691 u32 status1;
692 u32 status2;
693 int i;
694 int ch;
695 struct coh901318_base *base = dev_id;
696 struct coh901318_chan *cohc;
697 void __iomem *virtbase = base->virtbase;
698
699 status1 = readl(virtbase + COH901318_INT_STATUS1);
700 status2 = readl(virtbase + COH901318_INT_STATUS2);
701
702 if (unlikely(status1 == 0 && status2 == 0)) {
703 dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
704 return IRQ_HANDLED;
705 }
706
 707	/* TODO: consider handling the IRQ in a tasklet here to
 708	 * minimize interrupt latency */
709
710 /* Check the first 32 DMA channels for IRQ */
711 while (status1) {
712 /* Find first bit set, return as a number. */
713 i = ffs(status1) - 1;
714 ch = i;
715
716 cohc = &base->chans[ch];
717 spin_lock(&cohc->lock);
718
719 /* Mask off this bit */
720 status1 &= ~(1 << i);
721 /* Check the individual channel bits */
722 if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
723 dev_crit(COHC_2_DEV(cohc),
724 "DMA bus error on channel %d!\n", ch);
725 BUG_ON(1);
726 /* Clear BE interrupt */
727 __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
728 } else {
729 /* Caused by TC, really? */
730 if (unlikely(!test_bit(i, virtbase +
731 COH901318_TC_INT_STATUS1))) {
732 dev_warn(COHC_2_DEV(cohc),
733 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
734 /* Clear TC interrupt */
735 BUG_ON(1);
736 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
737 } else {
738 /* Enable powersave if transfer has finished */
739 if (!(readl(virtbase + COH901318_CX_STAT +
740 COH901318_CX_STAT_SPACING*ch) &
741 COH901318_CX_STAT_ENABLED)) {
742 enable_powersave(cohc);
743 }
744
745 /* Must clear TC interrupt before calling
746 * dma_tc_handle
 747				 * in case tc_handle initiates a new dma job
748 */
749 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
750
751 dma_tc_handle(cohc);
752 }
753 }
754 spin_unlock(&cohc->lock);
755 }
756
757 /* Check the remaining 32 DMA channels for IRQ */
758 while (status2) {
759 /* Find first bit set, return as a number. */
760 i = ffs(status2) - 1;
761 ch = i + 32;
762 cohc = &base->chans[ch];
763 spin_lock(&cohc->lock);
764
765 /* Mask off this bit */
766 status2 &= ~(1 << i);
767 /* Check the individual channel bits */
768 if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
769 dev_crit(COHC_2_DEV(cohc),
770 "DMA bus error on channel %d!\n", ch);
771 /* Clear BE interrupt */
772 BUG_ON(1);
773 __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
774 } else {
775 /* Caused by TC, really? */
776 if (unlikely(!test_bit(i, virtbase +
777 COH901318_TC_INT_STATUS2))) {
778 dev_warn(COHC_2_DEV(cohc),
779 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
780 /* Clear TC interrupt */
781 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
782 BUG_ON(1);
783 } else {
784 /* Enable powersave if transfer has finished */
785 if (!(readl(virtbase + COH901318_CX_STAT +
786 COH901318_CX_STAT_SPACING*ch) &
787 COH901318_CX_STAT_ENABLED)) {
788 enable_powersave(cohc);
789 }
790 /* Must clear TC interrupt before calling
791 * dma_tc_handle
 792				 * in case tc_handle initiates a new dma job
793 */
794 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
795
796 dma_tc_handle(cohc);
797 }
798 }
799 spin_unlock(&cohc->lock);
800 }
801
802 return IRQ_HANDLED;
803}
804
805static int coh901318_alloc_chan_resources(struct dma_chan *chan)
806{
807 struct coh901318_chan *cohc = to_coh901318_chan(chan);
808
809 dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
810 __func__, cohc->id);
811
812 if (chan->client_count > 1)
813 return -EBUSY;
814
815 coh901318_config(cohc, NULL);
816
817 cohc->allocated = 1;
818 cohc->completed = chan->cookie = 1;
819
820 return 1;
821}
822
823static void
824coh901318_free_chan_resources(struct dma_chan *chan)
825{
826 struct coh901318_chan *cohc = to_coh901318_chan(chan);
827 int channel = cohc->id;
828 unsigned long flags;
829
830 spin_lock_irqsave(&cohc->lock, flags);
831
832 /* Disable HW */
833 writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
834 COH901318_CX_CFG_SPACING*channel);
835 writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
836 COH901318_CX_CTRL_SPACING*channel);
837
838 cohc->allocated = 0;
839
840 spin_unlock_irqrestore(&cohc->lock, flags);
841
842 chan->device->device_terminate_all(chan);
843}
844
845
846static dma_cookie_t
847coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
848{
849 struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
850 desc);
851 struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
852 unsigned long flags;
853
854 spin_lock_irqsave(&cohc->lock, flags);
855
856 tx->cookie = coh901318_assign_cookie(cohc, cohd);
857
858 coh901318_desc_queue(cohc, cohd);
859
860 spin_unlock_irqrestore(&cohc->lock, flags);
861
862 return tx->cookie;
863}
864
865static struct dma_async_tx_descriptor *
866coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
867 size_t size, unsigned long flags)
868{
869 struct coh901318_lli *data;
870 struct coh901318_desc *cohd;
871 unsigned long flg;
872 struct coh901318_chan *cohc = to_coh901318_chan(chan);
873 int lli_len;
874 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
875
876 spin_lock_irqsave(&cohc->lock, flg);
877
878 dev_vdbg(COHC_2_DEV(cohc),
879 "[%s] channel %d src 0x%x dest 0x%x size %d\n",
880 __func__, cohc->id, src, dest, size);
881
882 if (flags & DMA_PREP_INTERRUPT)
883 /* Trigger interrupt after last lli */
884 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
885
886 lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
887 if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
888 lli_len++;
889
890 data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
891
892 if (data == NULL)
893 goto err;
894
895 cohd = coh901318_desc_get(cohc);
896 cohd->sg = NULL;
897 cohd->sg_len = 0;
898 cohd->data = data;
899
900 cohd->pending_irqs =
901 coh901318_lli_fill_memcpy(
902 &cohc->base->pool, data, src, size, dest,
903 cohc_chan_param(cohc)->ctrl_lli_chained,
904 ctrl_last);
905 cohd->flags = flags;
906
907 COH_DBG(coh901318_list_print(cohc, data));
908
909 dma_async_tx_descriptor_init(&cohd->desc, chan);
910
911 cohd->desc.tx_submit = coh901318_tx_submit;
912
913 spin_unlock_irqrestore(&cohc->lock, flg);
914
915 return &cohd->desc;
916 err:
917 spin_unlock_irqrestore(&cohc->lock, flg);
918 return NULL;
919}
920
921static struct dma_async_tx_descriptor *
922coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
923 unsigned int sg_len, enum dma_data_direction direction,
924 unsigned long flags)
925{
926 struct coh901318_chan *cohc = to_coh901318_chan(chan);
927 struct coh901318_lli *data;
928 struct coh901318_desc *cohd;
929 struct scatterlist *sg;
930 int len = 0;
931 int size;
932 int i;
933 u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
934 u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
935 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
936 unsigned long flg;
937
938 if (!sgl)
939 goto out;
940 if (sgl->length == 0)
941 goto out;
942
943 spin_lock_irqsave(&cohc->lock, flg);
944
945 dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
946 __func__, sg_len, direction);
947
948 if (flags & DMA_PREP_INTERRUPT)
949 /* Trigger interrupt after last lli */
950 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
951
952 cohd = coh901318_desc_get(cohc);
953 cohd->sg = NULL;
954 cohd->sg_len = 0;
955 cohd->dir = direction;
956
957 if (direction == DMA_TO_DEVICE) {
958 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
959 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
960
961 ctrl_chained |= tx_flags;
962 ctrl_last |= tx_flags;
963 ctrl |= tx_flags;
964 } else if (direction == DMA_FROM_DEVICE) {
965 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
966 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
967
968 ctrl_chained |= rx_flags;
969 ctrl_last |= rx_flags;
970 ctrl |= rx_flags;
971 } else
972 goto err_direction;
973
974 dma_async_tx_descriptor_init(&cohd->desc, chan);
975
976 cohd->desc.tx_submit = coh901318_tx_submit;
977
978
 979	/* The dma only supports transferring packets up to
 980	 * MAX_DMA_PACKET_SIZE. Calculate the total number of
 981	 * dma elements required to send the entire sg list
982 */
983 for_each_sg(sgl, sg, sg_len, i) {
984 unsigned int factor;
985 size = sg_dma_len(sg);
986
987 if (size <= MAX_DMA_PACKET_SIZE) {
988 len++;
989 continue;
990 }
991
992 factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
993 if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
994 factor++;
995
996 len += factor;
997 }
998
999 data = coh901318_lli_alloc(&cohc->base->pool, len);
1000
1001 if (data == NULL)
1002 goto err_dma_alloc;
1003
 1004	/* initialize the allocated data list */
1005 cohd->pending_irqs =
1006 coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
1007 cohc_dev_addr(cohc),
1008 ctrl_chained,
1009 ctrl,
1010 ctrl_last,
1011 direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
1012 cohd->data = data;
1013
1014 cohd->flags = flags;
1015
1016 COH_DBG(coh901318_list_print(cohc, data));
1017
1018 spin_unlock_irqrestore(&cohc->lock, flg);
1019
1020 return &cohd->desc;
1021 err_dma_alloc:
1022 err_direction:
1023 coh901318_desc_remove(cohd);
1024 coh901318_desc_free(cohc, cohd);
1025 spin_unlock_irqrestore(&cohc->lock, flg);
1026 out:
1027 return NULL;
1028}
1029
1030static enum dma_status
1031coh901318_is_tx_complete(struct dma_chan *chan,
1032 dma_cookie_t cookie, dma_cookie_t *done,
1033 dma_cookie_t *used)
1034{
1035 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1036 dma_cookie_t last_used;
1037 dma_cookie_t last_complete;
1038 int ret;
1039
1040 last_complete = cohc->completed;
1041 last_used = chan->cookie;
1042
1043 ret = dma_async_is_complete(cookie, last_complete, last_used);
1044
1045 if (done)
1046 *done = last_complete;
1047 if (used)
1048 *used = last_used;
1049
1050 return ret;
1051}
1052
1053static void
1054coh901318_issue_pending(struct dma_chan *chan)
1055{
1056 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1057 unsigned long flags;
1058
1059 spin_lock_irqsave(&cohc->lock, flags);
1060
1061 /* Busy means that pending jobs are already being processed */
1062 if (!cohc->busy)
1063 coh901318_queue_start(cohc);
1064
1065 spin_unlock_irqrestore(&cohc->lock, flags);
1066}
1067
1068static void
1069coh901318_terminate_all(struct dma_chan *chan)
1070{
1071 unsigned long flags;
1072 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1073 struct coh901318_desc *cohd;
1074 void __iomem *virtbase = cohc->base->virtbase;
1075
1076 coh901318_stop(chan);
1077
1078 spin_lock_irqsave(&cohc->lock, flags);
1079
1080 /* Clear any pending BE or TC interrupt */
1081 if (cohc->id < 32) {
1082 writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
1083 writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
1084 } else {
1085 writel(1 << (cohc->id - 32), virtbase +
1086 COH901318_BE_INT_CLEAR2);
1087 writel(1 << (cohc->id - 32), virtbase +
1088 COH901318_TC_INT_CLEAR2);
1089 }
1090
1091 enable_powersave(cohc);
1092
1093 while ((cohd = coh901318_first_active_get(cohc))) {
1094 /* release the lli allocation*/
1095 coh901318_lli_free(&cohc->base->pool, &cohd->data);
1096
1097 coh901318_desc_remove(cohd);
1098
1099 /* return desc to free-list */
1100 coh901318_desc_free(cohc, cohd);
1101 }
1102
1103 while ((cohd = coh901318_first_queued(cohc))) {
1104 /* release the lli allocation*/
1105 coh901318_lli_free(&cohc->base->pool, &cohd->data);
1106
1107 coh901318_desc_remove(cohd);
1108
1109 /* return desc to free-list */
1110 coh901318_desc_free(cohc, cohd);
1111 }
1112
1113
1114 cohc->nbr_active_done = 0;
1115 cohc->busy = 0;
1116 cohc->pending_irqs = 0;
1117
1118 spin_unlock_irqrestore(&cohc->lock, flags);
1119}
1120void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
1121 struct coh901318_base *base)
1122{
1123 int chans_i;
1124 int i = 0;
1125 struct coh901318_chan *cohc;
1126
1127 INIT_LIST_HEAD(&dma->channels);
1128
1129 for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
1130 for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
1131 cohc = &base->chans[i];
1132
1133 cohc->base = base;
1134 cohc->chan.device = dma;
1135 cohc->id = i;
1136
1137 /* TODO: do we really need this lock if only one
1138 * client is connected to each channel?
1139 */
1140
1141 spin_lock_init(&cohc->lock);
1142
1143 cohc->pending_irqs = 0;
1144 cohc->nbr_active_done = 0;
1145 cohc->busy = 0;
1146 INIT_LIST_HEAD(&cohc->free);
1147 INIT_LIST_HEAD(&cohc->active);
1148 INIT_LIST_HEAD(&cohc->queue);
1149
1150 tasklet_init(&cohc->tasklet, dma_tasklet,
1151 (unsigned long) cohc);
1152
1153 list_add_tail(&cohc->chan.device_node,
1154 &dma->channels);
1155 }
1156 }
1157}
1158
1159static int __init coh901318_probe(struct platform_device *pdev)
1160{
1161 int err = 0;
1162 struct coh901318_platform *pdata;
1163 struct coh901318_base *base;
1164 int irq;
1165 struct resource *io;
1166
1167 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1168 if (!io)
1169 goto err_get_resource;
1170
1171 /* Map DMA controller registers to virtual memory */
1172 if (request_mem_region(io->start,
1173 resource_size(io),
1174 pdev->dev.driver->name) == NULL) {
1175 err = -EBUSY;
1176 goto err_request_mem;
1177 }
1178
1179 pdata = pdev->dev.platform_data;
1180 if (!pdata)
1181 goto err_no_platformdata;
1182
1183 base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
1184 pdata->max_channels *
1185 sizeof(struct coh901318_chan),
1186 GFP_KERNEL);
1187 if (!base)
1188 goto err_alloc_coh_dma_channels;
1189
1190 base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
1191
1192 base->virtbase = ioremap(io->start, resource_size(io));
1193 if (!base->virtbase) {
1194 err = -ENOMEM;
1195 goto err_no_ioremap;
1196 }
1197
1198 base->dev = &pdev->dev;
1199 base->platform = pdata;
1200 spin_lock_init(&base->pm.lock);
1201 base->pm.started_channels = 0;
1202
1203 COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
1204
1205 platform_set_drvdata(pdev, base);
1206
1207 irq = platform_get_irq(pdev, 0);
1208 if (irq < 0)
1209 goto err_no_irq;
1210
1211 err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
1212 "coh901318", base);
1213 if (err) {
1214 dev_crit(&pdev->dev,
1215 "Cannot allocate IRQ for DMA controller!\n");
1216 goto err_request_irq;
1217 }
1218
1219 err = coh901318_pool_create(&base->pool, &pdev->dev,
1220 sizeof(struct coh901318_lli),
1221 32);
1222 if (err)
1223 goto err_pool_create;
1224
1225 /* init channels for device transfers */
1226 coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
1227 base);
1228
1229 dma_cap_zero(base->dma_slave.cap_mask);
1230 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
1231
1232 base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
1233 base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
1234 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
1235 base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
1236 base->dma_slave.device_issue_pending = coh901318_issue_pending;
1237 base->dma_slave.device_terminate_all = coh901318_terminate_all;
1238 base->dma_slave.dev = &pdev->dev;
1239
1240 err = dma_async_device_register(&base->dma_slave);
1241
1242 if (err)
1243 goto err_register_slave;
1244
1245 /* init channels for memcpy */
1246 coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
1247 base);
1248
1249 dma_cap_zero(base->dma_memcpy.cap_mask);
1250 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
1251
1252 base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
1253 base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
1254 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
1255 base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
1256 base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
1257 base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
1258 base->dma_memcpy.dev = &pdev->dev;
1259 err = dma_async_device_register(&base->dma_memcpy);
1260
1261 if (err)
1262 goto err_register_memcpy;
1263
1264 dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
1265 (u32) base->virtbase);
1266
1267 return err;
1268
1269 err_register_memcpy:
1270 dma_async_device_unregister(&base->dma_slave);
1271 err_register_slave:
1272 coh901318_pool_destroy(&base->pool);
1273 err_pool_create:
1274 free_irq(platform_get_irq(pdev, 0), base);
1275 err_request_irq:
1276 err_no_irq:
1277 iounmap(base->virtbase);
1278 err_no_ioremap:
1279 kfree(base);
1280 err_alloc_coh_dma_channels:
1281 err_no_platformdata:
1282 release_mem_region(pdev->resource->start,
1283 resource_size(pdev->resource));
1284 err_request_mem:
1285 err_get_resource:
1286 return err;
1287}
1288
1289static int __exit coh901318_remove(struct platform_device *pdev)
1290{
1291 struct coh901318_base *base = platform_get_drvdata(pdev);
1292
1293 dma_async_device_unregister(&base->dma_memcpy);
1294 dma_async_device_unregister(&base->dma_slave);
1295 coh901318_pool_destroy(&base->pool);
1296 free_irq(platform_get_irq(pdev, 0), base);
1297 kfree(base);
1298 iounmap(base->virtbase);
1299 release_mem_region(pdev->resource->start,
1300 resource_size(pdev->resource));
1301 return 0;
1302}
1303
1304
1305static struct platform_driver coh901318_driver = {
1306 .remove = __exit_p(coh901318_remove),
1307 .driver = {
1308 .name = "coh901318",
1309 },
1310};
1311
1312int __init coh901318_init(void)
1313{
1314 return platform_driver_probe(&coh901318_driver, coh901318_probe);
1315}
1316subsys_initcall(coh901318_init);
1317
1318void __exit coh901318_exit(void)
1319{
1320 platform_driver_unregister(&coh901318_driver);
1321}
1322module_exit(coh901318_exit);
1323
1324MODULE_LICENSE("GPL");
1325MODULE_AUTHOR("Per Friden");
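For orientation, this is roughly how a client would obtain one of the memcpy channels exported above and submit a transfer through the dmaengine API of this era (tx_submit on the descriptor, then issue pending). The channel number, addresses and helper name are illustrative assumptions, not taken from a real board file:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <mach/coh901318.h>

static int example_coh_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* pick a specific channel by number via the exported filter */
	chan = dma_request_channel(mask, coh901318_filter_id, (void *) 38);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	tx->tx_submit(tx);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* start processing the queue */

	return 0;
}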
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
new file mode 100644
index 000000000000..f5120f238a4d
--- /dev/null
+++ b/drivers/dma/coh901318_lli.c
@@ -0,0 +1,318 @@
1/*
 2 * drivers/dma/coh901318_lli.c
3 *
4 * Copyright (C) 2007-2009 ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 * Support functions for handling lli for dma
7 * Author: Per Friden <per.friden@stericsson.com>
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/spinlock.h>
12#include <linux/dmapool.h>
13#include <linux/memory.h>
14#include <mach/coh901318.h>
15
16#include "coh901318_lli.h"
17
18#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
19#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
20#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
21#else
22#define DEBUGFS_POOL_COUNTER_RESET(pool)
23#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
24#endif
25
26static struct coh901318_lli *
27coh901318_lli_next(struct coh901318_lli *data)
28{
29 if (data == NULL || data->link_addr == 0)
30 return NULL;
31
32 return (struct coh901318_lli *) data->virt_link_addr;
33}
34
35int coh901318_pool_create(struct coh901318_pool *pool,
36 struct device *dev,
37 size_t size, size_t align)
38{
39 spin_lock_init(&pool->lock);
40 pool->dev = dev;
41 pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
42
43 DEBUGFS_POOL_COUNTER_RESET(pool);
44 return 0;
45}
46
47int coh901318_pool_destroy(struct coh901318_pool *pool)
48{
49
50 dma_pool_destroy(pool->dmapool);
51 return 0;
52}
53
54struct coh901318_lli *
55coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
56{
57 int i;
58 struct coh901318_lli *head;
59 struct coh901318_lli *lli;
60 struct coh901318_lli *lli_prev;
61 dma_addr_t phy;
62
63 if (len == 0)
64 goto err;
65
66 spin_lock(&pool->lock);
67
68 head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
69
70 if (head == NULL)
71 goto err;
72
73 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
74
75 lli = head;
76 lli->phy_this = phy;
77
78 for (i = 1; i < len; i++) {
79 lli_prev = lli;
80
81 lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
82
83 if (lli == NULL)
84 goto err_clean_up;
85
86 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
87 lli->phy_this = phy;
88
89 lli_prev->link_addr = phy;
90 lli_prev->virt_link_addr = lli;
91 }
92
93 lli->link_addr = 0x00000000U;
94
95 spin_unlock(&pool->lock);
96
97 return head;
98
99 err:
100 spin_unlock(&pool->lock);
101 return NULL;
102
103 err_clean_up:
104 lli_prev->link_addr = 0x00000000U;
105 spin_unlock(&pool->lock);
106 coh901318_lli_free(pool, &head);
107 return NULL;
108}
109
110void coh901318_lli_free(struct coh901318_pool *pool,
111 struct coh901318_lli **lli)
112{
113 struct coh901318_lli *l;
114 struct coh901318_lli *next;
115
116 if (lli == NULL)
117 return;
118
119 l = *lli;
120
121 if (l == NULL)
122 return;
123
124 spin_lock(&pool->lock);
125
126 while (l->link_addr) {
127 next = l->virt_link_addr;
128 dma_pool_free(pool->dmapool, l, l->phy_this);
129 DEBUGFS_POOL_COUNTER_ADD(pool, -1);
130 l = next;
131 }
132 dma_pool_free(pool->dmapool, l, l->phy_this);
133 DEBUGFS_POOL_COUNTER_ADD(pool, -1);
134
135 spin_unlock(&pool->lock);
136 *lli = NULL;
137}
138
139int
140coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
141 struct coh901318_lli *lli,
142 dma_addr_t source, unsigned int size,
143 dma_addr_t destination, u32 ctrl_chained,
144 u32 ctrl_eom)
145{
146 int s = size;
147 dma_addr_t src = source;
148 dma_addr_t dst = destination;
149
150 lli->src_addr = src;
151 lli->dst_addr = dst;
152
153 while (lli->link_addr) {
154 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
155 lli->src_addr = src;
156 lli->dst_addr = dst;
157
158 s -= MAX_DMA_PACKET_SIZE;
159 lli = coh901318_lli_next(lli);
160
161 src += MAX_DMA_PACKET_SIZE;
162 dst += MAX_DMA_PACKET_SIZE;
163 }
164
165 lli->control = ctrl_eom | s;
166 lli->src_addr = src;
167 lli->dst_addr = dst;
168
169 /* One irq per single transfer */
170 return 1;
171}
172
173int
174coh901318_lli_fill_single(struct coh901318_pool *pool,
175 struct coh901318_lli *lli,
176 dma_addr_t buf, unsigned int size,
177 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
178 enum dma_data_direction dir)
179{
180 int s = size;
181 dma_addr_t src;
182 dma_addr_t dst;
183
184
185 if (dir == DMA_TO_DEVICE) {
186 src = buf;
187 dst = dev_addr;
188
189 } else if (dir == DMA_FROM_DEVICE) {
190
191 src = dev_addr;
192 dst = buf;
193 } else {
194 return -EINVAL;
195 }
196
197 while (lli->link_addr) {
198 size_t block_size = MAX_DMA_PACKET_SIZE;
199 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
200
201 /* If we are on the next-to-final block and there will
202 * be less than half a DMA packet left for the last
203 * block, then we want to make this block a little
204 * smaller to balance the sizes. This is meant to
205 * avoid too small transfers if the buffer size is
206 * (MAX_DMA_PACKET_SIZE*N + 1) */
207 if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
208 block_size = MAX_DMA_PACKET_SIZE/2;
209
210 s -= block_size;
211 lli->src_addr = src;
212 lli->dst_addr = dst;
213
214 lli = coh901318_lli_next(lli);
215
216 if (dir == DMA_TO_DEVICE)
217 src += block_size;
218 else if (dir == DMA_FROM_DEVICE)
219 dst += block_size;
220 }
221
222 lli->control = ctrl_eom | s;
223 lli->src_addr = src;
224 lli->dst_addr = dst;
225
226 /* One irq per single transfer */
227 return 1;
228}
229
230int
231coh901318_lli_fill_sg(struct coh901318_pool *pool,
232 struct coh901318_lli *lli,
233 struct scatterlist *sgl, unsigned int nents,
234 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
235 u32 ctrl_last,
236 enum dma_data_direction dir, u32 ctrl_irq_mask)
237{
238 int i;
239 struct scatterlist *sg;
240 u32 ctrl_sg;
241 dma_addr_t src = 0;
242 dma_addr_t dst = 0;
243 int nbr_of_irq = 0;
244 u32 bytes_to_transfer;
245 u32 elem_size;
246
247 if (lli == NULL)
248 goto err;
249
250 spin_lock(&pool->lock);
251
252 if (dir == DMA_TO_DEVICE)
253 dst = dev_addr;
254 else if (dir == DMA_FROM_DEVICE)
255 src = dev_addr;
256 else
257 goto err;
258
259 for_each_sg(sgl, sg, nents, i) {
260 if (sg_is_chain(sg)) {
261 /* sg continues into the next sg-element; don't
262 * send ctrl_last until the last
263 * sg-element in the chain
264 */
265 ctrl_sg = ctrl_chained;
266 } else if (i == nents - 1)
267 ctrl_sg = ctrl_last;
268 else
269 ctrl_sg = ctrl ? ctrl : ctrl_last;
270
271
272 if ((ctrl_sg & ctrl_irq_mask))
273 nbr_of_irq++;
274
275 if (dir == DMA_TO_DEVICE)
276 /* set the source address from this sg entry */
277 src = sg_dma_address(sg);
278 else
279 /* set the destination address from this sg entry */
280 dst = sg_dma_address(sg);
281
282 bytes_to_transfer = sg_dma_len(sg);
283
284 while (bytes_to_transfer) {
285 u32 val;
286
287 if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
288 elem_size = MAX_DMA_PACKET_SIZE;
289 val = ctrl_chained;
290 } else {
291 elem_size = bytes_to_transfer;
292 val = ctrl_sg;
293 }
294
295 lli->control = val | elem_size;
296 lli->src_addr = src;
297 lli->dst_addr = dst;
298
299 if (dir == DMA_FROM_DEVICE)
300 dst += elem_size;
301 else
302 src += elem_size;
303
304 BUG_ON(lli->link_addr & 3);
305
306 bytes_to_transfer -= elem_size;
307 lli = coh901318_lli_next(lli);
308 }
309
310 }
311 spin_unlock(&pool->lock);
312
313 /* There can be many IRQs per sg transfer */
314 return nbr_of_irq;
315 err:
316 spin_unlock(&pool->lock);
317 return -EINVAL;
318}
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
new file mode 100644
index 000000000000..7bf713b79c6b
--- /dev/null
+++ b/drivers/dma/coh901318_lli.h
@@ -0,0 +1,124 @@
1/*
2 * drivers/dma/coh901318_lli.h
3 *
4 * Copyright (C) 2007-2009 ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 * Support functions for handling lli for coh901318
7 * Author: Per Friden <per.friden@stericsson.com>
8 */
9
10#ifndef COH901318_LLI_H
11#define COH901318_LLI_H
12
13#include <mach/coh901318.h>
14
15struct device;
16
17struct coh901318_pool {
18 spinlock_t lock;
19 struct dma_pool *dmapool;
20 struct device *dev;
21
22#ifdef CONFIG_DEBUG_FS
23 int debugfs_pool_counter;
24#endif
25};
26
27struct device;
28/**
29 * coh901318_pool_create() - Creates a dma pool for lli:s
30 * @pool: pool handle
31 * @dev: dma device
32 * @lli_nbr: number of lli:s in the pool
33 * @align: address alignment of the lli:s
34 * returns 0 on success, otherwise non-zero
35 */
36int coh901318_pool_create(struct coh901318_pool *pool,
37 struct device *dev,
38 size_t lli_nbr, size_t align);
39
40/**
41 * coh901318_pool_destroy() - Destroys the dma pool
42 * @pool: pool handle
43 * returns 0 on success, otherwise non-zero
44 */
45int coh901318_pool_destroy(struct coh901318_pool *pool);
46
47/**
48 * coh901318_lli_alloc() - Allocates a linked list
49 *
50 * @pool: pool handle
51 * @len: length of the list
52 * return: non-NULL on success, otherwise NULL
53 */
54struct coh901318_lli *
55coh901318_lli_alloc(struct coh901318_pool *pool,
56 unsigned int len);
57
58/**
59 * coh901318_lli_free() - Returns the linked list items to the pool
60 * @pool: pool handle
61 * @lli: reference to lli pointer to be freed
62 */
63void coh901318_lli_free(struct coh901318_pool *pool,
64 struct coh901318_lli **lli);
65
66/**
67 * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
68 * @pool: pool handle
69 * @lli: allocated lli
70 * @src: src address
71 * @size: transfer size
72 * @dst: destination address
73 * @ctrl_chained: ctrl for chained lli
74 * @ctrl_last: ctrl for the last lli
75 * returns number of CPU interrupts for the lli, negative on error.
76 */
77int
78coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
79 struct coh901318_lli *lli,
80 dma_addr_t src, unsigned int size,
81 dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
82
83/**
84 * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
85 * @pool: pool handle
86 * @lli: allocated lli
87 * @buf: transfer buffer
88 * @size: transfer size
89 * @dev_addr: address of peripheral
90 * @ctrl_chained: ctrl for chained lli
91 * @ctrl_last: ctrl for the last lli
92 * @dir: direction of transfer (to or from device)
93 * returns number of CPU interrupts for the lli, negative on error.
94 */
95int
96coh901318_lli_fill_single(struct coh901318_pool *pool,
97 struct coh901318_lli *lli,
98 dma_addr_t buf, unsigned int size,
99 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
100 enum dma_data_direction dir);
101
102/**
103 * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
104 * @pool: pool handle
105 * @lli: allocated lli
106 * @sg: scatter gather list
107 * @nents: number of entries in sg
108 * @dev_addr: address of peripheral
109 * @ctrl_chained: ctrl for chained lli
110 * @ctrl: ctrl of middle lli
111 * @ctrl_last: ctrl for the last lli
112 * @dir: direction of transfer (to or from device)
113 * @ctrl_irq_mask: ctrl mask for CPU interrupt
114 * returns number of CPU interrupts for the lli, negative on error.
115 */
116int
117coh901318_lli_fill_sg(struct coh901318_pool *pool,
118 struct coh901318_lli *lli,
119 struct scatterlist *sg, unsigned int nents,
120 dma_addr_t dev_addr, u32 ctrl_chained,
121 u32 ctrl, u32 ctrl_last,
122 enum dma_data_direction dir, u32 ctrl_irq_mask);
123
124#endif /* COH901318_LLI_H */
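For orientation, the sketch below shows how a client driver might drive the lli pool API declared in this header. It is not part of the patch: the helper name, the 0x0/0x80 ctrl words and the minimal error handling are invented for illustration, and MAX_DMA_PACKET_SIZE is assumed to come from <mach/coh901318.h>.

/* Hypothetical usage sketch of the coh901318 lli pool API (not part of the patch). */
static int example_lli_memcpy(struct coh901318_pool *pool,
			      dma_addr_t src, dma_addr_t dst,
			      unsigned int size)
{
	/* one lli per MAX_DMA_PACKET_SIZE chunk, rounded up */
	unsigned int len = (size + MAX_DMA_PACKET_SIZE - 1) / MAX_DMA_PACKET_SIZE;
	struct coh901318_lli *lli = coh901318_lli_alloc(pool, len);
	int irqs;

	if (lli == NULL)
		return -ENOMEM;

	/* 0x0 and 0x80 stand in for the real chained/EOM ctrl words */
	irqs = coh901318_lli_fill_memcpy(pool, lli, src, size, dst, 0x0, 0x80);
	if (irqs < 0) {
		coh901318_lli_free(pool, &lli);
		return irqs;
	}

	/* ... point the channel at lli->phy_this and start the transfer ... */

	coh901318_lli_free(pool, &lli);
	return 0;
}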
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a32a4cf7b1e0..8b905161fbf4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -298,10 +298,6 @@ static int dmatest_func(void *data)
298 298
299 total_tests++; 299 total_tests++;
300 300
301 len = dmatest_random() % test_buf_size + 1;
302 src_off = dmatest_random() % (test_buf_size - len + 1);
303 dst_off = dmatest_random() % (test_buf_size - len + 1);
304
305 /* honor alignment restrictions */ 301 /* honor alignment restrictions */
306 if (thread->type == DMA_MEMCPY) 302 if (thread->type == DMA_MEMCPY)
307 align = dev->copy_align; 303 align = dev->copy_align;
@@ -310,7 +306,19 @@ static int dmatest_func(void *data)
310 else if (thread->type == DMA_PQ) 306 else if (thread->type == DMA_PQ)
311 align = dev->pq_align; 307 align = dev->pq_align;
312 308
309 if (1 << align > test_buf_size) {
310 pr_err("%u-byte buffer too small for %d-byte alignment\n",
311 test_buf_size, 1 << align);
312 break;
313 }
314
315 len = dmatest_random() % test_buf_size + 1;
313 len = (len >> align) << align; 316 len = (len >> align) << align;
317 if (!len)
318 len = 1 << align;
319 src_off = dmatest_random() % (test_buf_size - len + 1);
320 dst_off = dmatest_random() % (test_buf_size - len + 1);
321
314 src_off = (src_off >> align) << align; 322 src_off = (src_off >> align) << align;
315 dst_off = (dst_off >> align) << align; 323 dst_off = (dst_off >> align) << align;
316 324
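The reordered dmatest logic above first rejects buffers too small for the device's alignment, then rounds the random length down to an aligned value and never lets it collapse to zero. A minimal sketch of that arithmetic, with made-up numbers in the comments (the helper is hypothetical, not part of the patch):

/* Hypothetical helper, not part of the patch: the clamping dmatest now does. */
static unsigned int clamp_len_to_align(unsigned int len, unsigned int align)
{
	len = (len >> align) << align;	/* e.g. len 21, align 3: 21 -> 16 */
	if (!len)
		len = 1 << align;	/* a 1..7 byte request becomes 8 bytes */
	return len;
}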
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2eea823516a7..285bed0fe17b 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1427,7 +1427,7 @@ static int dw_resume_noirq(struct device *dev)
1427 return 0; 1427 return 0;
1428} 1428}
1429 1429
1430static struct dev_pm_ops dw_dev_pm_ops = { 1430static const struct dev_pm_ops dw_dev_pm_ops = {
1431 .suspend_noirq = dw_suspend_noirq, 1431 .suspend_noirq = dw_suspend_noirq,
1432 .resume_noirq = dw_resume_noirq, 1432 .resume_noirq = dw_resume_noirq,
1433}; 1433};
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 645ca8d54ec4..ca6e6a0cb793 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1470,7 +1470,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1470 return -ENODEV; 1470 return -ENODEV;
1471 1471
1472 if (!devm_request_mem_region(&pdev->dev, res->start, 1472 if (!devm_request_mem_region(&pdev->dev, res->start,
1473 res->end - res->start, pdev->name)) 1473 resource_size(res), pdev->name))
1474 return -EBUSY; 1474 return -EBUSY;
1475 1475
1476 adev = kzalloc(sizeof(*adev), GFP_KERNEL); 1476 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
@@ -1542,7 +1542,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1542 iop_chan->device = adev; 1542 iop_chan->device = adev;
1543 1543
1544 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start, 1544 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1545 res->end - res->start); 1545 resource_size(res));
1546 if (!iop_chan->mmr_base) { 1546 if (!iop_chan->mmr_base) {
1547 ret = -ENOMEM; 1547 ret = -ENOMEM;
1548 goto err_free_iop_chan; 1548 goto err_free_iop_chan;
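The two iop-adma hunks above replace the open-coded `res->end - res->start` with resource_size(); since resource ranges are inclusive, the helper also supplies the previously missing +1. Its definition in <linux/ioport.h> is essentially the following:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}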
diff --git a/drivers/dma/ppc4xx/Makefile b/drivers/dma/ppc4xx/Makefile
new file mode 100644
index 000000000000..b3d259b3e52a
--- /dev/null
+++ b/drivers/dma/ppc4xx/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
new file mode 100644
index 000000000000..0a3478e910f0
--- /dev/null
+++ b/drivers/dma/ppc4xx/adma.c
@@ -0,0 +1,5027 @@
1/*
2 * Copyright (C) 2006-2009 DENX Software Engineering.
3 *
4 * Author: Yuri Tikhonov <yur@emcraft.com>
5 *
6 * Further porting to arch/powerpc by
7 * Anatolij Gustschin <agust@denx.de>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 */
26
27/*
28 * This driver supports the asynchronous DMA copy and RAID engines available
29 * on the AMCC PPC440SPe Processors.
30 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
31 * ADMA driver written by D.Williams.
32 */
33
34#include <linux/init.h>
35#include <linux/module.h>
36#include <linux/async_tx.h>
37#include <linux/delay.h>
38#include <linux/dma-mapping.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/uaccess.h>
42#include <linux/proc_fs.h>
43#include <linux/of.h>
44#include <linux/of_platform.h>
45#include <asm/dcr.h>
46#include <asm/dcr-regs.h>
47#include "adma.h"
48
49enum ppc_adma_init_code {
50 PPC_ADMA_INIT_OK = 0,
51 PPC_ADMA_INIT_MEMRES,
52 PPC_ADMA_INIT_MEMREG,
53 PPC_ADMA_INIT_ALLOC,
54 PPC_ADMA_INIT_COHERENT,
55 PPC_ADMA_INIT_CHANNEL,
56 PPC_ADMA_INIT_IRQ1,
57 PPC_ADMA_INIT_IRQ2,
58 PPC_ADMA_INIT_REGISTER
59};
60
61static char *ppc_adma_errors[] = {
62 [PPC_ADMA_INIT_OK] = "ok",
63 [PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
64 [PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
65 [PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
66 "structure",
67 [PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
68 "hardware descriptors",
69 [PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
70 [PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
71 [PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
72 [PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
73};
74
75static enum ppc_adma_init_code
76ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];
77
78struct ppc_dma_chan_ref {
79 struct dma_chan *chan;
80 struct list_head node;
81};
82
83/* The list of channels exported by ppc440spe ADMA */
84struct list_head
85ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);
86
87/* This flag is set when we want to refetch the xor chain in the interrupt
88 * handler
89 */
90static u32 do_xor_refetch;
91
92/* Pointer to DMA0, DMA1 CP/CS FIFO */
93static void *ppc440spe_dma_fifo_buf;
94
95/* Pointers to last submitted to DMA0, DMA1 CDBs */
96static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
97static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];
98
99/* Pointer to last linked and submitted xor CB */
100static struct ppc440spe_adma_desc_slot *xor_last_linked;
101static struct ppc440spe_adma_desc_slot *xor_last_submit;
102
103/* This array is used in data-check operations for storing a pattern */
104static char ppc440spe_qword[16];
105
106static atomic_t ppc440spe_adma_err_irq_ref;
107static dcr_host_t ppc440spe_mq_dcr_host;
108static unsigned int ppc440spe_mq_dcr_len;
109
110/* Since RXOR operations use the common register (MQ0_CF2H) for setting up
111 * the block size in transactions, we do not allow more than one RXOR
112 * transaction to be active simultaneously. This variable stores whether
113 * RXOR is currently active (PPC440SPE_RXOR_RUN bit is set) or not
114 * (PPC440SPE_RXOR_RUN is clear).
115 */
116static unsigned long ppc440spe_rxor_state;
117
118/* These are used in enable & check routines
119 */
120static u32 ppc440spe_r6_enabled;
121static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
122static struct completion ppc440spe_r6_test_comp;
123
124static int ppc440spe_adma_dma2rxor_prep_src(
125 struct ppc440spe_adma_desc_slot *desc,
126 struct ppc440spe_rxor *cursor, int index,
127 int src_cnt, u32 addr);
128static void ppc440spe_adma_dma2rxor_set_src(
129 struct ppc440spe_adma_desc_slot *desc,
130 int index, dma_addr_t addr);
131static void ppc440spe_adma_dma2rxor_set_mult(
132 struct ppc440spe_adma_desc_slot *desc,
133 int index, u8 mult);
134
135#ifdef ADMA_LL_DEBUG
136#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
137#else
138#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
139#endif
140
141static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
142{
143 struct dma_cdb *cdb;
144 struct xor_cb *cb;
145 int i;
146
147 switch (chan->device->id) {
148 case 0:
149 case 1:
150 cdb = block;
151
152 pr_debug("CDB at %p [%d]:\n"
153 "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
154 "\t sg1u 0x%08x sg1l 0x%08x\n"
155 "\t sg2u 0x%08x sg2l 0x%08x\n"
156 "\t sg3u 0x%08x sg3l 0x%08x\n",
157 cdb, chan->device->id,
158 cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
159 le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
160 le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
161 le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
162 );
163 break;
164 case 2:
165 cb = block;
166
167 pr_debug("CB at %p [%d]:\n"
168 "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
169 "\t cbtah 0x%08x cbtal 0x%08x\n"
170 "\t cblah 0x%08x cblal 0x%08x\n",
171 cb, chan->device->id,
172 cb->cbc, cb->cbbc, cb->cbs,
173 cb->cbtah, cb->cbtal,
174 cb->cblah, cb->cblal);
175 for (i = 0; i < 16; i++) {
176 if (i && !cb->ops[i].h && !cb->ops[i].l)
177 continue;
178 pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
179 i, cb->ops[i].h, cb->ops[i].l);
180 }
181 break;
182 }
183}
184
185static void print_cb_list(struct ppc440spe_adma_chan *chan,
186 struct ppc440spe_adma_desc_slot *iter)
187{
188 for (; iter; iter = iter->hw_next)
189 print_cb(chan, iter->hw_desc);
190}
191
192static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
193 unsigned int src_cnt)
194{
195 int i;
196
197 pr_debug("\n%s(%d):\nsrc: ", __func__, id);
198 for (i = 0; i < src_cnt; i++)
199 pr_debug("\t0x%016llx ", src[i]);
200 pr_debug("dst:\n\t0x%016llx\n", dst);
201}
202
203static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
204 unsigned int src_cnt)
205{
206 int i;
207
208 pr_debug("\n%s(%d):\nsrc: ", __func__, id);
209 for (i = 0; i < src_cnt; i++)
210 pr_debug("\t0x%016llx ", src[i]);
211 pr_debug("dst: ");
212 for (i = 0; i < 2; i++)
213 pr_debug("\t0x%016llx ", dst[i]);
214}
215
216static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
217 unsigned int src_cnt,
218 const unsigned char *scf)
219{
220 int i;
221
222 pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
223 if (scf) {
224 for (i = 0; i < src_cnt; i++)
225 pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
226 } else {
227 for (i = 0; i < src_cnt; i++)
228 pr_debug("\t0x%016llx(no) ", src[i]);
229 }
230
231 pr_debug("dst: ");
232 for (i = 0; i < 2; i++)
233 pr_debug("\t0x%016llx ", src[src_cnt + i]);
234}
235
236/******************************************************************************
237 * Command (Descriptor) Blocks low-level routines
238 ******************************************************************************/
239/**
240 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
241 * pseudo operation
242 */
243static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
244 struct ppc440spe_adma_chan *chan)
245{
246 struct xor_cb *p;
247
248 switch (chan->device->id) {
249 case PPC440SPE_XOR_ID:
250 p = desc->hw_desc;
251 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
252 /* NOP with Command Block Complete Enable */
253 p->cbc = XOR_CBCR_CBCE_BIT;
254 break;
255 case PPC440SPE_DMA0_ID:
256 case PPC440SPE_DMA1_ID:
257 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
258 /* NOP with interrupt */
259 set_bit(PPC440SPE_DESC_INT, &desc->flags);
260 break;
261 default:
262 printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
263 __func__);
264 break;
265 }
266}
267
268/**
269 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
270 * pseudo operation
271 */
272static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
273{
274 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
275 desc->hw_next = NULL;
276 desc->src_cnt = 0;
277 desc->dst_cnt = 1;
278}
279
280/**
281 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
282 */
283static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
284 int src_cnt, unsigned long flags)
285{
286 struct xor_cb *hw_desc = desc->hw_desc;
287
288 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
289 desc->hw_next = NULL;
290 desc->src_cnt = src_cnt;
291 desc->dst_cnt = 1;
292
293 hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
294 if (flags & DMA_PREP_INTERRUPT)
295 /* Enable interrupt on completion */
296 hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
297}
298
299/**
300 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
301 * operation in DMA2 controller
302 */
303static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
304 int dst_cnt, int src_cnt, unsigned long flags)
305{
306 struct xor_cb *hw_desc = desc->hw_desc;
307
308 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
309 desc->hw_next = NULL;
310 desc->src_cnt = src_cnt;
311 desc->dst_cnt = dst_cnt;
312 memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
313 desc->descs_per_op = 0;
314
315 hw_desc->cbc = XOR_CBCR_TGT_BIT;
316 if (flags & DMA_PREP_INTERRUPT)
317 /* Enable interrupt on completion */
318 hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
319}
320
321#define DMA_CTRL_FLAGS_LAST DMA_PREP_FENCE
322#define DMA_PREP_ZERO_P (DMA_CTRL_FLAGS_LAST << 1)
323#define DMA_PREP_ZERO_Q (DMA_PREP_ZERO_P << 1)
324
325/**
326 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
327 * with DMA0/1
328 */
329static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
330 int dst_cnt, int src_cnt, unsigned long flags,
331 unsigned long op)
332{
333 struct dma_cdb *hw_desc;
334 struct ppc440spe_adma_desc_slot *iter;
335 u8 dopc;
336
337 /* Common initialization of a PQ descriptors chain */
338 set_bits(op, &desc->flags);
339 desc->src_cnt = src_cnt;
340 desc->dst_cnt = dst_cnt;
341
342 /* WXOR MULTICAST if both P and Q are being computed
343 * MV_SG1_SG2 if Q only
344 */
345 dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
346 DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
347
348 list_for_each_entry(iter, &desc->group_list, chain_node) {
349 hw_desc = iter->hw_desc;
350 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
351
352 if (likely(!list_is_last(&iter->chain_node,
353 &desc->group_list))) {
354 /* set 'next' pointer */
355 iter->hw_next = list_entry(iter->chain_node.next,
356 struct ppc440spe_adma_desc_slot, chain_node);
357 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
358 } else {
359 /* this is the last descriptor.
360 * this slot will be pasted from ADMA level
361 * each time it wants to configure parameters
362 * of the transaction (src, dst, ...)
363 */
364 iter->hw_next = NULL;
365 if (flags & DMA_PREP_INTERRUPT)
366 set_bit(PPC440SPE_DESC_INT, &iter->flags);
367 else
368 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
369 }
370 }
371
372 /* Set OPS depending on WXOR/RXOR type of operation */
373 if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
374 /* This is a WXOR only chain:
375 * - first descriptors are for zeroing destinations
376 * if PPC440SPE_ZERO_P/Q set;
377 * - descriptors remained are for GF-XOR operations.
378 */
379 iter = list_first_entry(&desc->group_list,
380 struct ppc440spe_adma_desc_slot,
381 chain_node);
382
383 if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
384 hw_desc = iter->hw_desc;
385 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
386 iter = list_first_entry(&iter->chain_node,
387 struct ppc440spe_adma_desc_slot,
388 chain_node);
389 }
390
391 if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
392 hw_desc = iter->hw_desc;
393 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
394 iter = list_first_entry(&iter->chain_node,
395 struct ppc440spe_adma_desc_slot,
396 chain_node);
397 }
398
399 list_for_each_entry_from(iter, &desc->group_list, chain_node) {
400 hw_desc = iter->hw_desc;
401 hw_desc->opc = dopc;
402 }
403 } else {
404 /* This is either RXOR-only or mixed RXOR/WXOR */
405
406 /* The first 1 or 2 slots in chain are always RXOR,
407 * if need to calculate P & Q, then there are two
408 * RXOR slots; if only P or only Q, then there is one
409 */
410 iter = list_first_entry(&desc->group_list,
411 struct ppc440spe_adma_desc_slot,
412 chain_node);
413 hw_desc = iter->hw_desc;
414 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
415
416 if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
417 iter = list_first_entry(&iter->chain_node,
418 struct ppc440spe_adma_desc_slot,
419 chain_node);
420 hw_desc = iter->hw_desc;
421 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
422 }
423
424 /* The remaining descs (if any) are WXORs */
425 if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
426 iter = list_first_entry(&iter->chain_node,
427 struct ppc440spe_adma_desc_slot,
428 chain_node);
429 list_for_each_entry_from(iter, &desc->group_list,
430 chain_node) {
431 hw_desc = iter->hw_desc;
432 hw_desc->opc = dopc;
433 }
434 }
435 }
436}
437
438/**
439 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
440 * for PQ_ZERO_SUM operation
441 */
442static void ppc440spe_desc_init_dma01pqzero_sum(
443 struct ppc440spe_adma_desc_slot *desc,
444 int dst_cnt, int src_cnt)
445{
446 struct dma_cdb *hw_desc;
447 struct ppc440spe_adma_desc_slot *iter;
448 int i = 0;
449 u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
450 DMA_CDB_OPC_MV_SG1_SG2;
451 /*
452 * Initialize starting from the 2nd or 3rd descriptor depending
453 * on dst_cnt. The first one or two slots are for cloning P
454 * and/or Q to chan->pdest and/or chan->qdest as we have
455 * to preserve original P/Q.
456 */
457 iter = list_first_entry(&desc->group_list,
458 struct ppc440spe_adma_desc_slot, chain_node);
459 iter = list_entry(iter->chain_node.next,
460 struct ppc440spe_adma_desc_slot, chain_node);
461
462 if (dst_cnt > 1) {
463 iter = list_entry(iter->chain_node.next,
464 struct ppc440spe_adma_desc_slot, chain_node);
465 }
466 /* initialize each source descriptor in chain */
467 list_for_each_entry_from(iter, &desc->group_list, chain_node) {
468 hw_desc = iter->hw_desc;
469 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
470 iter->src_cnt = 0;
471 iter->dst_cnt = 0;
472
473 /* This is a ZERO_SUM operation:
474 * - <src_cnt> descriptors starting from 2nd or 3rd
475 * descriptor are for GF-XOR operations;
476 * - remaining <dst_cnt> descriptors are for checking the result
477 */
478 if (i++ < src_cnt)
479 /* MV_SG1_SG2 if only Q is being verified
480 * MULTICAST if both P and Q are being verified
481 */
482 hw_desc->opc = dopc;
483 else
484 /* DMA_CDB_OPC_DCHECK128 operation */
485 hw_desc->opc = DMA_CDB_OPC_DCHECK128;
486
487 if (likely(!list_is_last(&iter->chain_node,
488 &desc->group_list))) {
489 /* set 'next' pointer */
490 iter->hw_next = list_entry(iter->chain_node.next,
491 struct ppc440spe_adma_desc_slot,
492 chain_node);
493 } else {
494 /* this is the last descriptor.
495 * this slot will be pasted from ADMA level
496 * each time it wants to configure parameters
497 * of the transaction (src, dst, ...)
498 */
499 iter->hw_next = NULL;
500 /* always enable interrupt generation since we get
501 * the status of pqzero from the handler
502 */
503 set_bit(PPC440SPE_DESC_INT, &iter->flags);
504 }
505 }
506 desc->src_cnt = src_cnt;
507 desc->dst_cnt = dst_cnt;
508}
509
510/**
511 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
512 */
513static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
514 unsigned long flags)
515{
516 struct dma_cdb *hw_desc = desc->hw_desc;
517
518 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
519 desc->hw_next = NULL;
520 desc->src_cnt = 1;
521 desc->dst_cnt = 1;
522
523 if (flags & DMA_PREP_INTERRUPT)
524 set_bit(PPC440SPE_DESC_INT, &desc->flags);
525 else
526 clear_bit(PPC440SPE_DESC_INT, &desc->flags);
527
528 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
529}
530
531/**
532 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
533 */
534static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
535 int value, unsigned long flags)
536{
537 struct dma_cdb *hw_desc = desc->hw_desc;
538
539 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
540 desc->hw_next = NULL;
541 desc->src_cnt = 1;
542 desc->dst_cnt = 1;
543
544 if (flags & DMA_PREP_INTERRUPT)
545 set_bit(PPC440SPE_DESC_INT, &desc->flags);
546 else
547 clear_bit(PPC440SPE_DESC_INT, &desc->flags);
548
549 hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
550 hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
551 hw_desc->opc = DMA_CDB_OPC_DFILL128;
552}
553
554/**
555 * ppc440spe_desc_set_src_addr - set source address into the descriptor
556 */
557static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
558 struct ppc440spe_adma_chan *chan,
559 int src_idx, dma_addr_t addrh,
560 dma_addr_t addrl)
561{
562 struct dma_cdb *dma_hw_desc;
563 struct xor_cb *xor_hw_desc;
564 phys_addr_t addr64, tmplow, tmphi;
565
566 switch (chan->device->id) {
567 case PPC440SPE_DMA0_ID:
568 case PPC440SPE_DMA1_ID:
569 if (!addrh) {
570 addr64 = addrl;
571 tmphi = (addr64 >> 32);
572 tmplow = (addr64 & 0xFFFFFFFF);
573 } else {
574 tmphi = addrh;
575 tmplow = addrl;
576 }
577 dma_hw_desc = desc->hw_desc;
578 dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
579 dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
580 break;
581 case PPC440SPE_XOR_ID:
582 xor_hw_desc = desc->hw_desc;
583 xor_hw_desc->ops[src_idx].l = addrl;
584 xor_hw_desc->ops[src_idx].h |= addrh;
585 break;
586 }
587}
588
589/**
590 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
591 */
592static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
593 struct ppc440spe_adma_chan *chan, u32 mult_index,
594 int sg_index, unsigned char mult_value)
595{
596 struct dma_cdb *dma_hw_desc;
597 struct xor_cb *xor_hw_desc;
598 u32 *psgu;
599
600 switch (chan->device->id) {
601 case PPC440SPE_DMA0_ID:
602 case PPC440SPE_DMA1_ID:
603 dma_hw_desc = desc->hw_desc;
604
605 switch (sg_index) {
606 /* for RXOR operations set multiplier
607 * into source cued address
608 */
609 case DMA_CDB_SG_SRC:
610 psgu = &dma_hw_desc->sg1u;
611 break;
612 /* for WXOR operations set multiplier
613 * into destination cued address(es)
614 */
615 case DMA_CDB_SG_DST1:
616 psgu = &dma_hw_desc->sg2u;
617 break;
618 case DMA_CDB_SG_DST2:
619 psgu = &dma_hw_desc->sg3u;
620 break;
621 default:
622 BUG();
623 }
624
625 *psgu |= cpu_to_le32(mult_value << mult_index);
626 break;
627 case PPC440SPE_XOR_ID:
628 xor_hw_desc = desc->hw_desc;
629 break;
630 default:
631 BUG();
632 }
633}
634
635/**
636 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
637 */
638static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
639 struct ppc440spe_adma_chan *chan,
640 dma_addr_t addrh, dma_addr_t addrl,
641 u32 dst_idx)
642{
643 struct dma_cdb *dma_hw_desc;
644 struct xor_cb *xor_hw_desc;
645 phys_addr_t addr64, tmphi, tmplow;
646 u32 *psgu, *psgl;
647
648 switch (chan->device->id) {
649 case PPC440SPE_DMA0_ID:
650 case PPC440SPE_DMA1_ID:
651 if (!addrh) {
652 addr64 = addrl;
653 tmphi = (addr64 >> 32);
654 tmplow = (addr64 & 0xFFFFFFFF);
655 } else {
656 tmphi = addrh;
657 tmplow = addrl;
658 }
659 dma_hw_desc = desc->hw_desc;
660
661 psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
662 psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;
663
664 *psgl = cpu_to_le32((u32)tmplow);
665 *psgu |= cpu_to_le32((u32)tmphi);
666 break;
667 case PPC440SPE_XOR_ID:
668 xor_hw_desc = desc->hw_desc;
669 xor_hw_desc->cbtal = addrl;
670 xor_hw_desc->cbtah |= addrh;
671 break;
672 }
673}
674
675/**
676 * ppc440spe_desc_set_byte_count - set number of data bytes involved
677 * into the operation
678 */
679static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
680 struct ppc440spe_adma_chan *chan,
681 u32 byte_count)
682{
683 struct dma_cdb *dma_hw_desc;
684 struct xor_cb *xor_hw_desc;
685
686 switch (chan->device->id) {
687 case PPC440SPE_DMA0_ID:
688 case PPC440SPE_DMA1_ID:
689 dma_hw_desc = desc->hw_desc;
690 dma_hw_desc->cnt = cpu_to_le32(byte_count);
691 break;
692 case PPC440SPE_XOR_ID:
693 xor_hw_desc = desc->hw_desc;
694 xor_hw_desc->cbbc = byte_count;
695 break;
696 }
697}
698
699/**
700 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
701 */
702static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
703{
704 /* assume that byte_count is aligned on the 512-boundary;
705 * thus write it directly to the register (bits 23:31 are
706 * reserved there).
707 */
708 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
709}
710
711/**
712 * ppc440spe_desc_set_dcheck - set CHECK pattern
713 */
714static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
715 struct ppc440spe_adma_chan *chan, u8 *qword)
716{
717 struct dma_cdb *dma_hw_desc;
718
719 switch (chan->device->id) {
720 case PPC440SPE_DMA0_ID:
721 case PPC440SPE_DMA1_ID:
722 dma_hw_desc = desc->hw_desc;
723 iowrite32(qword[0], &dma_hw_desc->sg3l);
724 iowrite32(qword[4], &dma_hw_desc->sg3u);
725 iowrite32(qword[8], &dma_hw_desc->sg2l);
726 iowrite32(qword[12], &dma_hw_desc->sg2u);
727 break;
728 default:
729 BUG();
730 }
731}
732
733/**
734 * ppc440spe_xor_set_link - set link address in xor CB
735 */
736static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
737 struct ppc440spe_adma_desc_slot *next_desc)
738{
739 struct xor_cb *xor_hw_desc = prev_desc->hw_desc;
740
741 if (unlikely(!next_desc || !(next_desc->phys))) {
742 printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
743 __func__, next_desc,
744 next_desc ? next_desc->phys : 0);
745 BUG();
746 }
747
748 xor_hw_desc->cbs = 0;
749 xor_hw_desc->cblal = next_desc->phys;
750 xor_hw_desc->cblah = 0;
751 xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
752}
753
754/**
755 * ppc440spe_desc_set_link - set the address of descriptor following this
756 * descriptor in chain
757 */
758static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
759 struct ppc440spe_adma_desc_slot *prev_desc,
760 struct ppc440spe_adma_desc_slot *next_desc)
761{
762 unsigned long flags;
763 struct ppc440spe_adma_desc_slot *tail = next_desc;
764
765 if (unlikely(!prev_desc || !next_desc ||
766 (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
767 /* If the previous 'next' is being overwritten something is wrong.
768 * However, we may refetch from append to initiate list
769 * processing; in that case it's ok.
770 */
771 printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
772 "prev->hw_next=0x%p\n", __func__, prev_desc,
773 next_desc, prev_desc ? prev_desc->hw_next : 0);
774 BUG();
775 }
776
777 local_irq_save(flags);
778
779 /* do s/w chaining both for DMA and XOR descriptors */
780 prev_desc->hw_next = next_desc;
781
782 switch (chan->device->id) {
783 case PPC440SPE_DMA0_ID:
784 case PPC440SPE_DMA1_ID:
785 break;
786 case PPC440SPE_XOR_ID:
787 /* bind descriptor to the chain */
788 while (tail->hw_next)
789 tail = tail->hw_next;
790 xor_last_linked = tail;
791
792 if (prev_desc == xor_last_submit)
793 /* do not link to the last submitted CB */
794 break;
795 ppc440spe_xor_set_link(prev_desc, next_desc);
796 break;
797 }
798
799 local_irq_restore(flags);
800}
801
802/**
803 * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
804 */
805static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
806 struct ppc440spe_adma_chan *chan, int src_idx)
807{
808 struct dma_cdb *dma_hw_desc;
809 struct xor_cb *xor_hw_desc;
810
811 switch (chan->device->id) {
812 case PPC440SPE_DMA0_ID:
813 case PPC440SPE_DMA1_ID:
814 dma_hw_desc = desc->hw_desc;
815 /* May have 0, 1, 2, or 3 sources */
816 switch (dma_hw_desc->opc) {
817 case DMA_CDB_OPC_NO_OP:
818 case DMA_CDB_OPC_DFILL128:
819 return 0;
820 case DMA_CDB_OPC_DCHECK128:
821 if (unlikely(src_idx)) {
822 printk(KERN_ERR "%s: try to get %d source for"
823 " DCHECK128\n", __func__, src_idx);
824 BUG();
825 }
826 return le32_to_cpu(dma_hw_desc->sg1l);
827 case DMA_CDB_OPC_MULTICAST:
828 case DMA_CDB_OPC_MV_SG1_SG2:
829 if (unlikely(src_idx > 2)) {
830 printk(KERN_ERR "%s: try to get %d source from"
831 " DMA descr\n", __func__, src_idx);
832 BUG();
833 }
834 if (src_idx) {
835 if (le32_to_cpu(dma_hw_desc->sg1u) &
836 DMA_CUED_XOR_WIN_MSK) {
837 u8 region;
838
839 if (src_idx == 1)
840 return le32_to_cpu(
841 dma_hw_desc->sg1l) +
842 desc->unmap_len;
843
844 region = (le32_to_cpu(
845 dma_hw_desc->sg1u)) >>
846 DMA_CUED_REGION_OFF;
847
848 region &= DMA_CUED_REGION_MSK;
849 switch (region) {
850 case DMA_RXOR123:
851 return le32_to_cpu(
852 dma_hw_desc->sg1l) +
853 (desc->unmap_len << 1);
854 case DMA_RXOR124:
855 return le32_to_cpu(
856 dma_hw_desc->sg1l) +
857 (desc->unmap_len * 3);
858 case DMA_RXOR125:
859 return le32_to_cpu(
860 dma_hw_desc->sg1l) +
861 (desc->unmap_len << 2);
862 default:
863 printk(KERN_ERR
864 "%s: try to"
865 " get src3 for region %02x"
866 "PPC440SPE_DESC_RXOR12?\n",
867 __func__, region);
868 BUG();
869 }
870 } else {
871 printk(KERN_ERR
872 "%s: try to get %d"
873 " source for non-cued descr\n",
874 __func__, src_idx);
875 BUG();
876 }
877 }
878 return le32_to_cpu(dma_hw_desc->sg1l);
879 default:
880 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
881 __func__, dma_hw_desc->opc);
882 BUG();
883 }
884 return le32_to_cpu(dma_hw_desc->sg1l);
885 case PPC440SPE_XOR_ID:
886 /* May have up to 16 sources */
887 xor_hw_desc = desc->hw_desc;
888 return xor_hw_desc->ops[src_idx].l;
889 }
890 return 0;
891}
892
893/**
894 * ppc440spe_desc_get_dest_addr - extract the destination address from the
895 * descriptor
896 */
897static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
898 struct ppc440spe_adma_chan *chan, int idx)
899{
900 struct dma_cdb *dma_hw_desc;
901 struct xor_cb *xor_hw_desc;
902
903 switch (chan->device->id) {
904 case PPC440SPE_DMA0_ID:
905 case PPC440SPE_DMA1_ID:
906 dma_hw_desc = desc->hw_desc;
907
908 if (likely(!idx))
909 return le32_to_cpu(dma_hw_desc->sg2l);
910 return le32_to_cpu(dma_hw_desc->sg3l);
911 case PPC440SPE_XOR_ID:
912 xor_hw_desc = desc->hw_desc;
913 return xor_hw_desc->cbtal;
914 }
915 return 0;
916}
917
918/**
919 * ppc440spe_desc_get_src_num - extract the number of source addresses from
920 * the descriptor
921 */
922static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
923 struct ppc440spe_adma_chan *chan)
924{
925 struct dma_cdb *dma_hw_desc;
926 struct xor_cb *xor_hw_desc;
927
928 switch (chan->device->id) {
929 case PPC440SPE_DMA0_ID:
930 case PPC440SPE_DMA1_ID:
931 dma_hw_desc = desc->hw_desc;
932
933 switch (dma_hw_desc->opc) {
934 case DMA_CDB_OPC_NO_OP:
935 case DMA_CDB_OPC_DFILL128:
936 return 0;
937 case DMA_CDB_OPC_DCHECK128:
938 return 1;
939 case DMA_CDB_OPC_MV_SG1_SG2:
940 case DMA_CDB_OPC_MULTICAST:
941 /*
942 * Only RXOR operations have more than
943 * one source
944 */
945 if (le32_to_cpu(dma_hw_desc->sg1u) &
946 DMA_CUED_XOR_WIN_MSK) {
947 /* RXOR op, there are 2 or 3 sources */
948 if (((le32_to_cpu(dma_hw_desc->sg1u) >>
949 DMA_CUED_REGION_OFF) &
950 DMA_CUED_REGION_MSK) == DMA_RXOR12) {
951 /* RXOR 1-2 */
952 return 2;
953 } else {
954 /* RXOR 1-2-3/1-2-4/1-2-5 */
955 return 3;
956 }
957 }
958 return 1;
959 default:
960 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
961 __func__, dma_hw_desc->opc);
962 BUG();
963 }
964 case PPC440SPE_XOR_ID:
965 /* up to 16 sources */
966 xor_hw_desc = desc->hw_desc;
967 return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
968 default:
969 BUG();
970 }
971 return 0;
972}
973
974/**
975 * ppc440spe_desc_get_dst_num - get the number of destination addresses in
976 * this descriptor
977 */
978static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
979 struct ppc440spe_adma_chan *chan)
980{
981 struct dma_cdb *dma_hw_desc;
982
983 switch (chan->device->id) {
984 case PPC440SPE_DMA0_ID:
985 case PPC440SPE_DMA1_ID:
986 /* May be 1 or 2 destinations */
987 dma_hw_desc = desc->hw_desc;
988 switch (dma_hw_desc->opc) {
989 case DMA_CDB_OPC_NO_OP:
990 case DMA_CDB_OPC_DCHECK128:
991 return 0;
992 case DMA_CDB_OPC_MV_SG1_SG2:
993 case DMA_CDB_OPC_DFILL128:
994 return 1;
995 case DMA_CDB_OPC_MULTICAST:
996 if (desc->dst_cnt == 2)
997 return 2;
998 else
999 return 1;
1000 default:
1001 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
1002 __func__, dma_hw_desc->opc);
1003 BUG();
1004 }
1005 case PPC440SPE_XOR_ID:
1006 /* Always only 1 destination */
1007 return 1;
1008 default:
1009 BUG();
1010 }
1011 return 0;
1012}
1013
1014/**
1015 * ppc440spe_desc_get_link - get the address of the descriptor that
1016 * follows this one
1017 */
1018static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
1019 struct ppc440spe_adma_chan *chan)
1020{
1021 if (!desc->hw_next)
1022 return 0;
1023
1024 return desc->hw_next->phys;
1025}
1026
1027/**
1028 * ppc440spe_desc_is_aligned - check alignment
1029 */
1030static inline int ppc440spe_desc_is_aligned(
1031 struct ppc440spe_adma_desc_slot *desc, int num_slots)
1032{
1033 return (desc->idx & (num_slots - 1)) ? 0 : 1;
1034}
1035
1036/**
1037 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
1038 * XOR operation
1039 */
1040static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
1041 int *slots_per_op)
1042{
1043 int slot_cnt;
1044
1045 /* each XOR descriptor provides up to 16 source operands */
1046 slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
1047
1048 if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
1049 return slot_cnt;
1050
1051 printk(KERN_ERR "%s: len %d > max %d !!\n",
1052 __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
1053 BUG();
1054 return slot_cnt;
1055}
1056
1057/**
1058 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
1059 * DMA2 PQ operation
1060 */
1061static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
1062 int src_cnt, size_t len)
1063{
1064 signed long long order = 0;
1065 int state = 0;
1066 int addr_count = 0;
1067 int i;
1068 for (i = 1; i < src_cnt; i++) {
1069 dma_addr_t cur_addr = srcs[i];
1070 dma_addr_t old_addr = srcs[i-1];
1071 switch (state) {
1072 case 0:
1073 if (cur_addr == old_addr + len) {
1074 /* direct RXOR */
1075 order = 1;
1076 state = 1;
1077 if (i == src_cnt-1)
1078 addr_count++;
1079 } else if (old_addr == cur_addr + len) {
1080 /* reverse RXOR */
1081 order = -1;
1082 state = 1;
1083 if (i == src_cnt-1)
1084 addr_count++;
1085 } else {
1086 state = 3;
1087 }
1088 break;
1089 case 1:
1090 if (i == src_cnt-2 || (order == -1
1091 && cur_addr != old_addr - len)) {
1092 order = 0;
1093 state = 0;
1094 addr_count++;
1095 } else if (cur_addr == old_addr + len*order) {
1096 state = 2;
1097 if (i == src_cnt-1)
1098 addr_count++;
1099 } else if (cur_addr == old_addr + 2*len) {
1100 state = 2;
1101 if (i == src_cnt-1)
1102 addr_count++;
1103 } else if (cur_addr == old_addr + 3*len) {
1104 state = 2;
1105 if (i == src_cnt-1)
1106 addr_count++;
1107 } else {
1108 order = 0;
1109 state = 0;
1110 addr_count++;
1111 }
1112 break;
1113 case 2:
1114 order = 0;
1115 state = 0;
1116 addr_count++;
1117 break;
1118 }
1119 if (state == 3)
1120 break;
1121 }
1122 if (src_cnt <= 1 || (state != 1 && state != 2)) {
1123 pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
1124 __func__, src_cnt, state, addr_count, order);
1125 for (i = 0; i < src_cnt; i++)
1126 pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
1127 BUG();
1128 }
1129
1130 return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
1131}
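/*
 * Editorial note, not part of the patch: a rough trace of the state
 * machine in ppc440spe_dma2_pq_slot_count() above. For len = 4096 and
 * srcs = { A, A + 4096, A + 2 * 4096 } the walk goes state 0 -> 1
 * (direct RXOR, order = 1) -> 2, yielding addr_count = 1; assuming
 * XOR_MAX_OPS is 16 (as the ops[16] array printed in print_cb()
 * suggests), the operation then fits in a single descriptor slot.
 */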
1132
1133
1134/******************************************************************************
1135 * ADMA channel low-level routines
1136 ******************************************************************************/
1137
1138static u32
1139ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
1140static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);
1141
1142/**
1143 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
1144 */
1145static void ppc440spe_adma_device_clear_eot_status(
1146 struct ppc440spe_adma_chan *chan)
1147{
1148 struct dma_regs *dma_reg;
1149 struct xor_regs *xor_reg;
1150 u8 *p = chan->device->dma_desc_pool_virt;
1151 struct dma_cdb *cdb;
1152 u32 rv, i;
1153
1154 switch (chan->device->id) {
1155 case PPC440SPE_DMA0_ID:
1156 case PPC440SPE_DMA1_ID:
1157 /* read FIFO to ack */
1158 dma_reg = chan->device->dma_reg;
1159 while ((rv = ioread32(&dma_reg->csfpl))) {
1160 i = rv & DMA_CDB_ADDR_MSK;
1161 cdb = (struct dma_cdb *)&p[i -
1162 (u32)chan->device->dma_desc_pool];
1163
1164 /* Clear opcode to ack. This is necessary for
1165 * ZeroSum operations only
1166 */
1167 cdb->opc = 0;
1168
1169 if (test_bit(PPC440SPE_RXOR_RUN,
1170 &ppc440spe_rxor_state)) {
1171 /* probably this is a completed RXOR op,
1172 * get pointer to CDB using the fact that
1173 * physical and virtual addresses of CDB
1174 * in pools have the same offsets
1175 */
1176 if (le32_to_cpu(cdb->sg1u) &
1177 DMA_CUED_XOR_BASE) {
1178 /* this is a RXOR */
1179 clear_bit(PPC440SPE_RXOR_RUN,
1180 &ppc440spe_rxor_state);
1181 }
1182 }
1183
1184 if (rv & DMA_CDB_STATUS_MSK) {
1185 /* ZeroSum check failed
1186 */
1187 struct ppc440spe_adma_desc_slot *iter;
1188 dma_addr_t phys = rv & ~DMA_CDB_MSK;
1189
1190 /*
1191 * Update the status of corresponding
1192 * descriptor.
1193 */
1194 list_for_each_entry(iter, &chan->chain,
1195 chain_node) {
1196 if (iter->phys == phys)
1197 break;
1198 }
1199 /*
1200 * if we cannot find the corresponding
1201 * slot, it's a bug
1202 */
1203 BUG_ON(&iter->chain_node == &chan->chain);
1204
1205 if (iter->xor_check_result) {
1206 if (test_bit(PPC440SPE_DESC_PCHECK,
1207 &iter->flags)) {
1208 *iter->xor_check_result |=
1209 SUM_CHECK_P_RESULT;
1210 } else
1211 if (test_bit(PPC440SPE_DESC_QCHECK,
1212 &iter->flags)) {
1213 *iter->xor_check_result |=
1214 SUM_CHECK_Q_RESULT;
1215 } else
1216 BUG();
1217 }
1218 }
1219 }
1220
1221 rv = ioread32(&dma_reg->dsts);
1222 if (rv) {
1223 pr_err("DMA%d err status: 0x%x\n",
1224 chan->device->id, rv);
1225 /* write back to clear */
1226 iowrite32(rv, &dma_reg->dsts);
1227 }
1228 break;
1229 case PPC440SPE_XOR_ID:
1230 /* reset status bits to ack */
1231 xor_reg = chan->device->xor_reg;
1232 rv = ioread32be(&xor_reg->sr);
1233 iowrite32be(rv, &xor_reg->sr);
1234
1235 if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
1236 if (rv & XOR_IE_RPTIE_BIT) {
1237 /* Read PLB Timeout Error.
1238 * Try to resubmit the CB
1239 */
1240 u32 val = ioread32be(&xor_reg->ccbalr);
1241
1242 iowrite32be(val, &xor_reg->cblalr);
1243
1244 val = ioread32be(&xor_reg->crsr);
1245 iowrite32be(val | XOR_CRSR_XAE_BIT,
1246 &xor_reg->crsr);
1247 } else
1248 pr_err("XOR ERR 0x%x status\n", rv);
1249 break;
1250 }
1251
1252 /* if the XORcore is idle, but there are unprocessed CBs
1253 * then refetch the s/w chain here
1254 */
1255 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
1256 do_xor_refetch)
1257 ppc440spe_chan_append(chan);
1258 break;
1259 }
1260}
1261
1262/**
1263 * ppc440spe_chan_is_busy - get the channel status
1264 */
1265static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
1266{
1267 struct dma_regs *dma_reg;
1268 struct xor_regs *xor_reg;
1269 int busy = 0;
1270
1271 switch (chan->device->id) {
1272 case PPC440SPE_DMA0_ID:
1273 case PPC440SPE_DMA1_ID:
1274 dma_reg = chan->device->dma_reg;
1275 /* if command FIFO's head and tail pointers are equal and
1276 * status tail is the same as command, then channel is free
1277 */
1278 if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
1279 ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
1280 busy = 1;
1281 break;
1282 case PPC440SPE_XOR_ID:
1283 /* use the special status bit for the XORcore
1284 */
1285 xor_reg = chan->device->xor_reg;
1286 busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
1287 break;
1288 }
1289
1290 return busy;
1291}
1292
1293/**
1294 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
1295 */
1296static void ppc440spe_chan_set_first_xor_descriptor(
1297 struct ppc440spe_adma_chan *chan,
1298 struct ppc440spe_adma_desc_slot *next_desc)
1299{
1300 struct xor_regs *xor_reg = chan->device->xor_reg;
1301
1302 if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
1303 printk(KERN_INFO "%s: Warn: XORcore is running "
1304 "when try to set the first CDB!\n",
1305 __func__);
1306
1307 xor_last_submit = xor_last_linked = next_desc;
1308
1309 iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);
1310
1311 iowrite32be(next_desc->phys, &xor_reg->cblalr);
1312 iowrite32be(0, &xor_reg->cblahr);
1313 iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
1314 &xor_reg->cbcr);
1315
1316 chan->hw_chain_inited = 1;
1317}
1318
1319/**
1320 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
1321 * called with irqs disabled
1322 */
1323static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
1324 struct ppc440spe_adma_desc_slot *desc)
1325{
1326 u32 pcdb;
1327 struct dma_regs *dma_reg = chan->device->dma_reg;
1328
1329 pcdb = desc->phys;
1330 if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
1331 pcdb |= DMA_CDB_NO_INT;
1332
1333 chan_last_sub[chan->device->id] = desc;
1334
1335 ADMA_LL_DBG(print_cb(chan, desc->hw_desc));
1336
1337 iowrite32(pcdb, &dma_reg->cpfpl);
1338}
1339
1340/**
1341 * ppc440spe_chan_append - update the h/w chain in the channel
1342 */
1343static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
1344{
1345 struct xor_regs *xor_reg;
1346 struct ppc440spe_adma_desc_slot *iter;
1347 struct xor_cb *xcb;
1348 u32 cur_desc;
1349 unsigned long flags;
1350
1351 local_irq_save(flags);
1352
1353 switch (chan->device->id) {
1354 case PPC440SPE_DMA0_ID:
1355 case PPC440SPE_DMA1_ID:
1356 cur_desc = ppc440spe_chan_get_current_descriptor(chan);
1357
1358 if (likely(cur_desc)) {
1359 iter = chan_last_sub[chan->device->id];
1360 BUG_ON(!iter);
1361 } else {
1362 /* first peer */
1363 iter = chan_first_cdb[chan->device->id];
1364 BUG_ON(!iter);
1365 ppc440spe_dma_put_desc(chan, iter);
1366 chan->hw_chain_inited = 1;
1367 }
1368
1369 /* is there something new to append */
1370 if (!iter->hw_next)
1371 break;
1372
1373 /* flush descriptors from the s/w queue to fifo */
1374 list_for_each_entry_continue(iter, &chan->chain, chain_node) {
1375 ppc440spe_dma_put_desc(chan, iter);
1376 if (!iter->hw_next)
1377 break;
1378 }
1379 break;
1380 case PPC440SPE_XOR_ID:
1381 /* update h/w links and refetch */
1382 if (!xor_last_submit->hw_next)
1383 break;
1384
1385 xor_reg = chan->device->xor_reg;
1386 /* the last linked CDB has to generate an interrupt
1387 * so that we'd be able to append the next lists to h/w
1388 * regardless of the XOR engine state at the moment of
1389 * appending these next lists
1390 */
1391 xcb = xor_last_linked->hw_desc;
1392 xcb->cbc |= XOR_CBCR_CBCE_BIT;
1393
1394 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
1395 /* XORcore is idle. Refetch now */
1396 do_xor_refetch = 0;
1397 ppc440spe_xor_set_link(xor_last_submit,
1398 xor_last_submit->hw_next);
1399
1400 ADMA_LL_DBG(print_cb_list(chan,
1401 xor_last_submit->hw_next));
1402
1403 xor_last_submit = xor_last_linked;
1404 iowrite32be(ioread32be(&xor_reg->crsr) |
1405 XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
1406 &xor_reg->crsr);
1407 } else {
1408 /* XORcore is running. Refetch later in the handler */
1409 do_xor_refetch = 1;
1410 }
1411
1412 break;
1413 }
1414
1415 local_irq_restore(flags);
1416}
1417
1418/**
1419 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
1420 */
1421static u32
1422ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
1423{
1424 struct dma_regs *dma_reg;
1425 struct xor_regs *xor_reg;
1426
1427 if (unlikely(!chan->hw_chain_inited))
1428 /* h/w descriptor chain is not initialized yet */
1429 return 0;
1430
1431 switch (chan->device->id) {
1432 case PPC440SPE_DMA0_ID:
1433 case PPC440SPE_DMA1_ID:
1434 dma_reg = chan->device->dma_reg;
1435 return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
1436 case PPC440SPE_XOR_ID:
1437 xor_reg = chan->device->xor_reg;
1438 return ioread32be(&xor_reg->ccbalr);
1439 }
1440 return 0;
1441}
1442
1443/**
1444 * ppc440spe_chan_run - enable the channel
1445 */
1446static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
1447{
1448 struct xor_regs *xor_reg;
1449
1450 switch (chan->device->id) {
1451 case PPC440SPE_DMA0_ID:
1452 case PPC440SPE_DMA1_ID:
1453 /* DMAs are always enabled, do nothing */
1454 break;
1455 case PPC440SPE_XOR_ID:
1456 /* drain write buffer */
1457 xor_reg = chan->device->xor_reg;
1458
1459 /* fetch descriptor pointed to in <link> */
1460 iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
1461 &xor_reg->crsr);
1462 break;
1463 }
1464}
1465
1466/******************************************************************************
1467 * ADMA device level
1468 ******************************************************************************/
1469
1470static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
1471static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);
1472
1473static dma_cookie_t
1474ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);
1475
1476static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
1477 dma_addr_t addr, int index);
1478static void
1479ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
1480 dma_addr_t addr, int index);
1481
1482static void
1483ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
1484 dma_addr_t *paddr, unsigned long flags);
1485static void
1486ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
1487 dma_addr_t addr, int index);
1488static void
1489ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
1490 unsigned char mult, int index, int dst_pos);
1491static void
1492ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
1493 dma_addr_t paddr, dma_addr_t qaddr);
1494
1495static struct page *ppc440spe_rxor_srcs[32];
1496
1497/**
1498 * ppc440spe_can_rxor - check if the operands may be processed with RXOR
1499 */
1500static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
1501{
1502 int i, order = 0, state = 0;
1503 int idx = 0;
1504
1505 if (unlikely(!(src_cnt > 1)))
1506 return 0;
1507
1508 BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));
1509
1510 /* Skip holes in the source list before checking */
1511 for (i = 0; i < src_cnt; i++) {
1512 if (!srcs[i])
1513 continue;
1514 ppc440spe_rxor_srcs[idx++] = srcs[i];
1515 }
1516 src_cnt = idx;
1517
1518 for (i = 1; i < src_cnt; i++) {
1519 char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
1520 char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);
1521
1522 switch (state) {
1523 case 0:
1524 if (cur_addr == old_addr + len) {
1525 /* direct RXOR */
1526 order = 1;
1527 state = 1;
1528 } else if (old_addr == cur_addr + len) {
1529 /* reverse RXOR */
1530 order = -1;
1531 state = 1;
1532 } else
1533 goto out;
1534 break;
1535 case 1:
1536 if ((i == src_cnt - 2) ||
1537 (order == -1 && cur_addr != old_addr - len)) {
1538 order = 0;
1539 state = 0;
1540 } else if ((cur_addr == old_addr + len * order) ||
1541 (cur_addr == old_addr + 2 * len) ||
1542 (cur_addr == old_addr + 3 * len)) {
1543 state = 2;
1544 } else {
1545 order = 0;
1546 state = 0;
1547 }
1548 break;
1549 case 2:
1550 order = 0;
1551 state = 0;
1552 break;
1553 }
1554 }
1555
1556out:
1557 if (state == 1 || state == 2)
1558 return 1;
1559
1560 return 0;
1561}
1562
1563/**
1564 * ppc440spe_adma_estimate - estimate the efficiency of processing
1565 * the given operation on this channel. It's assumed that 'chan' is
1566 * capable of processing the 'cap' type of operation.
1567 * @chan: channel to use
1568 * @cap: type of transaction
1569 * @dst_lst: array of destination pointers
1570 * @dst_cnt: number of destination operands
1571 * @src_lst: array of source pointers
1572 * @src_cnt: number of source operands
1573 * @src_sz: size of each source operand
1574 */
1575static int ppc440spe_adma_estimate(struct dma_chan *chan,
1576 enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
1577 struct page **src_lst, int src_cnt, size_t src_sz)
1578{
1579 int ef = 1;
1580
1581 if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
1582 /* If RAID-6 capabilities were not activated don't try
1583 * to use them
1584 */
1585 if (unlikely(!ppc440spe_r6_enabled))
1586 return -1;
1587 }
1588 /* In the current implementation of the ppc440spe ADMA driver it
1589 * makes sense to single out only the pq case, because it may be
1590 * processed:
1591 * (1) either using the Biskup method on DMA2;
1592 * (2) or on DMA0/1.
1593 * Thus we favour (1) if the sources are suitable;
1594 * otherwise let it be processed on one of the DMA0/1 engines.
1595 * In the sum_product case, where the destination is also a
1596 * source, process it on DMA0/1 only.
1597 */
1598 if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {
1599
1600 if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
1601 ef = 0; /* sum_product case, process on DMA0/1 */
1602 else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
1603 ef = 3; /* override (DMA0/1 + idle) */
1604 else
1605 ef = 0; /* can't process on DMA2 if !rxor */
1606 }
1607
1608 /* channel idleness increases the priority */
1609 if (likely(ef) &&
1610 !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
1611 ef++;
1612
1613 return ef;
1614}
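/*
 * Editorial note, not part of the patch: as read from the code above,
 * the returned rank is roughly -1 (RAID-6 support not enabled), 0 (DMA2
 * asked to do a PQ it cannot RXOR, or the sum_product case), 1 as the
 * base for everything else, 3 for DMA2 with RXOR-friendly sources, with
 * an extra +1 whenever a non-zero-ranked channel is currently idle.
 */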
1615
1616struct dma_chan *
1617ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
1618 struct page **dst_lst, int dst_cnt, struct page **src_lst,
1619 int src_cnt, size_t src_sz)
1620{
1621 struct dma_chan *best_chan = NULL;
1622 struct ppc_dma_chan_ref *ref;
1623 int best_rank = -1;
1624
1625 if (unlikely(!src_sz))
1626 return NULL;
1627 if (src_sz > PAGE_SIZE) {
1628 /*
1629 * should a user of the api ever pass > PAGE_SIZE requests
1630 * we sort out cases where temporary page-sized buffers
1631 * are used.
1632 */
1633 switch (cap) {
1634 case DMA_PQ:
1635 if (src_cnt == 1 && dst_lst[1] == src_lst[0])
1636 return NULL;
1637 if (src_cnt == 2 && dst_lst[1] == src_lst[1])
1638 return NULL;
1639 break;
1640 case DMA_PQ_VAL:
1641 case DMA_XOR_VAL:
1642 return NULL;
1643 default:
1644 break;
1645 }
1646 }
1647
1648 list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
1649 if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
1650 int rank;
1651
1652 rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
1653 dst_cnt, src_lst, src_cnt, src_sz);
1654 if (rank > best_rank) {
1655 best_rank = rank;
1656 best_chan = ref->chan;
1657 }
1658 }
1659 }
1660
1661 return best_chan;
1662}
1663EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
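/*
 * Editorial sketch, not part of the patch: a hypothetical caller of the
 * helper exported above. dst_pages/src_pages are assumed to be arrays
 * of struct page * already prepared by the RAID code.
 */
static struct dma_chan *example_pick_pq_channel(struct page **dst_pages,
						struct page **src_pages,
						int src_cnt)
{
	/* two destinations (P and Q), one PAGE_SIZE stripe per operand */
	return ppc440spe_async_tx_find_best_channel(DMA_PQ, dst_pages, 2,
						    src_pages, src_cnt,
						    PAGE_SIZE);
}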
1664
1665/**
1666 * ppc440spe_get_group_entry - get group entry with index idx
1667 * @tdesc: the last allocated slot in the group.
1668 */
1669static struct ppc440spe_adma_desc_slot *
1670ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
1671{
1672 struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
1673 int i = 0;
1674
1675 if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
1676 printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
1677 __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
1678 BUG();
1679 }
1680
1681 list_for_each_entry(iter, &tdesc->group_list, chain_node) {
1682 if (i++ == entry_idx)
1683 break;
1684 }
1685 return iter;
1686}
1687
1688/**
1689 * ppc440spe_adma_free_slots - flags descriptor slots for reuse
1690 * @slot: Slot to free
1691 * Caller must hold &ppc440spe_chan->lock while calling this function
1692 */
1693static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
1694 struct ppc440spe_adma_chan *chan)
1695{
1696 int stride = slot->slots_per_op;
1697
1698 while (stride--) {
1699 slot->slots_per_op = 0;
1700 slot = list_entry(slot->slot_node.next,
1701 struct ppc440spe_adma_desc_slot,
1702 slot_node);
1703 }
1704}
1705
1706static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
1707 struct ppc440spe_adma_desc_slot *desc)
1708{
1709 u32 src_cnt, dst_cnt;
1710 dma_addr_t addr;
1711
1712 /*
1713	 * get the number of sources & destinations
1714 * included in this descriptor and unmap
1715 * them all
1716 */
1717 src_cnt = ppc440spe_desc_get_src_num(desc, chan);
1718 dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
1719
1720 /* unmap destinations */
1721 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1722 while (dst_cnt--) {
1723 addr = ppc440spe_desc_get_dest_addr(
1724 desc, chan, dst_cnt);
1725 dma_unmap_page(chan->device->dev,
1726 addr, desc->unmap_len,
1727 DMA_FROM_DEVICE);
1728 }
1729 }
1730
1731 /* unmap sources */
1732 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1733 while (src_cnt--) {
1734 addr = ppc440spe_desc_get_src_addr(
1735 desc, chan, src_cnt);
1736 dma_unmap_page(chan->device->dev,
1737 addr, desc->unmap_len,
1738 DMA_TO_DEVICE);
1739 }
1740 }
1741}
1742
1743/**
1744 * ppc440spe_adma_run_tx_complete_actions - run the callbacks registered
1745 * for completion of this descriptor
1746 */
1747static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
1748 struct ppc440spe_adma_desc_slot *desc,
1749 struct ppc440spe_adma_chan *chan,
1750 dma_cookie_t cookie)
1751{
1752 int i;
1753
1754 BUG_ON(desc->async_tx.cookie < 0);
1755 if (desc->async_tx.cookie > 0) {
1756 cookie = desc->async_tx.cookie;
1757 desc->async_tx.cookie = 0;
1758
1759 /* call the callback (must not sleep or submit new
1760 * operations to this channel)
1761 */
1762 if (desc->async_tx.callback)
1763 desc->async_tx.callback(
1764 desc->async_tx.callback_param);
1765
1766 /* unmap dma addresses
1767 * (unmap_single vs unmap_page?)
1768 *
1769 * actually, ppc's dma_unmap_page() functions are empty, so
1770 * the following code is just for the sake of completeness
1771 */
1772 if (chan && chan->needs_unmap && desc->group_head &&
1773 desc->unmap_len) {
1774 struct ppc440spe_adma_desc_slot *unmap =
1775 desc->group_head;
1776 /* assume 1 slot per op always */
1777 u32 slot_count = unmap->slot_cnt;
1778
1779 /* Run through the group list and unmap addresses */
1780 for (i = 0; i < slot_count; i++) {
1781 BUG_ON(!unmap);
1782 ppc440spe_adma_unmap(chan, unmap);
1783 unmap = unmap->hw_next;
1784 }
1785 }
1786 }
1787
1788 /* run dependent operations */
1789 dma_run_dependencies(&desc->async_tx);
1790
1791 return cookie;
1792}
1793
1794/**
1795 * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
1796 */
1797static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
1798 struct ppc440spe_adma_chan *chan)
1799{
1800 /* the client is allowed to attach dependent operations
1801 * until 'ack' is set
1802 */
1803 if (!async_tx_test_ack(&desc->async_tx))
1804 return 0;
1805
1806 /* leave the last descriptor in the chain
1807 * so we can append to it
1808 */
1809 if (list_is_last(&desc->chain_node, &chan->chain) ||
1810 desc->phys == ppc440spe_chan_get_current_descriptor(chan))
1811 return 1;
1812
1813 if (chan->device->id != PPC440SPE_XOR_ID) {
1814		/* Our DMA interrupt handler clears the opc field of
1815		 * each processed descriptor. For all types of
1816		 * operations except ZeroSum we do not actually
1817		 * need an ack from the interrupt handler. ZeroSum is a
1818		 * special case since the result of this operation
1819		 * is available from the handler only, so if we see
1820		 * such a descriptor (which has not been processed yet)
1821		 * then leave it in the chain.
1822		 */
1823 struct dma_cdb *cdb = desc->hw_desc;
1824 if (cdb->opc == DMA_CDB_OPC_DCHECK128)
1825 return 1;
1826 }
1827
1828 dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
1829 desc->phys, desc->idx, desc->slots_per_op);
1830
1831 list_del(&desc->chain_node);
1832 ppc440spe_adma_free_slots(desc, chan);
1833 return 0;
1834}
1835
1836/**
1837 * __ppc440spe_adma_slot_cleanup - the common clean-up routine which
1838 * runs through the channel's CDB list until it reaches the descriptor
1839 * currently being processed. When the routine determines that all CDBs
1840 * of a group have completed, the corresponding callbacks (if any) are
1841 * called and the slots are freed.
1842 */
1843static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1844{
1845 struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
1846 dma_cookie_t cookie = 0;
1847 u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
1848 int busy = ppc440spe_chan_is_busy(chan);
1849 int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
1850
1851 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
1852 chan->device->id, __func__);
1853
1854 if (!current_desc) {
1855 /* There were no transactions yet, so
1856 * nothing to clean
1857 */
1858 return;
1859 }
1860
1861 /* free completed slots from the chain starting with
1862 * the oldest descriptor
1863 */
1864 list_for_each_entry_safe(iter, _iter, &chan->chain,
1865 chain_node) {
1866 dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
1867 "busy: %d this_desc: %#llx next_desc: %#x "
1868 "cur: %#x ack: %d\n",
1869 iter->async_tx.cookie, iter->idx, busy, iter->phys,
1870 ppc440spe_desc_get_link(iter, chan), current_desc,
1871 async_tx_test_ack(&iter->async_tx));
1872 prefetch(_iter);
1873 prefetch(&_iter->async_tx);
1874
1875		/* do not advance past the current descriptor loaded into the
1876		 * hardware channel; subsequent descriptors are either in progress
1877		 * or have not been submitted
1878 */
1879 if (seen_current)
1880 break;
1881
1882 /* stop the search if we reach the current descriptor and the
1883 * channel is busy, or if it appears that the current descriptor
1884 * needs to be re-read (i.e. has been appended to)
1885 */
1886 if (iter->phys == current_desc) {
1887 BUG_ON(seen_current++);
1888 if (busy || ppc440spe_desc_get_link(iter, chan)) {
1889 /* not all descriptors of the group have
1890 * been completed; exit.
1891 */
1892 break;
1893 }
1894 }
1895
1896 /* detect the start of a group transaction */
1897 if (!slot_cnt && !slots_per_op) {
1898 slot_cnt = iter->slot_cnt;
1899 slots_per_op = iter->slots_per_op;
1900 if (slot_cnt <= slots_per_op) {
1901 slot_cnt = 0;
1902 slots_per_op = 0;
1903 }
1904 }
1905
1906 if (slot_cnt) {
1907 if (!group_start)
1908 group_start = iter;
1909 slot_cnt -= slots_per_op;
1910 }
1911
1912 /* all the members of a group are complete */
1913 if (slots_per_op != 0 && slot_cnt == 0) {
1914 struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
1915 int end_of_chain = 0;
1916
1917 /* clean up the group */
1918 slot_cnt = group_start->slot_cnt;
1919 grp_iter = group_start;
1920 list_for_each_entry_safe_from(grp_iter, _grp_iter,
1921 &chan->chain, chain_node) {
1922
1923 cookie = ppc440spe_adma_run_tx_complete_actions(
1924 grp_iter, chan, cookie);
1925
1926 slot_cnt -= slots_per_op;
1927 end_of_chain = ppc440spe_adma_clean_slot(
1928 grp_iter, chan);
1929 if (end_of_chain && slot_cnt) {
1930 /* Should wait for ZeroSum completion */
1931 if (cookie > 0)
1932 chan->completed_cookie = cookie;
1933 return;
1934 }
1935
1936 if (slot_cnt == 0 || end_of_chain)
1937 break;
1938 }
1939
1940 /* the group should be complete at this point */
1941 BUG_ON(slot_cnt);
1942
1943 slots_per_op = 0;
1944 group_start = NULL;
1945 if (end_of_chain)
1946 break;
1947 else
1948 continue;
1949 } else if (slots_per_op) /* wait for group completion */
1950 continue;
1951
1952 cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
1953 cookie);
1954
1955 if (ppc440spe_adma_clean_slot(iter, chan))
1956 break;
1957 }
1958
1959 BUG_ON(!seen_current);
1960
1961 if (cookie > 0) {
1962 chan->completed_cookie = cookie;
1963 pr_debug("\tcompleted cookie %d\n", cookie);
1964 }
1965
1966}
1967
1968/**
1969 * ppc440spe_adma_tasklet - clean up watch-dog initiator
1970 */
1971static void ppc440spe_adma_tasklet(unsigned long data)
1972{
1973 struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
1974
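	/*
	 * The nested lock class is used here presumably so that lockdep
	 * does not flag the case where a completion callback run from this
	 * cleanup path submits a dependent operation that takes another
	 * channel's lock.
	 */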
1975 spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
1976 __ppc440spe_adma_slot_cleanup(chan);
1977 spin_unlock(&chan->lock);
1978}
1979
1980/**
1981 * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
1982 */
1983static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1984{
1985 spin_lock_bh(&chan->lock);
1986 __ppc440spe_adma_slot_cleanup(chan);
1987 spin_unlock_bh(&chan->lock);
1988}
1989
1990/**
1991 * ppc440spe_adma_alloc_slots - allocate free slots (if any)
1992 */
1993static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
1994 struct ppc440spe_adma_chan *chan, int num_slots,
1995 int slots_per_op)
1996{
1997 struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
1998 struct ppc440spe_adma_desc_slot *alloc_start = NULL;
1999 struct list_head chain = LIST_HEAD_INIT(chain);
2000 int slots_found, retry = 0;
2001
2002
2003 BUG_ON(!num_slots || !slots_per_op);
2004	/* start the search from the last allocated descriptor;
2005	 * if a contiguous allocation cannot be found, start searching
2006	 * from the beginning of the list
2007	 */
2008retry:
2009 slots_found = 0;
2010 if (retry == 0)
2011 iter = chan->last_used;
2012 else
2013 iter = list_entry(&chan->all_slots,
2014 struct ppc440spe_adma_desc_slot,
2015 slot_node);
2016 list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
2017 slot_node) {
2018 prefetch(_iter);
2019 prefetch(&_iter->async_tx);
2020 if (iter->slots_per_op) {
2021 slots_found = 0;
2022 continue;
2023 }
2024
2025 /* start the allocation if the slot is correctly aligned */
2026 if (!slots_found++)
2027 alloc_start = iter;
2028
2029 if (slots_found == num_slots) {
2030 struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
2031 struct ppc440spe_adma_desc_slot *last_used = NULL;
2032
2033 iter = alloc_start;
2034 while (num_slots) {
2035 int i;
2036 /* pre-ack all but the last descriptor */
2037 if (num_slots != slots_per_op)
2038 async_tx_ack(&iter->async_tx);
2039
2040 list_add_tail(&iter->chain_node, &chain);
2041 alloc_tail = iter;
2042 iter->async_tx.cookie = 0;
2043 iter->hw_next = NULL;
2044 iter->flags = 0;
2045 iter->slot_cnt = num_slots;
2046 iter->xor_check_result = NULL;
2047 for (i = 0; i < slots_per_op; i++) {
2048 iter->slots_per_op = slots_per_op - i;
2049 last_used = iter;
2050 iter = list_entry(iter->slot_node.next,
2051 struct ppc440spe_adma_desc_slot,
2052 slot_node);
2053 }
2054 num_slots -= slots_per_op;
2055 }
2056 alloc_tail->group_head = alloc_start;
2057 alloc_tail->async_tx.cookie = -EBUSY;
2058 list_splice(&chain, &alloc_tail->group_list);
2059 chan->last_used = last_used;
2060 return alloc_tail;
2061 }
2062 }
2063 if (!retry++)
2064 goto retry;
2065
2066 /* try to free some slots if the allocation fails */
2067 tasklet_schedule(&chan->irq_tasklet);
2068 return NULL;
2069}
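/*
 * Note: the slot returned by ppc440spe_adma_alloc_slots() is the tail of
 * the allocated group (alloc_tail); its ->group_head points back to the
 * first slot and its ->group_list links every slot of the allocation,
 * which is how the prep_* routines below walk the group.
 */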
2070
2071/**
2072 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
2073 */
2074static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
2075{
2076 struct ppc440spe_adma_chan *ppc440spe_chan;
2077 struct ppc440spe_adma_desc_slot *slot = NULL;
2078 char *hw_desc;
2079 int i, db_sz;
2080 int init;
2081
2082 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2083 init = ppc440spe_chan->slots_allocated ? 0 : 1;
2084 chan->chan_id = ppc440spe_chan->device->id;
2085
2086 /* Allocate descriptor slots */
2087 i = ppc440spe_chan->slots_allocated;
2088 if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
2089 db_sz = sizeof(struct dma_cdb);
2090 else
2091 db_sz = sizeof(struct xor_cb);
2092
2093 for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
2094 slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
2095 GFP_KERNEL);
2096 if (!slot) {
2097			printk(KERN_INFO "SPE ADMA Channel only initialized"
2098				" %d descriptor slots\n", i--);
2099 break;
2100 }
2101
2102 hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
2103 slot->hw_desc = (void *) &hw_desc[i * db_sz];
2104 dma_async_tx_descriptor_init(&slot->async_tx, chan);
2105 slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
2106 INIT_LIST_HEAD(&slot->chain_node);
2107 INIT_LIST_HEAD(&slot->slot_node);
2108 INIT_LIST_HEAD(&slot->group_list);
2109 slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
2110 slot->idx = i;
2111
2112 spin_lock_bh(&ppc440spe_chan->lock);
2113 ppc440spe_chan->slots_allocated++;
2114 list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
2115 spin_unlock_bh(&ppc440spe_chan->lock);
2116 }
2117
2118 if (i && !ppc440spe_chan->last_used) {
2119 ppc440spe_chan->last_used =
2120 list_entry(ppc440spe_chan->all_slots.next,
2121 struct ppc440spe_adma_desc_slot,
2122 slot_node);
2123 }
2124
2125 dev_dbg(ppc440spe_chan->device->common.dev,
2126 "ppc440spe adma%d: allocated %d descriptor slots\n",
2127 ppc440spe_chan->device->id, i);
2128
2129 /* initialize the channel and the chain with a null operation */
2130 if (init) {
2131 switch (ppc440spe_chan->device->id) {
2132 case PPC440SPE_DMA0_ID:
2133 case PPC440SPE_DMA1_ID:
2134 ppc440spe_chan->hw_chain_inited = 0;
2135 /* Use WXOR for self-testing */
2136 if (!ppc440spe_r6_tchan)
2137 ppc440spe_r6_tchan = ppc440spe_chan;
2138 break;
2139 case PPC440SPE_XOR_ID:
2140 ppc440spe_chan_start_null_xor(ppc440spe_chan);
2141 break;
2142 default:
2143 BUG();
2144 }
2145 ppc440spe_chan->needs_unmap = 1;
2146 }
2147
2148 return (i > 0) ? i : -ENOMEM;
2149}
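/*
 * Note: the hardware descriptors above are not allocated individually;
 * each slot->hw_desc simply points at offset i * db_sz within the
 * channel's pre-allocated coherent pool (dma_desc_pool_virt), with
 * slot->phys holding the matching bus address, while the
 * ppc440spe_adma_desc_slot structure itself is plain kzalloc'ed
 * bookkeeping.
 */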
2150
2151/**
2152 * ppc440spe_desc_assign_cookie - assign a cookie
2153 */
2154static dma_cookie_t ppc440spe_desc_assign_cookie(
2155 struct ppc440spe_adma_chan *chan,
2156 struct ppc440spe_adma_desc_slot *desc)
2157{
2158 dma_cookie_t cookie = chan->common.cookie;
2159
2160 cookie++;
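	/* dma_cookie_t is signed and negative values are reserved for
	 * error codes (e.g. the -EBUSY placeholder set at allocation
	 * time), so wrap back to 1 on overflow.
	 */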
2161 if (cookie < 0)
2162 cookie = 1;
2163 chan->common.cookie = desc->async_tx.cookie = cookie;
2164 return cookie;
2165}
2166
2167/**
2168 * ppc440spe_rxor_set_region - set the RXOR region mask in the descriptor
2169 */
2170static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
2171 u8 xor_arg_no, u32 mask)
2172{
2173 struct xor_cb *xcb = desc->hw_desc;
2174
2175 xcb->ops[xor_arg_no].h |= mask;
2176}
2177
2178/**
2179 * ppc440spe_rxor_set_src - set the RXOR source address in the descriptor
2180 */
2181static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
2182 u8 xor_arg_no, dma_addr_t addr)
2183{
2184 struct xor_cb *xcb = desc->hw_desc;
2185
2186 xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
2187 xcb->ops[xor_arg_no].l = addr;
2188}
2189
2190/**
2191 * ppc440spe_rxor_set_mult - set the RXOR multiplier in the descriptor
2192 */
2193static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
2194 u8 xor_arg_no, u8 idx, u8 mult)
2195{
2196 struct xor_cb *xcb = desc->hw_desc;
2197
2198 xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
2199}
2200
2201/**
2202 * ppc440spe_adma_check_threshold - append CDBs to h/w chain if threshold
2203 * has been reached
2204 */
2205static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
2206{
2207 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
2208 chan->device->id, chan->pending);
2209
2210 if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
2211 chan->pending = 0;
2212 ppc440spe_chan_append(chan);
2213 }
2214}
2215
2216/**
2217 * ppc440spe_adma_tx_submit - submit new descriptor group to the channel
2218 * (the descriptors are not necessarily appended to the h/w
2219 * chain right away)
2220 */
2221static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
2222{
2223 struct ppc440spe_adma_desc_slot *sw_desc;
2224 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
2225 struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
2226 int slot_cnt;
2227 int slots_per_op;
2228 dma_cookie_t cookie;
2229
2230 sw_desc = tx_to_ppc440spe_adma_slot(tx);
2231
2232 group_start = sw_desc->group_head;
2233 slot_cnt = group_start->slot_cnt;
2234 slots_per_op = group_start->slots_per_op;
2235
2236 spin_lock_bh(&chan->lock);
2237
2238 cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
2239
2240 if (unlikely(list_empty(&chan->chain))) {
2241 /* first peer */
2242 list_splice_init(&sw_desc->group_list, &chan->chain);
2243 chan_first_cdb[chan->device->id] = group_start;
2244 } else {
2245 /* isn't first peer, bind CDBs to chain */
2246 old_chain_tail = list_entry(chan->chain.prev,
2247 struct ppc440spe_adma_desc_slot,
2248 chain_node);
2249 list_splice_init(&sw_desc->group_list,
2250 &old_chain_tail->chain_node);
2251 /* fix up the hardware chain */
2252 ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
2253 }
2254
2255 /* increment the pending count by the number of operations */
2256 chan->pending += slot_cnt / slots_per_op;
2257 ppc440spe_adma_check_threshold(chan);
2258 spin_unlock_bh(&chan->lock);
2259
2260 dev_dbg(chan->device->common.dev,
2261 "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
2262 chan->device->id, __func__,
2263 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
2264
2265 return cookie;
2266}
2267
2268/**
2269 * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
2270 */
2271static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
2272 struct dma_chan *chan, unsigned long flags)
2273{
2274 struct ppc440spe_adma_chan *ppc440spe_chan;
2275 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2276 int slot_cnt, slots_per_op;
2277
2278 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2279
2280 dev_dbg(ppc440spe_chan->device->common.dev,
2281 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
2282 __func__);
2283
2284 spin_lock_bh(&ppc440spe_chan->lock);
2285 slot_cnt = slots_per_op = 1;
2286 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2287 slots_per_op);
2288 if (sw_desc) {
2289 group_start = sw_desc->group_head;
2290 ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
2291 group_start->unmap_len = 0;
2292 sw_desc->async_tx.flags = flags;
2293 }
2294 spin_unlock_bh(&ppc440spe_chan->lock);
2295
2296 return sw_desc ? &sw_desc->async_tx : NULL;
2297}
2298
2299/**
2300 * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
2301 */
2302static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
2303 struct dma_chan *chan, dma_addr_t dma_dest,
2304 dma_addr_t dma_src, size_t len, unsigned long flags)
2305{
2306 struct ppc440spe_adma_chan *ppc440spe_chan;
2307 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2308 int slot_cnt, slots_per_op;
2309
2310 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2311
2312 if (unlikely(!len))
2313 return NULL;
2314
2315 BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
2316
2317 spin_lock_bh(&ppc440spe_chan->lock);
2318
2319 dev_dbg(ppc440spe_chan->device->common.dev,
2320 "ppc440spe adma%d: %s len: %u int_en %d\n",
2321 ppc440spe_chan->device->id, __func__, len,
2322 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2323 slot_cnt = slots_per_op = 1;
2324 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2325 slots_per_op);
2326 if (sw_desc) {
2327 group_start = sw_desc->group_head;
2328 ppc440spe_desc_init_memcpy(group_start, flags);
2329 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2330 ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
2331 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2332 sw_desc->unmap_len = len;
2333 sw_desc->async_tx.flags = flags;
2334 }
2335 spin_unlock_bh(&ppc440spe_chan->lock);
2336
2337 return sw_desc ? &sw_desc->async_tx : NULL;
2338}
2339
2340/**
2341 * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
2342 */
2343static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
2344 struct dma_chan *chan, dma_addr_t dma_dest, int value,
2345 size_t len, unsigned long flags)
2346{
2347 struct ppc440spe_adma_chan *ppc440spe_chan;
2348 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2349 int slot_cnt, slots_per_op;
2350
2351 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2352
2353 if (unlikely(!len))
2354 return NULL;
2355
2356 BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
2357
2358 spin_lock_bh(&ppc440spe_chan->lock);
2359
2360 dev_dbg(ppc440spe_chan->device->common.dev,
2361 "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
2362 ppc440spe_chan->device->id, __func__, value, len,
2363 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2364
2365 slot_cnt = slots_per_op = 1;
2366 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2367 slots_per_op);
2368 if (sw_desc) {
2369 group_start = sw_desc->group_head;
2370 ppc440spe_desc_init_memset(group_start, value, flags);
2371 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2372 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2373 sw_desc->unmap_len = len;
2374 sw_desc->async_tx.flags = flags;
2375 }
2376 spin_unlock_bh(&ppc440spe_chan->lock);
2377
2378 return sw_desc ? &sw_desc->async_tx : NULL;
2379}
2380
2381/**
2382 * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
2383 */
2384static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
2385 struct dma_chan *chan, dma_addr_t dma_dest,
2386 dma_addr_t *dma_src, u32 src_cnt, size_t len,
2387 unsigned long flags)
2388{
2389 struct ppc440spe_adma_chan *ppc440spe_chan;
2390 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2391 int slot_cnt, slots_per_op;
2392
2393 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2394
2395 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
2396 dma_dest, dma_src, src_cnt));
2397 if (unlikely(!len))
2398 return NULL;
2399 BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
2400
2401 dev_dbg(ppc440spe_chan->device->common.dev,
2402 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2403 ppc440spe_chan->device->id, __func__, src_cnt, len,
2404 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2405
2406 spin_lock_bh(&ppc440spe_chan->lock);
2407 slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
2408 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2409 slots_per_op);
2410 if (sw_desc) {
2411 group_start = sw_desc->group_head;
2412 ppc440spe_desc_init_xor(group_start, src_cnt, flags);
2413 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2414 while (src_cnt--)
2415 ppc440spe_adma_memcpy_xor_set_src(group_start,
2416 dma_src[src_cnt], src_cnt);
2417 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2418 sw_desc->unmap_len = len;
2419 sw_desc->async_tx.flags = flags;
2420 }
2421 spin_unlock_bh(&ppc440spe_chan->lock);
2422
2423 return sw_desc ? &sw_desc->async_tx : NULL;
2424}
2425
2426static inline void
2427ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
2428 int src_cnt);
2429static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
2430
2431/**
2432 * ppc440spe_adma_init_dma2rxor_slot -
2433 */
2434static void ppc440spe_adma_init_dma2rxor_slot(
2435 struct ppc440spe_adma_desc_slot *desc,
2436 dma_addr_t *src, int src_cnt)
2437{
2438 int i;
2439
2440 /* initialize CDB */
2441 for (i = 0; i < src_cnt; i++) {
2442 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
2443 desc->src_cnt, (u32)src[i]);
2444 }
2445}
2446
2447/**
2448 * ppc440spe_dma01_prep_mult - prepare descriptors for a Q operation
2449 * where the destination is also a source
2450 */
2451static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
2452 struct ppc440spe_adma_chan *ppc440spe_chan,
2453 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2454 const unsigned char *scf, size_t len, unsigned long flags)
2455{
2456 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2457 unsigned long op = 0;
2458 int slot_cnt;
2459
2460 set_bit(PPC440SPE_DESC_WXOR, &op);
2461 slot_cnt = 2;
2462
2463 spin_lock_bh(&ppc440spe_chan->lock);
2464
2465 /* use WXOR, each descriptor occupies one slot */
2466 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2467 if (sw_desc) {
2468 struct ppc440spe_adma_chan *chan;
2469 struct ppc440spe_adma_desc_slot *iter;
2470 struct dma_cdb *hw_desc;
2471
2472 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2473 set_bits(op, &sw_desc->flags);
2474 sw_desc->src_cnt = src_cnt;
2475 sw_desc->dst_cnt = dst_cnt;
2476 /* First descriptor, zero data in the destination and copy it
2477 * to q page using MULTICAST transfer.
2478 */
2479 iter = list_first_entry(&sw_desc->group_list,
2480 struct ppc440spe_adma_desc_slot,
2481 chain_node);
2482 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2483 /* set 'next' pointer */
2484 iter->hw_next = list_entry(iter->chain_node.next,
2485 struct ppc440spe_adma_desc_slot,
2486 chain_node);
2487 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2488 hw_desc = iter->hw_desc;
2489 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2490
2491 ppc440spe_desc_set_dest_addr(iter, chan,
2492 DMA_CUED_XOR_BASE, dst[0], 0);
2493 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
2494 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2495 src[0]);
2496 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2497 iter->unmap_len = len;
2498
2499 /*
2500 * Second descriptor, multiply data from the q page
2501 * and store the result in real destination.
2502 */
2503 iter = list_first_entry(&iter->chain_node,
2504 struct ppc440spe_adma_desc_slot,
2505 chain_node);
2506 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2507 iter->hw_next = NULL;
2508 if (flags & DMA_PREP_INTERRUPT)
2509 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2510 else
2511 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2512
2513 hw_desc = iter->hw_desc;
2514 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2515 ppc440spe_desc_set_src_addr(iter, chan, 0,
2516 DMA_CUED_XOR_HB, dst[1]);
2517 ppc440spe_desc_set_dest_addr(iter, chan,
2518 DMA_CUED_XOR_BASE, dst[0], 0);
2519
2520 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2521 DMA_CDB_SG_DST1, scf[0]);
2522 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2523 iter->unmap_len = len;
2524 sw_desc->async_tx.flags = flags;
2525 }
2526
2527 spin_unlock_bh(&ppc440spe_chan->lock);
2528
2529 return sw_desc;
2530}
2531
2532/**
2533 * ppc440spe_dma01_prep_sum_product - prepare descriptors for a
2534 * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where the destination is also
2535 * a source.
2536 */
2537static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
2538 struct ppc440spe_adma_chan *ppc440spe_chan,
2539 dma_addr_t *dst, dma_addr_t *src, int src_cnt,
2540 const unsigned char *scf, size_t len, unsigned long flags)
2541{
2542 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2543 unsigned long op = 0;
2544 int slot_cnt;
2545
2546 set_bit(PPC440SPE_DESC_WXOR, &op);
2547 slot_cnt = 3;
2548
2549 spin_lock_bh(&ppc440spe_chan->lock);
2550
2551 /* WXOR, each descriptor occupies one slot */
2552 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2553 if (sw_desc) {
2554 struct ppc440spe_adma_chan *chan;
2555 struct ppc440spe_adma_desc_slot *iter;
2556 struct dma_cdb *hw_desc;
2557
2558 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2559 set_bits(op, &sw_desc->flags);
2560 sw_desc->src_cnt = src_cnt;
2561 sw_desc->dst_cnt = 1;
2562 /* 1st descriptor, src[1] data to q page and zero destination */
2563 iter = list_first_entry(&sw_desc->group_list,
2564 struct ppc440spe_adma_desc_slot,
2565 chain_node);
2566 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2567 iter->hw_next = list_entry(iter->chain_node.next,
2568 struct ppc440spe_adma_desc_slot,
2569 chain_node);
2570 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2571 hw_desc = iter->hw_desc;
2572 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2573
2574 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2575 *dst, 0);
2576 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2577 ppc440spe_chan->qdest, 1);
2578 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2579 src[1]);
2580 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2581 iter->unmap_len = len;
2582
2583 /* 2nd descriptor, multiply src[1] data and store the
2584 * result in destination */
2585 iter = list_first_entry(&iter->chain_node,
2586 struct ppc440spe_adma_desc_slot,
2587 chain_node);
2588 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2589 /* set 'next' pointer */
2590 iter->hw_next = list_entry(iter->chain_node.next,
2591 struct ppc440spe_adma_desc_slot,
2592 chain_node);
2593 if (flags & DMA_PREP_INTERRUPT)
2594 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2595 else
2596 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2597
2598 hw_desc = iter->hw_desc;
2599 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2600 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2601 ppc440spe_chan->qdest);
2602 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2603 *dst, 0);
2604 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2605 DMA_CDB_SG_DST1, scf[1]);
2606 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2607 iter->unmap_len = len;
2608
2609 /*
2610 * 3rd descriptor, multiply src[0] data and xor it
2611 * with destination
2612 */
2613 iter = list_first_entry(&iter->chain_node,
2614 struct ppc440spe_adma_desc_slot,
2615 chain_node);
2616 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2617 iter->hw_next = NULL;
2618 if (flags & DMA_PREP_INTERRUPT)
2619 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2620 else
2621 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2622
2623 hw_desc = iter->hw_desc;
2624 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2625 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2626 src[0]);
2627 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2628 *dst, 0);
2629 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2630 DMA_CDB_SG_DST1, scf[0]);
2631 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2632 iter->unmap_len = len;
2633 sw_desc->async_tx.flags = flags;
2634 }
2635
2636 spin_unlock_bh(&ppc440spe_chan->lock);
2637
2638 return sw_desc;
2639}
2640
2641static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
2642 struct ppc440spe_adma_chan *ppc440spe_chan,
2643 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2644 const unsigned char *scf, size_t len, unsigned long flags)
2645{
2646 int slot_cnt;
2647 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2648 unsigned long op = 0;
2649 unsigned char mult = 1;
2650
2651 pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2652 __func__, dst_cnt, src_cnt, len);
2653	/* select WXOR or RXOR operations depending on the
2654	 * source addresses of the operands and the number
2655	 * of destinations (RXOR supports only Q-parity calculations)
2656	 */
2657 set_bit(PPC440SPE_DESC_WXOR, &op);
2658 if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
2659		/* no RXOR currently active;
2660		 * do RXOR if:
2661		 * - there is more than one source,
2662		 * - len is aligned on a 512-byte boundary,
2663		 * - the source addresses fit one of the 4 possible regions.
2664		 */
2665 if (src_cnt > 1 &&
2666 !(len & MQ0_CF2H_RXOR_BS_MASK) &&
2667 (src[0] + len) == src[1]) {
2668 /* may do RXOR R1 R2 */
2669 set_bit(PPC440SPE_DESC_RXOR, &op);
2670 if (src_cnt != 2) {
2671 /* may try to enhance region of RXOR */
2672 if ((src[1] + len) == src[2]) {
2673 /* do RXOR R1 R2 R3 */
2674 set_bit(PPC440SPE_DESC_RXOR123,
2675 &op);
2676 } else if ((src[1] + len * 2) == src[2]) {
2677 /* do RXOR R1 R2 R4 */
2678 set_bit(PPC440SPE_DESC_RXOR124, &op);
2679 } else if ((src[1] + len * 3) == src[2]) {
2680 /* do RXOR R1 R2 R5 */
2681 set_bit(PPC440SPE_DESC_RXOR125,
2682 &op);
2683 } else {
2684 /* do RXOR R1 R2 */
2685 set_bit(PPC440SPE_DESC_RXOR12,
2686 &op);
2687 }
2688 } else {
2689 /* do RXOR R1 R2 */
2690 set_bit(PPC440SPE_DESC_RXOR12, &op);
2691 }
2692 }
2693
2694 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2695 /* can not do this operation with RXOR */
2696 clear_bit(PPC440SPE_RXOR_RUN,
2697 &ppc440spe_rxor_state);
2698 } else {
2699 /* can do; set block size right now */
2700 ppc440spe_desc_set_rxor_block_size(len);
2701 }
2702 }
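	/*
	 * Worked example of the region selection above (illustrative):
	 * with a 512-byte-aligned len and sources at A, A + len and
	 * A + 2*len the driver picks RXOR123 (region R1-2-3); a third
	 * source at A + 3*len gives RXOR124, at A + 4*len gives RXOR125,
	 * and any other third address falls back to RXOR12 over the
	 * first two sources only.
	 */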
2703
2704 /* Number of necessary slots depends on operation type selected */
2705 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2706 /* This is a WXOR only chain. Need descriptors for each
2707 * source to GF-XOR them with WXOR, and need descriptors
2708 * for each destination to zero them with WXOR
2709 */
2710 slot_cnt = src_cnt;
2711
2712 if (flags & DMA_PREP_ZERO_P) {
2713 slot_cnt++;
2714 set_bit(PPC440SPE_ZERO_P, &op);
2715 }
2716 if (flags & DMA_PREP_ZERO_Q) {
2717 slot_cnt++;
2718 set_bit(PPC440SPE_ZERO_Q, &op);
2719 }
2720 } else {
2721		/* Need 1 or 2 descriptors for the RXOR operation, and
2722		 * (src_cnt - (2 or 3)) descriptors for WXOR of the
2723		 * remaining sources (if any)
2724		 */
2725 slot_cnt = dst_cnt;
2726
2727 if (flags & DMA_PREP_ZERO_P)
2728 set_bit(PPC440SPE_ZERO_P, &op);
2729 if (flags & DMA_PREP_ZERO_Q)
2730 set_bit(PPC440SPE_ZERO_Q, &op);
2731
2732 if (test_bit(PPC440SPE_DESC_RXOR12, &op))
2733 slot_cnt += src_cnt - 2;
2734 else
2735 slot_cnt += src_cnt - 3;
2736
2737 /* Thus we have either RXOR only chain or
2738 * mixed RXOR/WXOR
2739 */
2740 if (slot_cnt == dst_cnt)
2741 /* RXOR only chain */
2742 clear_bit(PPC440SPE_DESC_WXOR, &op);
2743 }
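	/*
	 * Example of the slot accounting above (illustrative): a WXOR-only
	 * chain with src_cnt = 4 and both DMA_PREP_ZERO_P and
	 * DMA_PREP_ZERO_Q set needs 4 + 2 = 6 slots, while an RXOR12
	 * chain with the same four sources and two destinations needs
	 * 2 + (4 - 2) = 4 slots and is therefore a mixed RXOR/WXOR chain.
	 */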
2744
2745 spin_lock_bh(&ppc440spe_chan->lock);
2746 /* for both RXOR/WXOR each descriptor occupies one slot */
2747 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2748 if (sw_desc) {
2749 ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
2750 flags, op);
2751
2752 /* setup dst/src/mult */
2753 pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
2754 __func__, dst[0], dst[1]);
2755 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2756 while (src_cnt--) {
2757 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2758 src_cnt);
2759
2760			/* NOTE: "Multi = 0 is equivalent to = 1", as
2761			 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf,
2762			 * doesn't work for RXOR with DMA0/1! Instead, multi=0
2763			 * leads to zeroing the source data after RXOR.
2764			 * So, for the P case, set mult=1 explicitly.
2765			 */
2766 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2767 mult = scf[src_cnt];
2768 ppc440spe_adma_pq_set_src_mult(sw_desc,
2769 mult, src_cnt, dst_cnt - 1);
2770 }
2771
2772		/* Set up the byte count for each slot just allocated */
2773 sw_desc->async_tx.flags = flags;
2774 list_for_each_entry(iter, &sw_desc->group_list,
2775 chain_node) {
2776 ppc440spe_desc_set_byte_count(iter,
2777 ppc440spe_chan, len);
2778 iter->unmap_len = len;
2779 }
2780 }
2781 spin_unlock_bh(&ppc440spe_chan->lock);
2782
2783 return sw_desc;
2784}
2785
2786static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
2787 struct ppc440spe_adma_chan *ppc440spe_chan,
2788 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2789 const unsigned char *scf, size_t len, unsigned long flags)
2790{
2791 int slot_cnt, descs_per_op;
2792 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2793 unsigned long op = 0;
2794 unsigned char mult = 1;
2795
2796 BUG_ON(!dst_cnt);
2797 /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2798 __func__, dst_cnt, src_cnt, len);*/
2799
2800 spin_lock_bh(&ppc440spe_chan->lock);
2801 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
2802 if (descs_per_op < 0) {
2803 spin_unlock_bh(&ppc440spe_chan->lock);
2804 return NULL;
2805 }
2806
2807 /* depending on number of sources we have 1 or 2 RXOR chains */
2808 slot_cnt = descs_per_op * dst_cnt;
2809
2810 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2811 if (sw_desc) {
2812 op = slot_cnt;
2813 sw_desc->async_tx.flags = flags;
2814 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2815 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
2816 --op ? 0 : flags);
2817 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2818 len);
2819 iter->unmap_len = len;
2820
2821 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
2822 iter->rxor_cursor.len = len;
2823 iter->descs_per_op = descs_per_op;
2824 }
2825 op = 0;
2826 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2827 op++;
2828 if (op % descs_per_op == 0)
2829 ppc440spe_adma_init_dma2rxor_slot(iter, src,
2830 src_cnt);
2831 if (likely(!list_is_last(&iter->chain_node,
2832 &sw_desc->group_list))) {
2833 /* set 'next' pointer */
2834 iter->hw_next =
2835 list_entry(iter->chain_node.next,
2836 struct ppc440spe_adma_desc_slot,
2837 chain_node);
2838 ppc440spe_xor_set_link(iter, iter->hw_next);
2839 } else {
2840 /* this is the last descriptor. */
2841 iter->hw_next = NULL;
2842 }
2843 }
2844
2845 /* fixup head descriptor */
2846 sw_desc->dst_cnt = dst_cnt;
2847 if (flags & DMA_PREP_ZERO_P)
2848 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
2849 if (flags & DMA_PREP_ZERO_Q)
2850 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
2851
2852 /* setup dst/src/mult */
2853 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2854
2855 while (src_cnt--) {
2856 /* handle descriptors (if dst_cnt == 2) inside
2857 * the ppc440spe_adma_pq_set_srcxxx() functions
2858 */
2859 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2860 src_cnt);
2861 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2862 mult = scf[src_cnt];
2863 ppc440spe_adma_pq_set_src_mult(sw_desc,
2864 mult, src_cnt, dst_cnt - 1);
2865 }
2866 }
2867 spin_unlock_bh(&ppc440spe_chan->lock);
2868 ppc440spe_desc_set_rxor_block_size(len);
2869 return sw_desc;
2870}
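/*
 * Note: for DMA2 each destination gets its own sub-chain of descs_per_op
 * descriptors (slot_cnt = descs_per_op * dst_cnt above), and every
 * descs_per_op-th descriptor has its RXOR sources initialized via
 * ppc440spe_adma_init_dma2rxor_slot().
 */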
2871
2872/**
2873 * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
2874 */
2875static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
2876 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
2877 unsigned int src_cnt, const unsigned char *scf,
2878 size_t len, unsigned long flags)
2879{
2880 struct ppc440spe_adma_chan *ppc440spe_chan;
2881 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2882 int dst_cnt = 0;
2883
2884 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2885
2886 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
2887 dst, src, src_cnt));
2888 BUG_ON(!len);
2889 BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
2890 BUG_ON(!src_cnt);
2891
2892 if (src_cnt == 1 && dst[1] == src[0]) {
2893 dma_addr_t dest[2];
2894
2895 /* dst[1] is real destination (Q) */
2896 dest[0] = dst[1];
2897 /* this is the page to multicast source data to */
2898 dest[1] = ppc440spe_chan->qdest;
2899 sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
2900 dest, 2, src, src_cnt, scf, len, flags);
2901 return sw_desc ? &sw_desc->async_tx : NULL;
2902 }
2903
2904 if (src_cnt == 2 && dst[1] == src[1]) {
2905 sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
2906 &dst[1], src, 2, scf, len, flags);
2907 return sw_desc ? &sw_desc->async_tx : NULL;
2908 }
2909
2910 if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
2911 BUG_ON(!dst[0]);
2912 dst_cnt++;
2913 flags |= DMA_PREP_ZERO_P;
2914 }
2915
2916 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
2917 BUG_ON(!dst[1]);
2918 dst_cnt++;
2919 flags |= DMA_PREP_ZERO_Q;
2920 }
2921
2922 BUG_ON(!dst_cnt);
2923
2924 dev_dbg(ppc440spe_chan->device->common.dev,
2925 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2926 ppc440spe_chan->device->id, __func__, src_cnt, len,
2927 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2928
2929 switch (ppc440spe_chan->device->id) {
2930 case PPC440SPE_DMA0_ID:
2931 case PPC440SPE_DMA1_ID:
2932 sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
2933 dst, dst_cnt, src, src_cnt, scf,
2934 len, flags);
2935 break;
2936
2937 case PPC440SPE_XOR_ID:
2938 sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
2939 dst, dst_cnt, src, src_cnt, scf,
2940 len, flags);
2941 break;
2942 }
2943
2944 return sw_desc ? &sw_desc->async_tx : NULL;
2945}
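/*
 * Dispatch summary for ppc440spe_adma_prep_dma_pq() above: a single
 * source with dst[1] == src[0] is handled by ppc440spe_dma01_prep_mult()
 * (Q is also the source), two sources with dst[1] == src[1] by
 * ppc440spe_dma01_prep_sum_product(), and everything else goes either to
 * ppc440spe_dma01_prep_pq() (DMA0/1) or ppc440spe_dma2_prep_pq() (the
 * XOR engine), depending on which channel the request was issued on.
 */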
2946
2947/**
2948 * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
2949 * a PQ_ZERO_SUM operation
2950 */
2951static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
2952 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
2953 unsigned int src_cnt, const unsigned char *scf, size_t len,
2954 enum sum_check_flags *pqres, unsigned long flags)
2955{
2956 struct ppc440spe_adma_chan *ppc440spe_chan;
2957 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
2958 dma_addr_t pdest, qdest;
2959 int slot_cnt, slots_per_op, idst, dst_cnt;
2960
2961 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2962
2963 if (flags & DMA_PREP_PQ_DISABLE_P)
2964 pdest = 0;
2965 else
2966 pdest = pq[0];
2967
2968 if (flags & DMA_PREP_PQ_DISABLE_Q)
2969 qdest = 0;
2970 else
2971 qdest = pq[1];
2972
2973 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
2974 src, src_cnt, scf));
2975
2976 /* Always use WXOR for P/Q calculations (two destinations).
2977 * Need 1 or 2 extra slots to verify results are zero.
2978 */
2979 idst = dst_cnt = (pdest && qdest) ? 2 : 1;
2980
2981 /* One additional slot per destination to clone P/Q
2982 * before calculation (we have to preserve destinations).
2983 */
2984 slot_cnt = src_cnt + dst_cnt * 2;
2985 slots_per_op = 1;
2986
2987 spin_lock_bh(&ppc440spe_chan->lock);
2988 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2989 slots_per_op);
2990 if (sw_desc) {
2991 ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
2992
2993 /* Setup byte count for each slot just allocated */
2994 sw_desc->async_tx.flags = flags;
2995 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2996 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2997 len);
2998 iter->unmap_len = len;
2999 }
3000
3001 if (pdest) {
3002 struct dma_cdb *hw_desc;
3003 struct ppc440spe_adma_chan *chan;
3004
3005 iter = sw_desc->group_head;
3006 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
3007 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
3008 iter->hw_next = list_entry(iter->chain_node.next,
3009 struct ppc440spe_adma_desc_slot,
3010 chain_node);
3011 hw_desc = iter->hw_desc;
3012 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
3013 iter->src_cnt = 0;
3014 iter->dst_cnt = 0;
3015 ppc440spe_desc_set_dest_addr(iter, chan, 0,
3016 ppc440spe_chan->pdest, 0);
3017 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
3018 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
3019 len);
3020 iter->unmap_len = 0;
3021 /* override pdest to preserve original P */
3022 pdest = ppc440spe_chan->pdest;
3023 }
3024 if (qdest) {
3025 struct dma_cdb *hw_desc;
3026 struct ppc440spe_adma_chan *chan;
3027
3028 iter = list_first_entry(&sw_desc->group_list,
3029 struct ppc440spe_adma_desc_slot,
3030 chain_node);
3031 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
3032
3033 if (pdest) {
3034 iter = list_entry(iter->chain_node.next,
3035 struct ppc440spe_adma_desc_slot,
3036 chain_node);
3037 }
3038
3039 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
3040 iter->hw_next = list_entry(iter->chain_node.next,
3041 struct ppc440spe_adma_desc_slot,
3042 chain_node);
3043 hw_desc = iter->hw_desc;
3044 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
3045 iter->src_cnt = 0;
3046 iter->dst_cnt = 0;
3047 ppc440spe_desc_set_dest_addr(iter, chan, 0,
3048 ppc440spe_chan->qdest, 0);
3049 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
3050 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
3051 len);
3052 iter->unmap_len = 0;
3053 /* override qdest to preserve original Q */
3054 qdest = ppc440spe_chan->qdest;
3055 }
3056
3057 /* Setup destinations for P/Q ops */
3058 ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
3059
3060 /* Setup zero QWORDs into DCHECK CDBs */
3061 idst = dst_cnt;
3062 list_for_each_entry_reverse(iter, &sw_desc->group_list,
3063 chain_node) {
3064 /*
3065			 * The last CDB corresponds to the Q-parity check,
3066			 * the one before the last corresponds to the
3067			 * P-parity check
3068 */
3069 if (idst == DMA_DEST_MAX_NUM) {
3070 if (idst == dst_cnt) {
3071 set_bit(PPC440SPE_DESC_QCHECK,
3072 &iter->flags);
3073 } else {
3074 set_bit(PPC440SPE_DESC_PCHECK,
3075 &iter->flags);
3076 }
3077 } else {
3078 if (qdest) {
3079 set_bit(PPC440SPE_DESC_QCHECK,
3080 &iter->flags);
3081 } else {
3082 set_bit(PPC440SPE_DESC_PCHECK,
3083 &iter->flags);
3084 }
3085 }
3086 iter->xor_check_result = pqres;
3087
3088 /*
3089			 * set it to zero; if the check fails the result will
3090			 * be updated
3091 */
3092 *iter->xor_check_result = 0;
3093 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
3094 ppc440spe_qword);
3095
3096 if (!(--dst_cnt))
3097 break;
3098 }
3099
3100 /* Setup sources and mults for P/Q ops */
3101 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
3102 chain_node) {
3103 struct ppc440spe_adma_chan *chan;
3104 u32 mult_dst;
3105
3106 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
3107 ppc440spe_desc_set_src_addr(iter, chan, 0,
3108 DMA_CUED_XOR_HB,
3109 src[src_cnt - 1]);
3110 if (qdest) {
3111 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
3112 DMA_CDB_SG_DST1;
3113 ppc440spe_desc_set_src_mult(iter, chan,
3114 DMA_CUED_MULT1_OFF,
3115 mult_dst,
3116 scf[src_cnt - 1]);
3117 }
3118 if (!(--src_cnt))
3119 break;
3120 }
3121 }
3122 spin_unlock_bh(&ppc440spe_chan->lock);
3123 return sw_desc ? &sw_desc->async_tx : NULL;
3124}
3125
3126/**
3127 * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
3128 * XOR ZERO_SUM operation
3129 */
3130static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
3131 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
3132 size_t len, enum sum_check_flags *result, unsigned long flags)
3133{
3134 struct dma_async_tx_descriptor *tx;
3135 dma_addr_t pq[2];
3136
3137 /* validate P, disable Q */
3138 pq[0] = src[0];
3139 pq[1] = 0;
3140 flags |= DMA_PREP_PQ_DISABLE_Q;
3141
3142 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
3143 src_cnt - 1, 0, len,
3144 result, flags);
3145 return tx;
3146}
3147
3148/**
3149 * ppc440spe_adma_set_dest - set destination address into descriptor
3150 */
3151static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
3152 dma_addr_t addr, int index)
3153{
3154 struct ppc440spe_adma_chan *chan;
3155
3156 BUG_ON(index >= sw_desc->dst_cnt);
3157
3158 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3159
3160 switch (chan->device->id) {
3161 case PPC440SPE_DMA0_ID:
3162 case PPC440SPE_DMA1_ID:
3163		/* to do: support transfer lengths >
3164 * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
3165 */
3166 ppc440spe_desc_set_dest_addr(sw_desc->group_head,
3167 chan, 0, addr, index);
3168 break;
3169 case PPC440SPE_XOR_ID:
3170 sw_desc = ppc440spe_get_group_entry(sw_desc, index);
3171 ppc440spe_desc_set_dest_addr(sw_desc,
3172 chan, 0, addr, index);
3173 break;
3174 }
3175}
3176
3177static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
3178 struct ppc440spe_adma_chan *chan, dma_addr_t addr)
3179{
3180	/* To clear the destination, update the descriptor
3181	 * (P or Q depending on the index) as follows:
3182	 * addr is the destination (0 corresponds to SG2):
3183	 */
3184 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
3185
3186 /* ... and the addr is source: */
3187 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
3188
3189 /* addr is always SG2 then the mult is always DST1 */
3190 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
3191 DMA_CDB_SG_DST1, 1);
3192}
3193
3194/**
3195 * ppc440spe_adma_pq_set_dest - set destination address into descriptor
3196 * for the PQXOR operation
3197 */
3198static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
3199 dma_addr_t *addrs, unsigned long flags)
3200{
3201 struct ppc440spe_adma_desc_slot *iter;
3202 struct ppc440spe_adma_chan *chan;
3203 dma_addr_t paddr, qaddr;
3204 dma_addr_t addr = 0, ppath, qpath;
3205 int index = 0, i;
3206
3207 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3208
3209 if (flags & DMA_PREP_PQ_DISABLE_P)
3210 paddr = 0;
3211 else
3212 paddr = addrs[0];
3213
3214 if (flags & DMA_PREP_PQ_DISABLE_Q)
3215 qaddr = 0;
3216 else
3217 qaddr = addrs[1];
3218
3219 if (!paddr || !qaddr)
3220 addr = paddr ? paddr : qaddr;
3221
3222 switch (chan->device->id) {
3223 case PPC440SPE_DMA0_ID:
3224 case PPC440SPE_DMA1_ID:
3225 /* walk through the WXOR source list and set P/Q-destinations
3226 * for each slot:
3227 */
3228 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3229 /* This is WXOR-only chain; may have 1/2 zero descs */
3230 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3231 index++;
3232 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3233 index++;
3234
3235 iter = ppc440spe_get_group_entry(sw_desc, index);
3236 if (addr) {
3237 /* one destination */
3238 list_for_each_entry_from(iter,
3239 &sw_desc->group_list, chain_node)
3240 ppc440spe_desc_set_dest_addr(iter, chan,
3241 DMA_CUED_XOR_BASE, addr, 0);
3242 } else {
3243 /* two destinations */
3244 list_for_each_entry_from(iter,
3245 &sw_desc->group_list, chain_node) {
3246 ppc440spe_desc_set_dest_addr(iter, chan,
3247 DMA_CUED_XOR_BASE, paddr, 0);
3248 ppc440spe_desc_set_dest_addr(iter, chan,
3249 DMA_CUED_XOR_BASE, qaddr, 1);
3250 }
3251 }
3252
3253 if (index) {
3254 /* To clear destinations update the descriptor
3255 * (1st,2nd, or both depending on flags)
3256 */
3257 index = 0;
3258 if (test_bit(PPC440SPE_ZERO_P,
3259 &sw_desc->flags)) {
3260 iter = ppc440spe_get_group_entry(
3261 sw_desc, index++);
3262 ppc440spe_adma_pq_zero_op(iter, chan,
3263 paddr);
3264 }
3265
3266 if (test_bit(PPC440SPE_ZERO_Q,
3267 &sw_desc->flags)) {
3268 iter = ppc440spe_get_group_entry(
3269 sw_desc, index++);
3270 ppc440spe_adma_pq_zero_op(iter, chan,
3271 qaddr);
3272 }
3273
3274 return;
3275 }
3276 } else {
3277 /* This is RXOR-only or RXOR/WXOR mixed chain */
3278
3279 /* If we want to include destination into calculations,
3280 * then make dest addresses cued with mult=1 (XOR).
3281 */
3282 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
3283 DMA_CUED_XOR_HB :
3284 DMA_CUED_XOR_BASE |
3285 (1 << DMA_CUED_MULT1_OFF);
3286 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3287 DMA_CUED_XOR_HB :
3288 DMA_CUED_XOR_BASE |
3289 (1 << DMA_CUED_MULT1_OFF);
3290
3291 /* Setup destination(s) in RXOR slot(s) */
3292 iter = ppc440spe_get_group_entry(sw_desc, index++);
3293 ppc440spe_desc_set_dest_addr(iter, chan,
3294 paddr ? ppath : qpath,
3295 paddr ? paddr : qaddr, 0);
3296 if (!addr) {
3297 /* two destinations */
3298 iter = ppc440spe_get_group_entry(sw_desc,
3299 index++);
3300 ppc440spe_desc_set_dest_addr(iter, chan,
3301 qpath, qaddr, 0);
3302 }
3303
3304 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
3305 /* Setup destination(s) in remaining WXOR
3306 * slots
3307 */
3308 iter = ppc440spe_get_group_entry(sw_desc,
3309 index);
3310 if (addr) {
3311 /* one destination */
3312 list_for_each_entry_from(iter,
3313 &sw_desc->group_list,
3314 chain_node)
3315 ppc440spe_desc_set_dest_addr(
3316 iter, chan,
3317 DMA_CUED_XOR_BASE,
3318 addr, 0);
3319
3320 } else {
3321 /* two destinations */
3322 list_for_each_entry_from(iter,
3323 &sw_desc->group_list,
3324 chain_node) {
3325 ppc440spe_desc_set_dest_addr(
3326 iter, chan,
3327 DMA_CUED_XOR_BASE,
3328 paddr, 0);
3329 ppc440spe_desc_set_dest_addr(
3330 iter, chan,
3331 DMA_CUED_XOR_BASE,
3332 qaddr, 1);
3333 }
3334 }
3335 }
3336
3337 }
3338 break;
3339
3340 case PPC440SPE_XOR_ID:
3341 /* DMA2 descriptors have only 1 destination, so there are
3342 * two chains - one for each dest.
3343 * If we want to include destination into calculations,
3344 * then make dest addresses cued with mult=1 (XOR).
3345 */
3346 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
3347 DMA_CUED_XOR_HB :
3348 DMA_CUED_XOR_BASE |
3349 (1 << DMA_CUED_MULT1_OFF);
3350
3351 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3352 DMA_CUED_XOR_HB :
3353 DMA_CUED_XOR_BASE |
3354 (1 << DMA_CUED_MULT1_OFF);
3355
3356 iter = ppc440spe_get_group_entry(sw_desc, 0);
3357 for (i = 0; i < sw_desc->descs_per_op; i++) {
3358 ppc440spe_desc_set_dest_addr(iter, chan,
3359 paddr ? ppath : qpath,
3360 paddr ? paddr : qaddr, 0);
3361 iter = list_entry(iter->chain_node.next,
3362 struct ppc440spe_adma_desc_slot,
3363 chain_node);
3364 }
3365
3366 if (!addr) {
3367 /* Two destinations; setup Q here */
3368 iter = ppc440spe_get_group_entry(sw_desc,
3369 sw_desc->descs_per_op);
3370 for (i = 0; i < sw_desc->descs_per_op; i++) {
3371 ppc440spe_desc_set_dest_addr(iter,
3372 chan, qpath, qaddr, 0);
3373 iter = list_entry(iter->chain_node.next,
3374 struct ppc440spe_adma_desc_slot,
3375 chain_node);
3376 }
3377 }
3378
3379 break;
3380 }
3381}
3382
3383/**
3384 * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor
3385 * for the PQ_ZERO_SUM operation
3386 */
3387static void ppc440spe_adma_pqzero_sum_set_dest(
3388 struct ppc440spe_adma_desc_slot *sw_desc,
3389 dma_addr_t paddr, dma_addr_t qaddr)
3390{
3391 struct ppc440spe_adma_desc_slot *iter, *end;
3392 struct ppc440spe_adma_chan *chan;
3393 dma_addr_t addr = 0;
3394 int idx;
3395
3396 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3397
3398 /* walk through the WXOR source list and set P/Q-destinations
3399 * for each slot
3400 */
3401 idx = (paddr && qaddr) ? 2 : 1;
3402 /* set end */
3403 list_for_each_entry_reverse(end, &sw_desc->group_list,
3404 chain_node) {
3405 if (!(--idx))
3406 break;
3407 }
3408 /* set start */
3409 idx = (paddr && qaddr) ? 2 : 1;
3410 iter = ppc440spe_get_group_entry(sw_desc, idx);
3411
3412 if (paddr && qaddr) {
3413 /* two destinations */
3414 list_for_each_entry_from(iter, &sw_desc->group_list,
3415 chain_node) {
3416 if (unlikely(iter == end))
3417 break;
3418 ppc440spe_desc_set_dest_addr(iter, chan,
3419 DMA_CUED_XOR_BASE, paddr, 0);
3420 ppc440spe_desc_set_dest_addr(iter, chan,
3421 DMA_CUED_XOR_BASE, qaddr, 1);
3422 }
3423 } else {
3424 /* one destination */
3425 addr = paddr ? paddr : qaddr;
3426 list_for_each_entry_from(iter, &sw_desc->group_list,
3427 chain_node) {
3428 if (unlikely(iter == end))
3429 break;
3430 ppc440spe_desc_set_dest_addr(iter, chan,
3431 DMA_CUED_XOR_BASE, addr, 0);
3432 }
3433 }
3434
3435	/* The remaining descriptors are DATACHECK. They do not need a
3436	 * destination; their destination fields are actually used
3437	 * as sources for the check operation. So, set addr as a source.
3438	 */
3439 ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
3440
3441 if (!addr) {
3442 end = list_entry(end->chain_node.next,
3443 struct ppc440spe_adma_desc_slot, chain_node);
3444 ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
3445 }
3446}
3447
3448/**
3449 * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
3450 */
3451static inline void ppc440spe_desc_set_xor_src_cnt(
3452 struct ppc440spe_adma_desc_slot *desc,
3453 int src_cnt)
3454{
3455 struct xor_cb *hw_desc = desc->hw_desc;
3456
3457 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
3458 hw_desc->cbc |= src_cnt;
3459}
3460
3461/**
3462 * ppc440spe_adma_pq_set_src - set source address into descriptor
3463 */
3464static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
3465 dma_addr_t addr, int index)
3466{
3467 struct ppc440spe_adma_chan *chan;
3468 dma_addr_t haddr = 0;
3469 struct ppc440spe_adma_desc_slot *iter = NULL;
3470
3471 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3472
3473 switch (chan->device->id) {
3474 case PPC440SPE_DMA0_ID:
3475 case PPC440SPE_DMA1_ID:
3476 /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
3477 */
3478 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3479 /* RXOR-only or RXOR/WXOR operation */
3480 int iskip = test_bit(PPC440SPE_DESC_RXOR12,
3481 &sw_desc->flags) ? 2 : 3;
3482
3483 if (index == 0) {
3484 /* 1st slot (RXOR) */
3485 /* setup sources region (R1-2-3, R1-2-4,
3486 * or R1-2-5)
3487 */
3488 if (test_bit(PPC440SPE_DESC_RXOR12,
3489 &sw_desc->flags))
3490 haddr = DMA_RXOR12 <<
3491 DMA_CUED_REGION_OFF;
3492 else if (test_bit(PPC440SPE_DESC_RXOR123,
3493 &sw_desc->flags))
3494 haddr = DMA_RXOR123 <<
3495 DMA_CUED_REGION_OFF;
3496 else if (test_bit(PPC440SPE_DESC_RXOR124,
3497 &sw_desc->flags))
3498 haddr = DMA_RXOR124 <<
3499 DMA_CUED_REGION_OFF;
3500 else if (test_bit(PPC440SPE_DESC_RXOR125,
3501 &sw_desc->flags))
3502 haddr = DMA_RXOR125 <<
3503 DMA_CUED_REGION_OFF;
3504 else
3505 BUG();
3506 haddr |= DMA_CUED_XOR_BASE;
3507 iter = ppc440spe_get_group_entry(sw_desc, 0);
3508 } else if (index < iskip) {
3509				/* 1st slot (RXOR):
3510				 * its source address is set only once (at index 0),
3511				 * so skip the other sources of the first <iskip>
3512				 */
3513 iter = NULL;
3514 } else {
3515				/* 2nd/3rd and subsequent slots (WXOR);
3516				 * skip the first slot, which holds the RXOR
3517				 */
3518 haddr = DMA_CUED_XOR_HB;
3519 iter = ppc440spe_get_group_entry(sw_desc,
3520 index - iskip + sw_desc->dst_cnt);
3521 }
3522 } else {
3523 int znum = 0;
3524
3525			/* WXOR-only operation; skip the first slots,
3526			 * which zero the destinations
3527			 */
3528 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3529 znum++;
3530 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3531 znum++;
3532
3533 haddr = DMA_CUED_XOR_HB;
3534 iter = ppc440spe_get_group_entry(sw_desc,
3535 index + znum);
3536 }
3537
3538 if (likely(iter)) {
3539 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
3540
3541 if (!index &&
3542 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
3543 sw_desc->dst_cnt == 2) {
3544				/* if we have two destinations for RXOR, then
3545				 * set up the source in the second descriptor too
3546				 */
3547 iter = ppc440spe_get_group_entry(sw_desc, 1);
3548 ppc440spe_desc_set_src_addr(iter, chan, 0,
3549 haddr, addr);
3550 }
3551 }
3552 break;
3553
3554 case PPC440SPE_XOR_ID:
3555 /* DMA2 may do Biskup */
3556 iter = sw_desc->group_head;
3557 if (iter->dst_cnt == 2) {
3558 /* both P & Q calculations required; set P src here */
3559 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3560
3561 /* this is for Q */
3562 iter = ppc440spe_get_group_entry(sw_desc,
3563 sw_desc->descs_per_op);
3564 }
3565 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3566 break;
3567 }
3568}
3569
3570/**
3571 * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
3572 */
3573static void ppc440spe_adma_memcpy_xor_set_src(
3574 struct ppc440spe_adma_desc_slot *sw_desc,
3575 dma_addr_t addr, int index)
3576{
3577 struct ppc440spe_adma_chan *chan;
3578
3579 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3580 sw_desc = sw_desc->group_head;
3581
3582 if (likely(sw_desc))
3583 ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
3584}
3585
3586/**
3587 * ppc440spe_adma_dma2rxor_inc_addr - account for one more operand address in a DMA2 RXOR CDB
3588 */
3589static void ppc440spe_adma_dma2rxor_inc_addr(
3590 struct ppc440spe_adma_desc_slot *desc,
3591 struct ppc440spe_rxor *cursor, int index, int src_cnt)
3592{
3593 cursor->addr_count++;
3594 if (index == src_cnt - 1) {
3595 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3596 } else if (cursor->addr_count == XOR_MAX_OPS) {
3597 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3598 cursor->addr_count = 0;
3599 cursor->desc_count++;
3600 }
3601}
3602
3603/**
3604 * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
3605 */
3606static int ppc440spe_adma_dma2rxor_prep_src(
3607 struct ppc440spe_adma_desc_slot *hdesc,
3608 struct ppc440spe_rxor *cursor, int index,
3609 int src_cnt, u32 addr)
3610{
3611 int rval = 0;
3612 u32 sign;
3613 struct ppc440spe_adma_desc_slot *desc = hdesc;
3614 int i;
3615
3616 for (i = 0; i < cursor->desc_count; i++) {
3617 desc = list_entry(hdesc->chain_node.next,
3618 struct ppc440spe_adma_desc_slot,
3619 chain_node);
3620 }
3621
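	/*
	 * Region parser states (as implemented below): 0 - the first address
	 * of a region is latched, look for a pairing second address (direct
	 * or reverse); 1 - a pair is found, decide between closing it as an
	 * RXOR12 region or extending it to RXOR123/124/125; 2 - the region is
	 * complete, the current address starts a new one.
	 */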
3622 switch (cursor->state) {
3623 case 0:
3624 if (addr == cursor->addrl + cursor->len) {
3625 /* direct RXOR */
3626 cursor->state = 1;
3627 cursor->xor_count++;
3628 if (index == src_cnt-1) {
3629 ppc440spe_rxor_set_region(desc,
3630 cursor->addr_count,
3631 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3632 ppc440spe_adma_dma2rxor_inc_addr(
3633 desc, cursor, index, src_cnt);
3634 }
3635 } else if (cursor->addrl == addr + cursor->len) {
3636 /* reverse RXOR */
3637 cursor->state = 1;
3638 cursor->xor_count++;
3639 set_bit(cursor->addr_count, &desc->reverse_flags[0]);
3640 if (index == src_cnt-1) {
3641 ppc440spe_rxor_set_region(desc,
3642 cursor->addr_count,
3643 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3644 ppc440spe_adma_dma2rxor_inc_addr(
3645 desc, cursor, index, src_cnt);
3646 }
3647 } else {
3648 printk(KERN_ERR "Cannot build "
3649 "DMA2 RXOR command block.\n");
3650 BUG();
3651 }
3652 break;
3653 case 1:
3654 sign = test_bit(cursor->addr_count,
3655 desc->reverse_flags)
3656 ? -1 : 1;
3657 if (index == src_cnt-2 || (sign == -1
3658 && addr != cursor->addrl - 2*cursor->len)) {
3659 cursor->state = 0;
3660 cursor->xor_count = 1;
3661 cursor->addrl = addr;
3662 ppc440spe_rxor_set_region(desc,
3663 cursor->addr_count,
3664 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3665 ppc440spe_adma_dma2rxor_inc_addr(
3666 desc, cursor, index, src_cnt);
3667 } else if (addr == cursor->addrl + 2*sign*cursor->len) {
3668 cursor->state = 2;
3669 cursor->xor_count = 0;
3670 ppc440spe_rxor_set_region(desc,
3671 cursor->addr_count,
3672 DMA_RXOR123 << DMA_CUED_REGION_OFF);
3673 if (index == src_cnt-1) {
3674 ppc440spe_adma_dma2rxor_inc_addr(
3675 desc, cursor, index, src_cnt);
3676 }
3677 } else if (addr == cursor->addrl + 3*cursor->len) {
3678 cursor->state = 2;
3679 cursor->xor_count = 0;
3680 ppc440spe_rxor_set_region(desc,
3681 cursor->addr_count,
3682 DMA_RXOR124 << DMA_CUED_REGION_OFF);
3683 if (index == src_cnt-1) {
3684 ppc440spe_adma_dma2rxor_inc_addr(
3685 desc, cursor, index, src_cnt);
3686 }
3687 } else if (addr == cursor->addrl + 4*cursor->len) {
3688 cursor->state = 2;
3689 cursor->xor_count = 0;
3690 ppc440spe_rxor_set_region(desc,
3691 cursor->addr_count,
3692 DMA_RXOR125 << DMA_CUED_REGION_OFF);
3693 if (index == src_cnt-1) {
3694 ppc440spe_adma_dma2rxor_inc_addr(
3695 desc, cursor, index, src_cnt);
3696 }
3697 } else {
3698 cursor->state = 0;
3699 cursor->xor_count = 1;
3700 cursor->addrl = addr;
3701 ppc440spe_rxor_set_region(desc,
3702 cursor->addr_count,
3703 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3704 ppc440spe_adma_dma2rxor_inc_addr(
3705 desc, cursor, index, src_cnt);
3706 }
3707 break;
3708 case 2:
3709 cursor->state = 0;
3710 cursor->addrl = addr;
3711 cursor->xor_count++;
3712 if (index) {
3713 ppc440spe_adma_dma2rxor_inc_addr(
3714 desc, cursor, index, src_cnt);
3715 }
3716 break;
3717 }
3718
3719 return rval;
3720}
3721
3722/**
3723 * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
3724 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
3725 */
3726static void ppc440spe_adma_dma2rxor_set_src(
3727 struct ppc440spe_adma_desc_slot *desc,
3728 int index, dma_addr_t addr)
3729{
3730 struct xor_cb *xcb = desc->hw_desc;
3731 int k = 0, op = 0, lop = 0;
3732
3733 /* get the RXOR operand which corresponds to index addr */
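	/* an RXOR12 operand covers two source indices while RXOR123/124/125
	 * operands cover three, hence op advances by 2 or 3 per operand below
	 */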
3734 while (op <= index) {
3735 lop = op;
3736 if (k == XOR_MAX_OPS) {
3737 k = 0;
3738 desc = list_entry(desc->chain_node.next,
3739 struct ppc440spe_adma_desc_slot, chain_node);
3740 xcb = desc->hw_desc;
3741
3742 }
3743 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3744 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3745 op += 2;
3746 else
3747 op += 3;
3748 }
3749
3750 BUG_ON(k < 1);
3751
3752 if (test_bit(k-1, desc->reverse_flags)) {
3753 /* reverse operand order; put last op in RXOR group */
3754 if (index == op - 1)
3755 ppc440spe_rxor_set_src(desc, k - 1, addr);
3756 } else {
3757 /* direct operand order; put first op in RXOR group */
3758 if (index == lop)
3759 ppc440spe_rxor_set_src(desc, k - 1, addr);
3760 }
3761}
3762
3763/**
3764 * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
3765 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
3766 */
3767static void ppc440spe_adma_dma2rxor_set_mult(
3768 struct ppc440spe_adma_desc_slot *desc,
3769 int index, u8 mult)
3770{
3771 struct xor_cb *xcb = desc->hw_desc;
3772 int k = 0, op = 0, lop = 0;
3773
3774 /* get the RXOR operand which corresponds to index mult */
3775 while (op <= index) {
3776 lop = op;
3777 if (k == XOR_MAX_OPS) {
3778 k = 0;
3779 desc = list_entry(desc->chain_node.next,
3780 struct ppc440spe_adma_desc_slot,
3781 chain_node);
3782 xcb = desc->hw_desc;
3783
3784 }
3785 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3786 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3787 op += 2;
3788 else
3789 op += 3;
3790 }
3791
3792 BUG_ON(k < 1);
3793 if (test_bit(k-1, desc->reverse_flags)) {
3794 /* reverse order */
3795 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
3796 } else {
3797 /* direct order */
3798 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
3799 }
3800}
3801
3802/**
3803 * ppc440spe_init_rxor_cursor - initialize the RXOR parsing cursor
3804 */
3805static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
3806{
3807 memset(cursor, 0, sizeof(struct ppc440spe_rxor));
3808 cursor->state = 2;
3809}
3810
3811/**
3812 * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
3813 * descriptor for the PQXOR operation
3814 */
3815static void ppc440spe_adma_pq_set_src_mult(
3816 struct ppc440spe_adma_desc_slot *sw_desc,
3817 unsigned char mult, int index, int dst_pos)
3818{
3819 struct ppc440spe_adma_chan *chan;
3820 u32 mult_idx, mult_dst;
3821 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
3822
3823 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3824
3825 switch (chan->device->id) {
3826 case PPC440SPE_DMA0_ID:
3827 case PPC440SPE_DMA1_ID:
3828 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3829 int region = test_bit(PPC440SPE_DESC_RXOR12,
3830 &sw_desc->flags) ? 2 : 3;
3831
3832 if (index < region) {
3833 /* RXOR multipliers */
3834 iter = ppc440spe_get_group_entry(sw_desc,
3835 sw_desc->dst_cnt - 1);
3836 if (sw_desc->dst_cnt == 2)
3837 iter1 = ppc440spe_get_group_entry(
3838 sw_desc, 0);
3839
3840 mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
3841 mult_dst = DMA_CDB_SG_SRC;
3842 } else {
3843 /* WXOR multiplier */
3844 iter = ppc440spe_get_group_entry(sw_desc,
3845 index - region +
3846 sw_desc->dst_cnt);
3847 mult_idx = DMA_CUED_MULT1_OFF;
3848 mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
3849 DMA_CDB_SG_DST1;
3850 }
3851 } else {
3852 int znum = 0;
3853
3854 /* WXOR-only;
3855 * skip first slots with destinations (if ZERO_DST has
3856 * place)
3857 */
3858 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3859 znum++;
3860 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3861 znum++;
3862
3863 iter = ppc440spe_get_group_entry(sw_desc, index + znum);
3864 mult_idx = DMA_CUED_MULT1_OFF;
3865 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
3866 }
3867
3868 if (likely(iter)) {
3869 ppc440spe_desc_set_src_mult(iter, chan,
3870 mult_idx, mult_dst, mult);
3871
3872 if (unlikely(iter1)) {
3873 /* if we have two destinations for RXOR, then
3874 * we've just set Q mult. Set-up P now.
3875 */
3876 ppc440spe_desc_set_src_mult(iter1, chan,
3877 mult_idx, mult_dst, 1);
3878 }
3879
3880 }
3881 break;
3882
3883 case PPC440SPE_XOR_ID:
3884 iter = sw_desc->group_head;
3885 if (sw_desc->dst_cnt == 2) {
3886 /* both P & Q calculations required; set P mult here */
3887 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
3888
3889 /* and then set Q mult */
3890 iter = ppc440spe_get_group_entry(sw_desc,
3891 sw_desc->descs_per_op);
3892 }
3893 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
3894 break;
3895 }
3896}
3897
3898/**
3899 * ppc440spe_adma_free_chan_resources - free the resources allocated
3900 */
3901static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
3902{
3903 struct ppc440spe_adma_chan *ppc440spe_chan;
3904 struct ppc440spe_adma_desc_slot *iter, *_iter;
3905 int in_use_descs = 0;
3906
3907 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3908 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3909
3910 spin_lock_bh(&ppc440spe_chan->lock);
3911 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
3912 chain_node) {
3913 in_use_descs++;
3914 list_del(&iter->chain_node);
3915 }
3916 list_for_each_entry_safe_reverse(iter, _iter,
3917 &ppc440spe_chan->all_slots, slot_node) {
3918 list_del(&iter->slot_node);
3919 kfree(iter);
3920 ppc440spe_chan->slots_allocated--;
3921 }
3922 ppc440spe_chan->last_used = NULL;
3923
3924 dev_dbg(ppc440spe_chan->device->common.dev,
3925 "ppc440spe adma%d %s slots_allocated %d\n",
3926 ppc440spe_chan->device->id,
3927 __func__, ppc440spe_chan->slots_allocated);
3928 spin_unlock_bh(&ppc440spe_chan->lock);
3929
3930	/* one is OK since we left it on the chain on purpose */
3931 if (in_use_descs > 1)
3932 printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
3933 in_use_descs - 1);
3934}
3935
3936/**
3937 * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
3938 * @chan: ADMA channel handle
3939 * @cookie: ADMA transaction identifier
3940 */
3941static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
3942 dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
3943{
3944 struct ppc440spe_adma_chan *ppc440spe_chan;
3945 dma_cookie_t last_used;
3946 dma_cookie_t last_complete;
3947 enum dma_status ret;
3948
3949 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3950 last_used = chan->cookie;
3951 last_complete = ppc440spe_chan->completed_cookie;
3952
3953 if (done)
3954 *done = last_complete;
3955 if (used)
3956 *used = last_used;
3957
3958 ret = dma_async_is_complete(cookie, last_complete, last_used);
3959 if (ret == DMA_SUCCESS)
3960 return ret;
3961
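	/* not complete yet: run the descriptor cleanup, then re-read and
	 * re-test the cookies
	 */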
3962 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3963
3964 last_used = chan->cookie;
3965 last_complete = ppc440spe_chan->completed_cookie;
3966
3967 if (done)
3968 *done = last_complete;
3969 if (used)
3970 *used = last_used;
3971
3972 return dma_async_is_complete(cookie, last_complete, last_used);
3973}
3974
3975/**
3976 * ppc440spe_adma_eot_handler - end of transfer interrupt handler
3977 */
3978static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
3979{
3980 struct ppc440spe_adma_chan *chan = data;
3981
3982 dev_dbg(chan->device->common.dev,
3983 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3984
3985 tasklet_schedule(&chan->irq_tasklet);
3986 ppc440spe_adma_device_clear_eot_status(chan);
3987
3988 return IRQ_HANDLED;
3989}
3990
3991/**
3992 * ppc440spe_adma_err_handler - DMA error interrupt handler;
3993 * do the same things as an EOT handler
3994 */
3995static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
3996{
3997 struct ppc440spe_adma_chan *chan = data;
3998
3999 dev_dbg(chan->device->common.dev,
4000 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
4001
4002 tasklet_schedule(&chan->irq_tasklet);
4003 ppc440spe_adma_device_clear_eot_status(chan);
4004
4005 return IRQ_HANDLED;
4006}
4007
4008/**
4009 * ppc440spe_test_callback - called when the test operation has completed
4010 */
4011static void ppc440spe_test_callback(void *unused)
4012{
4013 complete(&ppc440spe_r6_test_comp);
4014}
4015
4016/**
4017 * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
4018 */
4019static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
4020{
4021 struct ppc440spe_adma_chan *ppc440spe_chan;
4022
4023 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
4024 dev_dbg(ppc440spe_chan->device->common.dev,
4025 "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
4026 __func__, ppc440spe_chan->pending);
4027
4028 if (ppc440spe_chan->pending) {
4029 ppc440spe_chan->pending = 0;
4030 ppc440spe_chan_append(ppc440spe_chan);
4031 }
4032}
4033
4034/**
4035 * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
4036 *	use FIFOs, as opposed to the chains used by the XOR engine, so this is
4037 *	an XOR-specific operation)
4038 */
4039static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
4040{
4041 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
4042 dma_cookie_t cookie;
4043 int slot_cnt, slots_per_op;
4044
4045 dev_dbg(chan->device->common.dev,
4046 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
4047
4048 spin_lock_bh(&chan->lock);
4049 slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
4050 sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
4051 if (sw_desc) {
4052 group_start = sw_desc->group_head;
4053 list_splice_init(&sw_desc->group_list, &chan->chain);
4054 async_tx_ack(&sw_desc->async_tx);
4055 ppc440spe_desc_init_null_xor(group_start);
4056
4057 cookie = chan->common.cookie;
4058 cookie++;
4059 if (cookie <= 1)
4060 cookie = 2;
4061
4062 /* initialize the completed cookie to be less than
4063 * the most recently used cookie
4064 */
4065 chan->completed_cookie = cookie - 1;
4066 chan->common.cookie = sw_desc->async_tx.cookie = cookie;
4067
4068 /* channel should not be busy */
4069 BUG_ON(ppc440spe_chan_is_busy(chan));
4070
4071 /* set the descriptor address */
4072 ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
4073
4074 /* run the descriptor */
4075 ppc440spe_chan_run(chan);
4076 } else
4077 printk(KERN_ERR "ppc440spe adma%d"
4078 " failed to allocate null descriptor\n",
4079 chan->device->id);
4080 spin_unlock_bh(&chan->lock);
4081}
4082
4083/**
4084 * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled successfully.
4085 *	For this we just perform one WXOR operation with the same source
4086 *	and destination addresses and a GF-multiplier of 1; if RAID-6
4087 *	capabilities are enabled then src/dst will end up filled with zeroes.
4088 */
4089static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
4090{
4091 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
4092 struct page *pg;
4093 char *a;
4094 dma_addr_t dma_addr, addrs[2];
4095 unsigned long op = 0;
4096 int rval = 0;
4097
4098 set_bit(PPC440SPE_DESC_WXOR, &op);
4099
4100 pg = alloc_page(GFP_KERNEL);
4101 if (!pg)
4102 return -ENOMEM;
4103
4104 spin_lock_bh(&chan->lock);
4105 sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
4106 if (sw_desc) {
4107		/* 1 src, 1 dst, int_ena, WXOR */
4108 ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
4109 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
4110 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
4111 iter->unmap_len = PAGE_SIZE;
4112 }
4113 } else {
4114 rval = -EFAULT;
4115 spin_unlock_bh(&chan->lock);
4116 goto exit;
4117 }
4118 spin_unlock_bh(&chan->lock);
4119
4120 /* Fill the test page with ones */
4121 memset(page_address(pg), 0xFF, PAGE_SIZE);
4122 dma_addr = dma_map_page(chan->device->dev, pg, 0,
4123 PAGE_SIZE, DMA_BIDIRECTIONAL);
4124
4125 /* Setup addresses */
4126 ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
4127 ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
4128 addrs[0] = dma_addr;
4129 addrs[1] = 0;
4130 ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
4131
4132 async_tx_ack(&sw_desc->async_tx);
4133 sw_desc->async_tx.callback = ppc440spe_test_callback;
4134 sw_desc->async_tx.callback_param = NULL;
4135
4136 init_completion(&ppc440spe_r6_test_comp);
4137
4138 ppc440spe_adma_tx_submit(&sw_desc->async_tx);
4139 ppc440spe_adma_issue_pending(&chan->common);
4140
4141 wait_for_completion(&ppc440spe_r6_test_comp);
4142
4143 /* Now check if the test page is zeroed */
4144 a = page_address(pg);
4145 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
4146 /* page is zero - RAID-6 enabled */
4147 rval = 0;
4148 } else {
4149 /* RAID-6 was not enabled */
4150 rval = -EINVAL;
4151 }
4152exit:
4153 __free_page(pg);
4154 return rval;
4155}
4156
4157static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
4158{
4159 switch (adev->id) {
4160 case PPC440SPE_DMA0_ID:
4161 case PPC440SPE_DMA1_ID:
4162 dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
4163 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
4164 dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
4165 dma_cap_set(DMA_PQ, adev->common.cap_mask);
4166 dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
4167 dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
4168 break;
4169 case PPC440SPE_XOR_ID:
4170 dma_cap_set(DMA_XOR, adev->common.cap_mask);
4171 dma_cap_set(DMA_PQ, adev->common.cap_mask);
4172 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
4173 adev->common.cap_mask = adev->common.cap_mask;
4174 break;
4175 }
4176
4177 /* Set base routines */
4178 adev->common.device_alloc_chan_resources =
4179 ppc440spe_adma_alloc_chan_resources;
4180 adev->common.device_free_chan_resources =
4181 ppc440spe_adma_free_chan_resources;
4182 adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
4183 adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
4184
4185 /* Set prep routines based on capability */
4186 if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
4187 adev->common.device_prep_dma_memcpy =
4188 ppc440spe_adma_prep_dma_memcpy;
4189 }
4190 if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
4191 adev->common.device_prep_dma_memset =
4192 ppc440spe_adma_prep_dma_memset;
4193 }
4194 if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
4195 adev->common.max_xor = XOR_MAX_OPS;
4196 adev->common.device_prep_dma_xor =
4197 ppc440spe_adma_prep_dma_xor;
4198 }
4199 if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
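		/* the PQ source limit is derived from the CDB FIFO size; e.g.
		 * DMA0_FIFO_SIZE = 0x1000 and sizeof(struct dma_cdb) = 32
		 * give a maximum of 128 PQ sources
		 */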
4200 switch (adev->id) {
4201 case PPC440SPE_DMA0_ID:
4202 dma_set_maxpq(&adev->common,
4203 DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
4204 break;
4205 case PPC440SPE_DMA1_ID:
4206 dma_set_maxpq(&adev->common,
4207 DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
4208 break;
4209 case PPC440SPE_XOR_ID:
4210 adev->common.max_pq = XOR_MAX_OPS * 3;
4211 break;
4212 }
4213 adev->common.device_prep_dma_pq =
4214 ppc440spe_adma_prep_dma_pq;
4215 }
4216 if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
4217 switch (adev->id) {
4218 case PPC440SPE_DMA0_ID:
4219 adev->common.max_pq = DMA0_FIFO_SIZE /
4220 sizeof(struct dma_cdb);
4221 break;
4222 case PPC440SPE_DMA1_ID:
4223 adev->common.max_pq = DMA1_FIFO_SIZE /
4224 sizeof(struct dma_cdb);
4225 break;
4226 }
4227 adev->common.device_prep_dma_pq_val =
4228 ppc440spe_adma_prep_dma_pqzero_sum;
4229 }
4230 if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
4231 switch (adev->id) {
4232 case PPC440SPE_DMA0_ID:
4233 adev->common.max_xor = DMA0_FIFO_SIZE /
4234 sizeof(struct dma_cdb);
4235 break;
4236 case PPC440SPE_DMA1_ID:
4237 adev->common.max_xor = DMA1_FIFO_SIZE /
4238 sizeof(struct dma_cdb);
4239 break;
4240 }
4241 adev->common.device_prep_dma_xor_val =
4242 ppc440spe_adma_prep_dma_xor_zero_sum;
4243 }
4244 if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
4245 adev->common.device_prep_dma_interrupt =
4246 ppc440spe_adma_prep_dma_interrupt;
4247 }
4248 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
4249 "( %s%s%s%s%s%s%s)\n",
4250 dev_name(adev->dev),
4251 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
4252 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
4253 dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
4254 dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
4255 dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
4256 dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
4257 dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
4258}
4259
4260static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
4261 struct ppc440spe_adma_chan *chan,
4262 int *initcode)
4263{
4264 struct device_node *np;
4265 int ret;
4266
4267 np = container_of(adev->dev, struct of_device, dev)->node;
4268 if (adev->id != PPC440SPE_XOR_ID) {
4269 adev->err_irq = irq_of_parse_and_map(np, 1);
4270 if (adev->err_irq == NO_IRQ) {
4271 dev_warn(adev->dev, "no err irq resource?\n");
4272 *initcode = PPC_ADMA_INIT_IRQ2;
4273 adev->err_irq = -ENXIO;
4274 } else
4275 atomic_inc(&ppc440spe_adma_err_irq_ref);
4276 } else {
4277 adev->err_irq = -ENXIO;
4278 }
4279
4280 adev->irq = irq_of_parse_and_map(np, 0);
4281 if (adev->irq == NO_IRQ) {
4282 dev_err(adev->dev, "no irq resource\n");
4283 *initcode = PPC_ADMA_INIT_IRQ1;
4284 ret = -ENXIO;
4285 goto err_irq_map;
4286 }
4287 dev_dbg(adev->dev, "irq %d, err irq %d\n",
4288 adev->irq, adev->err_irq);
4289
4290 ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
4291 0, dev_driver_string(adev->dev), chan);
4292 if (ret) {
4293 dev_err(adev->dev, "can't request irq %d\n",
4294 adev->irq);
4295 *initcode = PPC_ADMA_INIT_IRQ1;
4296 ret = -EIO;
4297 goto err_req1;
4298 }
4299
4300	/* only DMA engines have a separate error IRQ,
4301	 * so it's OK if err_irq < 0 in the XOR engine case.
4302	 */
4303 if (adev->err_irq > 0) {
4304 /* both DMA engines share common error IRQ */
4305 ret = request_irq(adev->err_irq,
4306 ppc440spe_adma_err_handler,
4307 IRQF_SHARED,
4308 dev_driver_string(adev->dev),
4309 chan);
4310 if (ret) {
4311 dev_err(adev->dev, "can't request irq %d\n",
4312 adev->err_irq);
4313 *initcode = PPC_ADMA_INIT_IRQ2;
4314 ret = -EIO;
4315 goto err_req2;
4316 }
4317 }
4318
4319 if (adev->id == PPC440SPE_XOR_ID) {
4320 /* enable XOR engine interrupts */
4321 iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
4322 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
4323 &adev->xor_reg->ier);
4324 } else {
4325 u32 mask, enable;
4326
4327 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4328 if (!np) {
4329 pr_err("%s: can't find I2O device tree node\n",
4330 __func__);
4331 ret = -ENODEV;
4332 goto err_req2;
4333 }
4334 adev->i2o_reg = of_iomap(np, 0);
4335 if (!adev->i2o_reg) {
4336 pr_err("%s: failed to map I2O registers\n", __func__);
4337 of_node_put(np);
4338 ret = -EINVAL;
4339 goto err_req2;
4340 }
4341 of_node_put(np);
4342 /* Unmask 'CS FIFO Attention' interrupts and
4343 * enable generating interrupts on errors
4344 */
4345 enable = (adev->id == PPC440SPE_DMA0_ID) ?
4346 ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4347 ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4348 mask = ioread32(&adev->i2o_reg->iopim) & enable;
4349 iowrite32(mask, &adev->i2o_reg->iopim);
4350 }
4351 return 0;
4352
4353err_req2:
4354 free_irq(adev->irq, chan);
4355err_req1:
4356 irq_dispose_mapping(adev->irq);
4357err_irq_map:
4358 if (adev->err_irq > 0) {
4359 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
4360 irq_dispose_mapping(adev->err_irq);
4361 }
4362 return ret;
4363}
4364
4365static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
4366 struct ppc440spe_adma_chan *chan)
4367{
4368 u32 mask, disable;
4369
4370 if (adev->id == PPC440SPE_XOR_ID) {
4371 /* disable XOR engine interrupts */
4372 mask = ioread32be(&adev->xor_reg->ier);
4373 mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
4374 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
4375 iowrite32be(mask, &adev->xor_reg->ier);
4376 } else {
4377 /* disable DMAx engine interrupts */
4378 disable = (adev->id == PPC440SPE_DMA0_ID) ?
4379 (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4380 (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4381 mask = ioread32(&adev->i2o_reg->iopim) | disable;
4382 iowrite32(mask, &adev->i2o_reg->iopim);
4383 }
4384 free_irq(adev->irq, chan);
4385 irq_dispose_mapping(adev->irq);
4386 if (adev->err_irq > 0) {
4387 free_irq(adev->err_irq, chan);
4388 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
4389 irq_dispose_mapping(adev->err_irq);
4390 iounmap(adev->i2o_reg);
4391 }
4392 }
4393}
4394
4395/**
4396 * ppc440spe_adma_probe - probe the asynch device
4397 */
4398static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
4399 const struct of_device_id *match)
4400{
4401 struct device_node *np = ofdev->node;
4402 struct resource res;
4403 struct ppc440spe_adma_device *adev;
4404 struct ppc440spe_adma_chan *chan;
4405 struct ppc_dma_chan_ref *ref, *_ref;
4406 int ret = 0, initcode = PPC_ADMA_INIT_OK;
4407 const u32 *idx;
4408 int len;
4409 void *regs;
4410 u32 id, pool_size;
4411
4412 if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
4413 id = PPC440SPE_XOR_ID;
4414		/* As far as the XOR engine is concerned, it does not
4415		 * use FIFOs but a linked list, so there is no dependency
4416		 * between the pool size to allocate and the engine configuration.
4417		 */
4418 pool_size = PAGE_SIZE << 1;
4419 } else {
4420 /* it is DMA0 or DMA1 */
4421 idx = of_get_property(np, "cell-index", &len);
4422 if (!idx || (len != sizeof(u32))) {
4423 dev_err(&ofdev->dev, "Device node %s has missing "
4424 "or invalid cell-index property\n",
4425 np->full_name);
4426 return -EINVAL;
4427 }
4428 id = *idx;
4429		/* DMA0,1 engines use a FIFO to maintain CDBs, so we
4430		 * should allocate the pool according to the size of this
4431		 * FIFO. Thus, the pool size depends on the FIFO depth:
4432		 * the pool should provide as many CDBs as the FIFO can
4433		 * hold CDB pointers.
4434		 * That is
4435		 * CDB size = 32B;
4436		 * CDBs number = (DMA0_FIFO_SIZE >> 3);
4437		 * Pool size = CDBs number * CDB size =
4438		 *	= (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
4439		 */
4440 pool_size = (id == PPC440SPE_DMA0_ID) ?
4441 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4442 pool_size <<= 2;
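		/* e.g. DMA0_FIFO_SIZE = 0x1000 yields a 0x4000-byte (16 KiB) pool */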
4443 }
4444
4445 if (of_address_to_resource(np, 0, &res)) {
4446 dev_err(&ofdev->dev, "failed to get memory resource\n");
4447 initcode = PPC_ADMA_INIT_MEMRES;
4448 ret = -ENODEV;
4449 goto out;
4450 }
4451
4452 if (!request_mem_region(res.start, resource_size(&res),
4453 dev_driver_string(&ofdev->dev))) {
4454 dev_err(&ofdev->dev, "failed to request memory region "
4455 "(0x%016llx-0x%016llx)\n",
4456 (u64)res.start, (u64)res.end);
4457 initcode = PPC_ADMA_INIT_MEMREG;
4458 ret = -EBUSY;
4459 goto out;
4460 }
4461
4462 /* create a device */
4463 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
4464 if (!adev) {
4465 dev_err(&ofdev->dev, "failed to allocate device\n");
4466 initcode = PPC_ADMA_INIT_ALLOC;
4467 ret = -ENOMEM;
4468 goto err_adev_alloc;
4469 }
4470
4471 adev->id = id;
4472 adev->pool_size = pool_size;
4473 /* allocate coherent memory for hardware descriptors */
4474 adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
4475 adev->pool_size, &adev->dma_desc_pool,
4476 GFP_KERNEL);
4477 if (adev->dma_desc_pool_virt == NULL) {
4478 dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
4479 "memory for hardware descriptors\n",
4480 adev->pool_size);
4481 initcode = PPC_ADMA_INIT_COHERENT;
4482 ret = -ENOMEM;
4483 goto err_dma_alloc;
4484 }
4485	dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
4486 adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
4487
4488 regs = ioremap(res.start, resource_size(&res));
4489 if (!regs) {
4490 dev_err(&ofdev->dev, "failed to ioremap regs!\n");
4491 goto err_regs_alloc;
4492 }
4493
4494 if (adev->id == PPC440SPE_XOR_ID) {
4495 adev->xor_reg = regs;
4496 /* Reset XOR */
4497 iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
4498 iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
4499 } else {
4500 size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
4501 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4502 adev->dma_reg = regs;
4503		/* DMAx_FIFO_SIZE is defined in bytes,
4504		 * <fsiz> is defined as a number of CDB pointers (8 bytes each).
4505		 * DMA FIFO Length = CSlength + CPlength, where
4506		 * CSlength = CPlength = (fsiz + 1) * 8.
4507		 */
4508 iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
4509 &adev->dma_reg->fsiz);
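		/* e.g. a 0x1000-byte FIFO programs fsiz = (0x1000 >> 3) - 2 = 0x1fe */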
4510 /* Configure DMA engine */
4511 iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
4512 &adev->dma_reg->cfg);
4513 /* Clear Status */
4514 iowrite32(~0, &adev->dma_reg->dsts);
4515 }
4516
4517 adev->dev = &ofdev->dev;
4518 adev->common.dev = &ofdev->dev;
4519 INIT_LIST_HEAD(&adev->common.channels);
4520 dev_set_drvdata(&ofdev->dev, adev);
4521
4522 /* create a channel */
4523 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
4524 if (!chan) {
4525 dev_err(&ofdev->dev, "can't allocate channel structure\n");
4526 initcode = PPC_ADMA_INIT_CHANNEL;
4527 ret = -ENOMEM;
4528 goto err_chan_alloc;
4529 }
4530
4531 spin_lock_init(&chan->lock);
4532 INIT_LIST_HEAD(&chan->chain);
4533 INIT_LIST_HEAD(&chan->all_slots);
4534 chan->device = adev;
4535 chan->common.device = &adev->common;
4536 list_add_tail(&chan->common.device_node, &adev->common.channels);
4537 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
4538 (unsigned long)chan);
4539
4540 /* allocate and map helper pages for async validation or
4541 * async_mult/async_sum_product operations on DMA0/1.
4542 */
4543 if (adev->id != PPC440SPE_XOR_ID) {
4544 chan->pdest_page = alloc_page(GFP_KERNEL);
4545 chan->qdest_page = alloc_page(GFP_KERNEL);
4546 if (!chan->pdest_page ||
4547 !chan->qdest_page) {
4548 if (chan->pdest_page)
4549 __free_page(chan->pdest_page);
4550 if (chan->qdest_page)
4551 __free_page(chan->qdest_page);
4552 ret = -ENOMEM;
4553 goto err_page_alloc;
4554 }
4555 chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
4556 PAGE_SIZE, DMA_BIDIRECTIONAL);
4557 chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
4558 PAGE_SIZE, DMA_BIDIRECTIONAL);
4559 }
4560
4561 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
4562 if (ref) {
4563 ref->chan = &chan->common;
4564 INIT_LIST_HEAD(&ref->node);
4565 list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
4566 } else {
4567 dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
4568 ret = -ENOMEM;
4569 goto err_ref_alloc;
4570 }
4571
4572 ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
4573 if (ret)
4574 goto err_irq;
4575
4576 ppc440spe_adma_init_capabilities(adev);
4577
4578 ret = dma_async_device_register(&adev->common);
4579 if (ret) {
4580 initcode = PPC_ADMA_INIT_REGISTER;
4581 dev_err(&ofdev->dev, "failed to register dma device\n");
4582 goto err_dev_reg;
4583 }
4584
4585 goto out;
4586
4587err_dev_reg:
4588 ppc440spe_adma_release_irqs(adev, chan);
4589err_irq:
4590 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
4591 if (chan == to_ppc440spe_adma_chan(ref->chan)) {
4592 list_del(&ref->node);
4593 kfree(ref);
4594 }
4595 }
4596err_ref_alloc:
4597 if (adev->id != PPC440SPE_XOR_ID) {
4598 dma_unmap_page(&ofdev->dev, chan->pdest,
4599 PAGE_SIZE, DMA_BIDIRECTIONAL);
4600 dma_unmap_page(&ofdev->dev, chan->qdest,
4601 PAGE_SIZE, DMA_BIDIRECTIONAL);
4602 __free_page(chan->pdest_page);
4603 __free_page(chan->qdest_page);
4604 }
4605err_page_alloc:
4606 kfree(chan);
4607err_chan_alloc:
4608 if (adev->id == PPC440SPE_XOR_ID)
4609 iounmap(adev->xor_reg);
4610 else
4611 iounmap(adev->dma_reg);
4612err_regs_alloc:
4613 dma_free_coherent(adev->dev, adev->pool_size,
4614 adev->dma_desc_pool_virt,
4615 adev->dma_desc_pool);
4616err_dma_alloc:
4617 kfree(adev);
4618err_adev_alloc:
4619 release_mem_region(res.start, resource_size(&res));
4620out:
4621 if (id < PPC440SPE_ADMA_ENGINES_NUM)
4622 ppc440spe_adma_devices[id] = initcode;
4623
4624 return ret;
4625}
4626
4627/**
4628 * ppc440spe_adma_remove - remove the asynch device
4629 */
4630static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
4631{
4632 struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
4633 struct device_node *np = ofdev->node;
4634 struct resource res;
4635 struct dma_chan *chan, *_chan;
4636 struct ppc_dma_chan_ref *ref, *_ref;
4637 struct ppc440spe_adma_chan *ppc440spe_chan;
4638
4639 dev_set_drvdata(&ofdev->dev, NULL);
4640 if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
4641 ppc440spe_adma_devices[adev->id] = -1;
4642
4643 dma_async_device_unregister(&adev->common);
4644
4645 list_for_each_entry_safe(chan, _chan, &adev->common.channels,
4646 device_node) {
4647 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
4648 ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
4649 tasklet_kill(&ppc440spe_chan->irq_tasklet);
4650 if (adev->id != PPC440SPE_XOR_ID) {
4651 dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
4652 PAGE_SIZE, DMA_BIDIRECTIONAL);
4653 dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
4654 PAGE_SIZE, DMA_BIDIRECTIONAL);
4655 __free_page(ppc440spe_chan->pdest_page);
4656 __free_page(ppc440spe_chan->qdest_page);
4657 }
4658 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
4659 node) {
4660 if (ppc440spe_chan ==
4661 to_ppc440spe_adma_chan(ref->chan)) {
4662 list_del(&ref->node);
4663 kfree(ref);
4664 }
4665 }
4666 list_del(&chan->device_node);
4667 kfree(ppc440spe_chan);
4668 }
4669
4670 dma_free_coherent(adev->dev, adev->pool_size,
4671 adev->dma_desc_pool_virt, adev->dma_desc_pool);
4672 if (adev->id == PPC440SPE_XOR_ID)
4673 iounmap(adev->xor_reg);
4674 else
4675 iounmap(adev->dma_reg);
4676 of_address_to_resource(np, 0, &res);
4677 release_mem_region(res.start, resource_size(&res));
4678 kfree(adev);
4679 return 0;
4680}
4681
4682/*
4683 * /sys driver interface used to enable h/w RAID-6 capabilities.
4684 * The files created in the driver directory (e.g.
4685 * /sys/devices/plb.0/400100100.dma0/driver/) are "devices", "enable" and "poly".
4686 * "devices" shows the available engines.
4687 * "enable" is used to enable RAID-6 capabilities or to check
4688 * whether they have been activated.
4689 * "poly" allows setting/checking the polynomial in use (PPC440SPe only).
4690 */
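/*
 * Example usage (the key value is platform-specific and not documented here):
 *   echo <key> > /sys/devices/plb.0/400100100.dma0/driver/enable
 *   cat /sys/devices/plb.0/400100100.dma0/driver/enable
 */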
4691
4692static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
4693{
4694 ssize_t size = 0;
4695 int i;
4696
4697 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
4698 if (ppc440spe_adma_devices[i] == -1)
4699 continue;
4700 size += snprintf(buf + size, PAGE_SIZE - size,
4701 "PPC440SP(E)-ADMA.%d: %s\n", i,
4702 ppc_adma_errors[ppc440spe_adma_devices[i]]);
4703 }
4704 return size;
4705}
4706
4707static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
4708{
4709 return snprintf(buf, PAGE_SIZE,
4710 "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
4711 ppc440spe_r6_enabled ? "EN" : "DIS");
4712}
4713
4714static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
4715 const char *buf, size_t count)
4716{
4717 unsigned long val;
4718
4719 if (!count || count > 11)
4720 return -EINVAL;
4721
4722 if (!ppc440spe_r6_tchan)
4723 return -EFAULT;
4724
4725 /* Write a key */
4726 sscanf(buf, "%lx", &val);
4727 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
4728 isync();
4729
4730 /* Verify whether it really works now */
4731 if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
4732 pr_info("PPC440SP(e) RAID-6 has been activated "
4733 "successfully\n");
4734 ppc440spe_r6_enabled = 1;
4735 } else {
4736		pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
4737			" Wrong key?\n");
4738 ppc440spe_r6_enabled = 0;
4739 }
4740 return count;
4741}
4742
4743static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
4744{
4745 ssize_t size = 0;
4746 u32 reg;
4747
4748#ifdef CONFIG_440SP
4749 /* 440SP has fixed polynomial */
4750 reg = 0x4d;
4751#else
4752 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4753 reg >>= MQ0_CFBHL_POLY;
4754 reg &= 0xFF;
4755#endif
4756
4757 size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
4758 "uses 0x1%02x polynomial.\n", reg);
4759 return size;
4760}
4761
4762static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
4763 const char *buf, size_t count)
4764{
4765 unsigned long reg, val;
4766
4767#ifdef CONFIG_440SP
4768 /* 440SP uses default 0x14D polynomial only */
4769 return -EINVAL;
4770#endif
4771
4772 if (!count || count > 6)
4773 return -EINVAL;
4774
4775 /* e.g., 0x14D or 0x11D */
4776 sscanf(buf, "%lx", &val);
4777
4778 if (val & ~0x1FF)
4779 return -EINVAL;
4780
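	/* only the low 8 bits are programmed; the leading 0x100 term of the
	 * 9-bit polynomial is implicit (cf. the "0x1%02x" format used by the
	 * show routine and PPC440SPE_DEFAULT_POLY)
	 */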
4781 val &= 0xFF;
4782 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4783 reg &= ~(0xFF << MQ0_CFBHL_POLY);
4784 reg |= val << MQ0_CFBHL_POLY;
4785 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
4786
4787 return count;
4788}
4789
4790static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
4791static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
4792 store_ppc440spe_r6enable);
4793static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
4794 store_ppc440spe_r6poly);
4795
4796/*
4797 * Common initialisation for RAID engines; allocate memory for
4798 * DMAx FIFOs, perform configuration common for all DMA engines.
4799 * Further DMA engine specific configuration is done at probe time.
4800 */
4801static int ppc440spe_configure_raid_devices(void)
4802{
4803 struct device_node *np;
4804 struct resource i2o_res;
4805 struct i2o_regs __iomem *i2o_reg;
4806 dcr_host_t i2o_dcr_host;
4807 unsigned int dcr_base, dcr_len;
4808 int i, ret;
4809
4810 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4811 if (!np) {
4812 pr_err("%s: can't find I2O device tree node\n",
4813 __func__);
4814 return -ENODEV;
4815 }
4816
4817 if (of_address_to_resource(np, 0, &i2o_res)) {
4818 of_node_put(np);
4819 return -EINVAL;
4820 }
4821
4822 i2o_reg = of_iomap(np, 0);
4823 if (!i2o_reg) {
4824 pr_err("%s: failed to map I2O registers\n", __func__);
4825 of_node_put(np);
4826 return -EINVAL;
4827 }
4828
4829 /* Get I2O DCRs base */
4830 dcr_base = dcr_resource_start(np, 0);
4831 dcr_len = dcr_resource_len(np, 0);
4832 if (!dcr_base && !dcr_len) {
4833 pr_err("%s: can't get DCR registers base/len!\n",
4834 np->full_name);
4835 of_node_put(np);
4836 iounmap(i2o_reg);
4837 return -ENODEV;
4838 }
4839
4840 i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
4841 if (!DCR_MAP_OK(i2o_dcr_host)) {
4842 pr_err("%s: failed to map DCRs!\n", np->full_name);
4843 of_node_put(np);
4844 iounmap(i2o_reg);
4845 return -ENODEV;
4846 }
4847 of_node_put(np);
4848
4849	/* Provide memory regions for the DMAs' FIFOs: I2O, DMA0 and DMA1 share
4850	 * the base address of FIFO memory space.
4851	 * We actually need twice as much physical memory as is programmed in the
4852	 * <fsiz> register (because there are two FIFOs for each DMA: CP and CS).
4853 */
4854 ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
4855 GFP_KERNEL);
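	/* with both FIFOs at 0x1000 bytes this allocates 0x4000 bytes (16 KiB) */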
4856 if (!ppc440spe_dma_fifo_buf) {
4857 pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
4858 iounmap(i2o_reg);
4859 dcr_unmap(i2o_dcr_host, dcr_len);
4860 return -ENOMEM;
4861 }
4862
4863 /*
4864 * Configure h/w
4865 */
4866 /* Reset I2O/DMA */
4867 mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
4868 mtdcri(SDR0, DCRN_SDR0_SRST, 0);
4869
4870 /* Setup the base address of mmaped registers */
4871 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
4872 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
4873 I2O_REG_ENABLE);
4874 dcr_unmap(i2o_dcr_host, dcr_len);
4875
4876 /* Setup FIFO memory space base address */
4877 iowrite32(0, &i2o_reg->ifbah);
4878 iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
4879
4880	/* set a zero FIFO size for I2O, so the whole
4881	 * ppc440spe_dma_fifo_buf is used by the DMAs.
4882	 * The DMAx FIFOs will be configured at probe time.
4883	 */
4884 iowrite32(0, &i2o_reg->ifsiz);
4885 iounmap(i2o_reg);
4886
4887	/* To prepare WXOR/RXOR functionality we need access to the
4888	 * Memory Queue Module DCRs (it will finally be enabled
4889	 * via the /sys interface of the ppc440spe ADMA driver).
4890 */
4891 np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
4892 if (!np) {
4893 pr_err("%s: can't find MQ device tree node\n",
4894 __func__);
4895 ret = -ENODEV;
4896 goto out_free;
4897 }
4898
4899 /* Get MQ DCRs base */
4900 dcr_base = dcr_resource_start(np, 0);
4901 dcr_len = dcr_resource_len(np, 0);
4902 if (!dcr_base && !dcr_len) {
4903 pr_err("%s: can't get DCR registers base/len!\n",
4904 np->full_name);
4905 ret = -ENODEV;
4906 goto out_mq;
4907 }
4908
4909 ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
4910 if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
4911 pr_err("%s: failed to map DCRs!\n", np->full_name);
4912 ret = -ENODEV;
4913 goto out_mq;
4914 }
4915 of_node_put(np);
4916 ppc440spe_mq_dcr_len = dcr_len;
4917
4918 /* Set HB alias */
4919 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
4920
4921 /* Set:
4922 * - LL transaction passing limit to 1;
4923 * - Memory controller cycle limit to 1;
4924 * - Galois Polynomial to 0x14d (default)
4925 */
4926 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
4927 (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
4928 (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
4929
4930 atomic_set(&ppc440spe_adma_err_irq_ref, 0);
4931 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
4932 ppc440spe_adma_devices[i] = -1;
4933
4934 return 0;
4935
4936out_mq:
4937 of_node_put(np);
4938out_free:
4939 kfree(ppc440spe_dma_fifo_buf);
4940 return ret;
4941}
4942
4943static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
4944 { .compatible = "ibm,dma-440spe", },
4945 { .compatible = "amcc,xor-accelerator", },
4946 {},
4947};
4948MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
4949
4950static struct of_platform_driver ppc440spe_adma_driver = {
4951 .match_table = ppc440spe_adma_of_match,
4952 .probe = ppc440spe_adma_probe,
4953 .remove = __devexit_p(ppc440spe_adma_remove),
4954 .driver = {
4955 .name = "PPC440SP(E)-ADMA",
4956 .owner = THIS_MODULE,
4957 },
4958};
4959
4960static __init int ppc440spe_adma_init(void)
4961{
4962 int ret;
4963
4964 ret = ppc440spe_configure_raid_devices();
4965 if (ret)
4966 return ret;
4967
4968 ret = of_register_platform_driver(&ppc440spe_adma_driver);
4969 if (ret) {
4970 pr_err("%s: failed to register platform driver\n",
4971 __func__);
4972 goto out_reg;
4973 }
4974
4975 /* Initialization status */
4976 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4977 &driver_attr_devices);
4978 if (ret)
4979 goto out_dev;
4980
4981 /* RAID-6 h/w enable entry */
4982 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4983 &driver_attr_enable);
4984 if (ret)
4985 goto out_en;
4986
4987 /* GF polynomial to use */
4988 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4989 &driver_attr_poly);
4990 if (!ret)
4991 return ret;
4992
4993 driver_remove_file(&ppc440spe_adma_driver.driver,
4994 &driver_attr_enable);
4995out_en:
4996 driver_remove_file(&ppc440spe_adma_driver.driver,
4997 &driver_attr_devices);
4998out_dev:
4999 /* User will not be able to enable h/w RAID-6 */
5000 pr_err("%s: failed to create RAID-6 driver interface\n",
5001 __func__);
5002 of_unregister_platform_driver(&ppc440spe_adma_driver);
5003out_reg:
5004 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
5005 kfree(ppc440spe_dma_fifo_buf);
5006 return ret;
5007}
5008
5009static void __exit ppc440spe_adma_exit(void)
5010{
5011 driver_remove_file(&ppc440spe_adma_driver.driver,
5012 &driver_attr_poly);
5013 driver_remove_file(&ppc440spe_adma_driver.driver,
5014 &driver_attr_enable);
5015 driver_remove_file(&ppc440spe_adma_driver.driver,
5016 &driver_attr_devices);
5017 of_unregister_platform_driver(&ppc440spe_adma_driver);
5018 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
5019 kfree(ppc440spe_dma_fifo_buf);
5020}
5021
5022arch_initcall(ppc440spe_adma_init);
5023module_exit(ppc440spe_adma_exit);
5024
5025MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
5026MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
5027MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
new file mode 100644
index 000000000000..8ada5a812e3b
--- /dev/null
+++ b/drivers/dma/ppc4xx/adma.h
@@ -0,0 +1,195 @@
1/*
2 * 2006-2009 (C) DENX Software Engineering.
3 *
4 * Author: Yuri Tikhonov <yur@emcraft.com>
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of
8 * any kind, whether express or implied.
9 */
10
11#ifndef _PPC440SPE_ADMA_H
12#define _PPC440SPE_ADMA_H
13
14#include <linux/types.h>
15#include "dma.h"
16#include "xor.h"
17
18#define to_ppc440spe_adma_chan(chan) \
19 container_of(chan, struct ppc440spe_adma_chan, common)
20#define to_ppc440spe_adma_device(dev) \
21 container_of(dev, struct ppc440spe_adma_device, common)
22#define tx_to_ppc440spe_adma_slot(tx) \
23 container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
24
25/* Default polynomial (the only one available on 440SP) */
26#define PPC440SPE_DEFAULT_POLY 0x4d
27
28#define PPC440SPE_ADMA_ENGINES_NUM (XOR_ENGINES_NUM + DMA_ENGINES_NUM)
29
30#define PPC440SPE_ADMA_WATCHDOG_MSEC 3
31#define PPC440SPE_ADMA_THRESHOLD 1
32
33#define PPC440SPE_DMA0_ID 0
34#define PPC440SPE_DMA1_ID 1
35#define PPC440SPE_XOR_ID 2
36
37#define PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT 0xFFFFFFUL
38/* this is the XOR_CBBCR width */
39#define PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT (1 << 31)
40#define PPC440SPE_ADMA_ZERO_SUM_MAX_BYTE_COUNT PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT
41
42#define PPC440SPE_RXOR_RUN 0
43
44#define MQ0_CF2H_RXOR_BS_MASK 0x1FF
45
46#undef ADMA_LL_DEBUG
47
48/**
49 * struct ppc440spe_adma_device - internal representation of an ADMA device
50 * @dev: device
51 * @dma_reg: base for DMAx register access
52 * @xor_reg: base for XOR register access
53 * @i2o_reg: base for I2O register access
54 * @id: HW ADMA Device selector
55 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
56 * @dma_desc_pool: base of DMA descriptor region (DMA address)
57 * @pool_size: size of the pool
58 * @irq: DMAx or XOR irq number
59 * @err_irq: DMAx error irq number
60 * @common: embedded struct dma_device
61 */
62struct ppc440spe_adma_device {
63 struct device *dev;
64 struct dma_regs __iomem *dma_reg;
65 struct xor_regs __iomem *xor_reg;
66 struct i2o_regs __iomem *i2o_reg;
67 int id;
68 void *dma_desc_pool_virt;
69 dma_addr_t dma_desc_pool;
70 size_t pool_size;
71 int irq;
72 int err_irq;
73 struct dma_device common;
74};
75
76/**
77 * struct ppc440spe_adma_chan - internal representation of an ADMA channel
78 * @lock: serializes enqueue/dequeue operations to the slot pool
79 * @device: parent device
80 * @chain: device chain view of the descriptors
81 * @common: common dmaengine channel object members
82 * @all_slots: complete domain of slots usable by the channel
83 * @pending: allows batching of hardware operations
84 * @completed_cookie: identifier for the most recently completed operation
85 * @slots_allocated: records the actual size of the descriptor slot pool
86 * @hw_chain_inited: h/w descriptor chain initialization flag
87 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
88 * @needs_unmap: if buffers should not be unmapped upon final processing
89 * @pdest_page: P destination page for async validate operation
90 * @qdest_page: Q destination page for async validate operation
91 * @pdest: P dma addr for async validate operation
92 * @qdest: Q dma addr for async validate operation
93 */
94struct ppc440spe_adma_chan {
95 spinlock_t lock;
96 struct ppc440spe_adma_device *device;
97 struct list_head chain;
98 struct dma_chan common;
99 struct list_head all_slots;
100 struct ppc440spe_adma_desc_slot *last_used;
101 int pending;
102 dma_cookie_t completed_cookie;
103 int slots_allocated;
104 int hw_chain_inited;
105 struct tasklet_struct irq_tasklet;
106 u8 needs_unmap;
107 struct page *pdest_page;
108 struct page *qdest_page;
109 dma_addr_t pdest;
110 dma_addr_t qdest;
111};
112
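/*
 * struct ppc440spe_rxor - cursor state used while parsing sources into
 * DMA2 RXOR command blocks (see ppc440spe_adma_dma2rxor_prep_src() and
 * ppc440spe_adma_dma2rxor_inc_addr()): addr_count/desc_count track how many
 * operand slots and CDBs have been consumed, xor_count counts the sources
 * in the current region, and state drives the region parser.
 */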
113struct ppc440spe_rxor {
114 u32 addrl;
115 u32 addrh;
116 int len;
117 int xor_count;
118 int addr_count;
119 int desc_count;
120 int state;
121};
122
123/**
124 * struct ppc440spe_adma_desc_slot - PPC440SPE-ADMA software descriptor
125 * @phys: hardware address of the hardware descriptor chain
126 * @group_head: first operation in a transaction
127 * @hw_next: pointer to the next descriptor in chain
128 * @async_tx: support for the async_tx api
129 * @slot_node: node on the ppc440spe_adma_chan.all_slots list
130 * @chain_node: node on the ppc440spe_adma_chan.chain list
131 * @group_list: list of slots that make up a multi-descriptor transaction
132 * for example transfer lengths larger than the supported hw max
133 * @unmap_len: transaction bytecount
134 * @hw_desc: virtual address of the hardware descriptor chain
135 * @stride: currently chained or not
136 * @idx: pool index
137 * @slot_cnt: total slots used in a transaction (group of operations)
138 * @src_cnt: number of sources set in this descriptor
139 * @dst_cnt: number of destinations set in the descriptor
140 * @slots_per_op: number of slots per operation
141 * @descs_per_op: number of slots per P/Q operation; see the comment
142 *	for the ppc440spe_prep_dma_pqxor function
143 * @flags: desc state/type
144 * @reverse_flags: 1 if a corresponding rxor address uses reversed address order
145 * @xor_check_result: result of zero sum
146 * @crc32_result: result of CRC calculation
147 */
148struct ppc440spe_adma_desc_slot {
149 dma_addr_t phys;
150 struct ppc440spe_adma_desc_slot *group_head;
151 struct ppc440spe_adma_desc_slot *hw_next;
152 struct dma_async_tx_descriptor async_tx;
153 struct list_head slot_node;
154 struct list_head chain_node; /* node in channel ops list */
155 struct list_head group_list; /* list */
156 unsigned int unmap_len;
157 void *hw_desc;
158 u16 stride;
159 u16 idx;
160 u16 slot_cnt;
161 u8 src_cnt;
162 u8 dst_cnt;
163 u8 slots_per_op;
164 u8 descs_per_op;
165 unsigned long flags;
166 unsigned long reverse_flags[8];
167
168#define PPC440SPE_DESC_INT 0 /* generate interrupt on complete */
169#define PPC440SPE_ZERO_P	1	/* clear P destination */
170#define PPC440SPE_ZERO_Q 2 /* clear Q destination */
171#define PPC440SPE_COHERENT 3 /* src/dst are coherent */
172
173#define PPC440SPE_DESC_WXOR 4 /* WXORs are in chain */
174#define PPC440SPE_DESC_RXOR 5 /* RXOR is in chain */
175
176#define PPC440SPE_DESC_RXOR123 8 /* CDB for RXOR123 operation */
177#define PPC440SPE_DESC_RXOR124 9 /* CDB for RXOR124 operation */
178#define PPC440SPE_DESC_RXOR125 10 /* CDB for RXOR125 operation */
179#define PPC440SPE_DESC_RXOR12 11 /* CDB for RXOR12 operation */
180#define PPC440SPE_DESC_RXOR_REV 12 /* CDB has srcs in reversed order */
181
182#define PPC440SPE_DESC_PCHECK 13
183#define PPC440SPE_DESC_QCHECK 14
184
185#define PPC440SPE_DESC_RXOR_MSK 0x3
186
187 struct ppc440spe_rxor rxor_cursor;
188
189 union {
190 u32 *xor_check_result;
191 u32 *crc32_result;
192 };
193};
194
195#endif /* _PPC440SPE_ADMA_H */
diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h
new file mode 100644
index 000000000000..bcde2df2f373
--- /dev/null
+++ b/drivers/dma/ppc4xx/dma.h
@@ -0,0 +1,223 @@
1/*
2 * 440SPe's DMA engines support header file
3 *
4 * 2006-2009 (C) DENX Software Engineering.
5 *
6 * Author: Yuri Tikhonov <yur@emcraft.com>
7 *
8 * This file is licensed under the term of the GNU General Public License
9 * version 2. The program licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#ifndef _PPC440SPE_DMA_H
14#define _PPC440SPE_DMA_H
15
16#include <linux/types.h>
17
18/* Number of elements in the array with static CDBs */
19#define MAX_STAT_DMA_CDBS 16
20/* Number of DMA engines available on the controller */
21#define DMA_ENGINES_NUM 2
22
23/* Maximum h/w supported number of destinations */
24#define DMA_DEST_MAX_NUM 2
25
26/* FIFO's params */
27#define DMA0_FIFO_SIZE 0x1000
28#define DMA1_FIFO_SIZE 0x1000
29#define DMA_FIFO_ENABLE (1<<12)
30
31/* DMA Configuration Register. Data Transfer Engine PLB Priority: */
32#define DMA_CFG_DXEPR_LP (0<<26)
33#define DMA_CFG_DXEPR_HP (3<<26)
34#define DMA_CFG_DXEPR_HHP (2<<26)
35#define DMA_CFG_DXEPR_HHHP (1<<26)
36
37/* DMA Configuration Register. DMA FIFO Manager PLB Priority: */
38#define DMA_CFG_DFMPP_LP (0<<23)
39#define DMA_CFG_DFMPP_HP (3<<23)
40#define DMA_CFG_DFMPP_HHP (2<<23)
41#define DMA_CFG_DFMPP_HHHP (1<<23)
42
43/* DMA Configuration Register. Force 64-byte Alignment */
44#define DMA_CFG_FALGN (1 << 19)
45
46/*UIC0:*/
47#define D0CPF_INT (1<<12)
48#define D0CSF_INT (1<<11)
49#define D1CPF_INT (1<<10)
50#define D1CSF_INT (1<<9)
51/*UIC1:*/
52#define DMAE_INT (1<<9)
53
54/* I2O IOP Interrupt Mask Register */
55#define I2O_IOPIM_P0SNE (1<<3)
56#define I2O_IOPIM_P0EM (1<<5)
57#define I2O_IOPIM_P1SNE (1<<6)
58#define I2O_IOPIM_P1EM (1<<8)
59
60/* DMA CDB fields */
61#define DMA_CDB_MSK (0xF)
62#define DMA_CDB_64B_ADDR (1<<2)
63#define DMA_CDB_NO_INT (1<<3)
64#define DMA_CDB_STATUS_MSK (0x3)
65#define DMA_CDB_ADDR_MSK (0xFFFFFFF0)
66
67/* DMA CDB OpCodes */
68#define DMA_CDB_OPC_NO_OP (0x00)
69#define DMA_CDB_OPC_MV_SG1_SG2 (0x01)
70#define DMA_CDB_OPC_MULTICAST (0x05)
71#define DMA_CDB_OPC_DFILL128 (0x24)
72#define DMA_CDB_OPC_DCHECK128 (0x23)
73
74#define DMA_CUED_XOR_BASE (0x10000000)
75#define DMA_CUED_XOR_HB (0x00000008)
76
77#ifdef CONFIG_440SP
78#define DMA_CUED_MULT1_OFF 0
79#define DMA_CUED_MULT2_OFF 8
80#define DMA_CUED_MULT3_OFF 16
81#define DMA_CUED_REGION_OFF 24
82#define DMA_CUED_XOR_WIN_MSK (0xFC000000)
83#else
84#define DMA_CUED_MULT1_OFF 2
85#define DMA_CUED_MULT2_OFF 10
86#define DMA_CUED_MULT3_OFF 18
87#define DMA_CUED_REGION_OFF 26
88#define DMA_CUED_XOR_WIN_MSK (0xF0000000)
89#endif
90
91#define DMA_CUED_REGION_MSK 0x3
92#define DMA_RXOR123 0x0
93#define DMA_RXOR124 0x1
94#define DMA_RXOR125 0x2
95#define DMA_RXOR12 0x3
96
97/* S/G addresses */
98#define DMA_CDB_SG_SRC 1
99#define DMA_CDB_SG_DST1 2
100#define DMA_CDB_SG_DST2 3
101
102/*
103 * DMAx engines Command Descriptor Block Type
104 */
105struct dma_cdb {
106 /*
107 * Basic CDB structure (Table 20-17, p.499, 440spe_um_1_22.pdf)
108 */
109 u8 pad0[2]; /* reserved */
110 u8 attr; /* attributes */
111 u8 opc; /* opcode */
112 u32 sg1u; /* upper SG1 address */
113 u32 sg1l; /* lower SG1 address */
114 u32 cnt; /* SG count, 3B used */
115 u32 sg2u; /* upper SG2 address */
116 u32 sg2l; /* lower SG2 address */
117 u32 sg3u; /* upper SG3 address */
118 u32 sg3l; /* lower SG3 address */
119};
120
121/*
122 * DMAx hardware registers (p.515 in 440SPe UM 1.22)
123 */
124struct dma_regs {
125 u32 cpfpl;
126 u32 cpfph;
127 u32 csfpl;
128 u32 csfph;
129 u32 dsts;
130 u32 cfg;
131 u8 pad0[0x8];
132 u16 cpfhp;
133 u16 cpftp;
134 u16 csfhp;
135 u16 csftp;
136 u8 pad1[0x8];
137 u32 acpl;
138 u32 acph;
139 u32 s1bpl;
140 u32 s1bph;
141 u32 s2bpl;
142 u32 s2bph;
143 u32 s3bpl;
144 u32 s3bph;
145 u8 pad2[0x10];
146 u32 earl;
147 u32 earh;
148 u8 pad3[0x8];
149 u32 seat;
150 u32 sead;
151 u32 op;
152 u32 fsiz;
153};
154
155/*
156 * I2O hardware registers (p.528 in 440SPe UM 1.22)
157 */
158struct i2o_regs {
159 u32 ists;
160 u32 iseat;
161 u32 isead;
162 u8 pad0[0x14];
163 u32 idbel;
164 u8 pad1[0xc];
165 u32 ihis;
166 u32 ihim;
167 u8 pad2[0x8];
168 u32 ihiq;
169 u32 ihoq;
170 u8 pad3[0x8];
171 u32 iopis;
172 u32 iopim;
173 u32 iopiq;
174 u8 iopoq;
175 u8 pad4[3];
176 u16 iiflh;
177 u16 iiflt;
178 u16 iiplh;
179 u16 iiplt;
180 u16 ioflh;
181 u16 ioflt;
182 u16 ioplh;
183 u16 ioplt;
184 u32 iidc;
185 u32 ictl;
186 u32 ifcpp;
187 u8 pad5[0x4];
188 u16 mfac0;
189 u16 mfac1;
190 u16 mfac2;
191 u16 mfac3;
192 u16 mfac4;
193 u16 mfac5;
194 u16 mfac6;
195 u16 mfac7;
196 u16 ifcfh;
197 u16 ifcht;
198 u8 pad6[0x4];
199 u32 iifmc;
200 u32 iodb;
201 u32 iodbc;
202 u32 ifbal;
203 u32 ifbah;
204 u32 ifsiz;
205 u32 ispd0;
206 u32 ispd1;
207 u32 ispd2;
208 u32 ispd3;
209 u32 ihipl;
210 u32 ihiph;
211 u32 ihopl;
212 u32 ihoph;
213 u32 iiipl;
214 u32 iiiph;
215 u32 iiopl;
216 u32 iioph;
217 u32 ifcpl;
218 u32 ifcph;
219 u8 pad7[0x8];
220 u32 iopt;
221};
222
223#endif /* _PPC440SPE_DMA_H */
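
As a rough illustration of how the CDB layout and opcode defines above could be used, here is a minimal sketch that fills a struct dma_cdb for a plain memory-to-memory move. It is not taken from the adma driver itself; the helper name, the choice of DMA_CDB_OPC_MV_SG1_SG2 for a simple copy, the 24-bit count masking and the omission of endianness/cache handling are assumptions for illustration only.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include "dma.h"	/* hypothetical path to the header above */

/* Describe a copy of 'len' bytes from 'src' to 'dst' in one CDB. */
static void ppc440spe_fill_memcpy_cdb(struct dma_cdb *cdb,
				      u64 src, u64 dst, u32 len)
{
	memset(cdb, 0, sizeof(*cdb));

	cdb->opc  = DMA_CDB_OPC_MV_SG1_SG2;	/* move SG1 -> SG2 */
	cdb->cnt  = len & 0x00FFFFFF;		/* count field uses 3 bytes */

	cdb->sg1u = upper_32_bits(src);		/* source (SG1) */
	cdb->sg1l = lower_32_bits(src) & DMA_CDB_ADDR_MSK;
	cdb->sg2u = upper_32_bits(dst);		/* destination (SG2) */
	cdb->sg2l = lower_32_bits(dst) & DMA_CDB_ADDR_MSK;
}
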
diff --git a/drivers/dma/ppc4xx/xor.h b/drivers/dma/ppc4xx/xor.h
new file mode 100644
index 000000000000..daed7384daac
--- /dev/null
+++ b/drivers/dma/ppc4xx/xor.h
@@ -0,0 +1,110 @@
1/*
2 * 440SPe's XOR engines support header file
3 *
4 * 2006-2009 (C) DENX Software Engineering.
5 *
6 * Author: Yuri Tikhonov <yur@emcraft.com>
7 *
8 * This file is licensed under the term of the GNU General Public License
9 * version 2. The program licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#ifndef _PPC440SPE_XOR_H
14#define _PPC440SPE_XOR_H
15
16#include <linux/types.h>
17
18/* Number of XOR engines available on the controller */
19#define XOR_ENGINES_NUM 1
20
21/* Number of operands supported in the h/w */
22#define XOR_MAX_OPS 16
23
24/*
25 * XOR Command Block Control Register bits
26 */
27#define XOR_CBCR_LNK_BIT (1<<31) /* link present */
28#define XOR_CBCR_TGT_BIT (1<<30) /* target present */
29#define XOR_CBCR_CBCE_BIT (1<<29) /* command block complete enable */
30#define XOR_CBCR_RNZE_BIT (1<<28) /* result not zero enable */
31#define XOR_CBCR_XNOR_BIT (1<<15) /* XOR/XNOR */
32#define XOR_CDCR_OAC_MSK (0x7F) /* operand address count */
33
34/*
35 * XORCore Status Register bits
36 */
37#define XOR_SR_XCP_BIT (1<<31) /* core processing */
38#define XOR_SR_ICB_BIT (1<<17) /* invalid CB */
39#define XOR_SR_IC_BIT (1<<16) /* invalid command */
40#define XOR_SR_IPE_BIT (1<<15) /* internal parity error */
41#define XOR_SR_RNZ_BIT (1<<2) /* result not Zero */
42#define XOR_SR_CBC_BIT (1<<1) /* CB complete */
43#define XOR_SR_CBLC_BIT (1<<0) /* CB list complete */
44
45/*
46 * XORCore Control Set and Reset Register bits
47 */
48#define XOR_CRSR_XASR_BIT (1<<31) /* soft reset */
49#define XOR_CRSR_XAE_BIT (1<<30) /* enable */
50#define XOR_CRSR_RCBE_BIT (1<<29) /* refetch CB enable */
51#define XOR_CRSR_PAUS_BIT (1<<28) /* pause */
52#define XOR_CRSR_64BA_BIT (1<<27) /* 64/32 CB format */
53#define XOR_CRSR_CLP_BIT (1<<25) /* continue list processing */
54
55/*
56 * XORCore Interrupt Enable Register
57 */
58#define XOR_IE_ICBIE_BIT (1<<17) /* Invalid Command Block IRQ Enable */
59#define XOR_IE_ICIE_BIT (1<<16) /* Invalid Command IRQ Enable */
60#define XOR_IE_RPTIE_BIT (1<<14) /* Read PLB Timeout Error IRQ Enable */
61#define XOR_IE_CBCIE_BIT (1<<1) /* CB complete interrupt enable */
62#define XOR_IE_CBLCI_BIT (1<<0) /* CB list complete interrupt enable */
63
64/*
65 * XOR Accelerator engine Command Block Type
66 */
67struct xor_cb {
68 /*
69 * Basic 64-bit format XOR CB (Table 19-1, p.463, 440spe_um_1_22.pdf)
70 */
71 u32 cbc; /* control */
72 u32 cbbc; /* byte count */
73 u32 cbs; /* status */
74 u8 pad0[4]; /* reserved */
75 u32 cbtah; /* target address high */
76 u32 cbtal; /* target address low */
77 u32 cblah; /* link address high */
78 u32 cblal; /* link address low */
79 struct {
80 u32 h;
81 u32 l;
82 } __attribute__ ((packed)) ops[16];
83} __attribute__ ((packed));
84
85/*
86 * XOR hardware registers Table 19-3, UM 1.22
87 */
88struct xor_regs {
89 u32 op_ar[16][2]; /* operand address[0]-high,[1]-low registers */
90 u8 pad0[352]; /* reserved */
91 u32 cbcr; /* CB control register */
92 u32 cbbcr; /* CB byte count register */
93 u32 cbsr; /* CB status register */
94 u8 pad1[4]; /* reserved */
95 u32 cbtahr; /* operand target address high register */
96 u32 cbtalr; /* operand target address low register */
97 u32 cblahr; /* CB link address high register */
98 u32 cblalr; /* CB link address low register */
99 u32 crsr; /* control set register */
100 u32 crrr; /* control reset register */
101 u32 ccbahr; /* current CB address high register */
102 u32 ccbalr; /* current CB address low register */
103 u32 plbr; /* PLB configuration register */
104 u32 ier; /* interrupt enable register */
105 u32 pecr; /* parity error count register */
106 u32 sr; /* status register */
107 u32 revidr; /* revision ID register */
108};
109
110#endif /* _PPC440SPE_XOR_H */
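
For the XOR engine, a comparable sketch (again not from the driver; the function name, the use of XOR_CBCR_TGT_BIT together with the low OAC operand-count field, and the lack of any address-alignment handling are assumptions) builds a 64-bit format command block that XORs several source buffers into one target:

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/string.h>
#include "xor.h"	/* hypothetical path to the header above */

/* Build a CB that XORs 'src_cnt' source buffers of 'len' bytes into 'dst'. */
static void ppc440spe_fill_xor_cb(struct xor_cb *cb, u64 dst,
				  const u64 *srcs, int src_cnt, u32 len)
{
	int i;

	if (WARN_ON(src_cnt > XOR_MAX_OPS))
		return;

	memset(cb, 0, sizeof(*cb));

	/* target present, operand count in the low OAC field */
	cb->cbc   = XOR_CBCR_TGT_BIT | (src_cnt & XOR_CDCR_OAC_MSK);
	cb->cbbc  = len;			/* byte count */
	cb->cbtah = upper_32_bits(dst);		/* target address */
	cb->cbtal = lower_32_bits(dst);

	for (i = 0; i < src_cnt; i++) {		/* operand addresses */
		cb->ops[i].h = upper_32_bits(srcs[i]);
		cb->ops[i].l = lower_32_bits(srcs[i]);
	}
}
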
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 034ecf0ace03..2e4a54c8afeb 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -80,17 +80,17 @@ static int sh_dmae_rst(int id)
80 unsigned short dmaor; 80 unsigned short dmaor;
81 81
82 sh_dmae_ctl_stop(id); 82 sh_dmae_ctl_stop(id);
83 dmaor = (dmaor_read_reg(id)|DMAOR_INIT); 83 dmaor = dmaor_read_reg(id) | DMAOR_INIT;
84 84
85 dmaor_write_reg(id, dmaor); 85 dmaor_write_reg(id, dmaor);
86 if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) { 86 if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
87 pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); 87 pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
88 return -EINVAL; 88 return -EINVAL;
89 } 89 }
90 return 0; 90 return 0;
91} 91}
92 92
93static int dmae_is_idle(struct sh_dmae_chan *sh_chan) 93static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
94{ 94{
95 u32 chcr = sh_dmae_readl(sh_chan, CHCR); 95 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
96 if (chcr & CHCR_DE) { 96 if (chcr & CHCR_DE) {
@@ -110,15 +110,14 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
110{ 110{
111 sh_dmae_writel(sh_chan, hw.sar, SAR); 111 sh_dmae_writel(sh_chan, hw.sar, SAR);
112 sh_dmae_writel(sh_chan, hw.dar, DAR); 112 sh_dmae_writel(sh_chan, hw.dar, DAR);
113 sh_dmae_writel(sh_chan, 113 sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
114 (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
115} 114}
116 115
117static void dmae_start(struct sh_dmae_chan *sh_chan) 116static void dmae_start(struct sh_dmae_chan *sh_chan)
118{ 117{
119 u32 chcr = sh_dmae_readl(sh_chan, CHCR); 118 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
120 119
121 chcr |= (CHCR_DE|CHCR_IE); 120 chcr |= CHCR_DE | CHCR_IE;
122 sh_dmae_writel(sh_chan, chcr, CHCR); 121 sh_dmae_writel(sh_chan, chcr, CHCR);
123} 122}
124 123
@@ -132,7 +131,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
132 131
133static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) 132static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
134{ 133{
135 int ret = dmae_is_idle(sh_chan); 134 int ret = dmae_is_busy(sh_chan);
136 /* While the DMA channel is busy, CHCR cannot be written */ 135 /* While the DMA channel is busy, CHCR cannot be written */
137 if (ret) 136 if (ret)
138 return ret; 137 return ret;
@@ -149,7 +148,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
149{ 148{
150 u32 addr; 149 u32 addr;
151 int shift = 0; 150 int shift = 0;
152 int ret = dmae_is_idle(sh_chan); 151 int ret = dmae_is_busy(sh_chan);
153 if (ret) 152 if (ret)
154 return ret; 153 return ret;
155 154
@@ -307,7 +306,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
307 new = sh_dmae_get_desc(sh_chan); 306 new = sh_dmae_get_desc(sh_chan);
308 if (!new) { 307 if (!new) {
309 dev_err(sh_chan->dev, 308 dev_err(sh_chan->dev,
310 "No free memory for link descriptor\n"); 309 "No free memory for link descriptor\n");
311 goto err_get_desc; 310 goto err_get_desc;
312 } 311 }
313 312
@@ -388,7 +387,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
388 struct sh_dmae_regs hw; 387 struct sh_dmae_regs hw;
389 388
390 /* DMA work check */ 389 /* DMA work check */
391 if (dmae_is_idle(sh_chan)) 390 if (dmae_is_busy(sh_chan))
392 return; 391 return;
393 392
394 /* Find the first untransferred descriptor */ 393 /* Find the first untransferred descriptor */
@@ -497,8 +496,9 @@ static void dmae_do_tasklet(unsigned long data)
497 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; 496 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
498 struct sh_desc *desc, *_desc, *cur_desc = NULL; 497 struct sh_desc *desc, *_desc, *cur_desc = NULL;
499 u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 498 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
499
500 list_for_each_entry_safe(desc, _desc, 500 list_for_each_entry_safe(desc, _desc,
501 &sh_chan->ld_queue, node) { 501 &sh_chan->ld_queue, node) {
502 if ((desc->hw.sar + desc->hw.tcr) == sar_buf) { 502 if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
503 cur_desc = desc; 503 cur_desc = desc;
504 break; 504 break;
@@ -543,8 +543,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
543 /* alloc channel */ 543 /* alloc channel */
544 new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); 544 new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
545 if (!new_sh_chan) { 545 if (!new_sh_chan) {
546 dev_err(shdev->common.dev, "No free memory for allocating " 546 dev_err(shdev->common.dev,
547 "dma channels!\n"); 547 "No free memory for allocating dma channels!\n");
548 return -ENOMEM; 548 return -ENOMEM;
549 } 549 }
550 550
@@ -586,8 +586,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
586 "sh-dmae%d", new_sh_chan->id); 586 "sh-dmae%d", new_sh_chan->id);
587 587
588 /* set up channel irq */ 588 /* set up channel irq */
589 err = request_irq(irq, &sh_dmae_interrupt, 589 err = request_irq(irq, &sh_dmae_interrupt, irqflags,
590 irqflags, new_sh_chan->dev_id, new_sh_chan); 590 new_sh_chan->dev_id, new_sh_chan);
591 if (err) { 591 if (err) {
592 dev_err(shdev->common.dev, "DMA channel %d request_irq error " 592 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
593 "with return %d\n", id, err); 593 "with return %d\n", id, err);
@@ -676,6 +676,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
676 shdev->common.device_is_tx_complete = sh_dmae_is_complete; 676 shdev->common.device_is_tx_complete = sh_dmae_is_complete;
677 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; 677 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
678 shdev->common.dev = &pdev->dev; 678 shdev->common.dev = &pdev->dev;
679 /* Default transfer size of 32 bytes requires 32-byte alignment */
680 shdev->common.copy_align = 5;
679 681
680#if defined(CONFIG_CPU_SH4) 682#if defined(CONFIG_CPU_SH4)
681 /* Non Mix IRQ mode SH7722/SH7730 etc... */ 683 /* Non Mix IRQ mode SH7722/SH7730 etc... */
@@ -688,8 +690,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
688 } 690 }
689 691
690 for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { 692 for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
691 err = request_irq(eirq[ecnt], sh_dmae_err, 693 err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
692 irqflags, "DMAC Address Error", shdev); 694 "DMAC Address Error", shdev);
693 if (err) { 695 if (err) {
694 dev_err(&pdev->dev, "DMA device request_irq" 696 dev_err(&pdev->dev, "DMA device request_irq"
695 "error (irq %d) with return %d\n", 697 "error (irq %d) with return %d\n",
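
The copy_align value added to sh_dmae_probe() above is a shift count, i.e. log2 of the required alignment in bytes, so 5 corresponds to the 32-byte default transfer size mentioned in the comment. A minimal sketch (the helper name is illustrative, not part of the driver or the dmaengine API) of how a client could test a memcpy request against it:

#include <linux/kernel.h>
#include <linux/dmaengine.h>

/* True if src, dst and len all satisfy the device's copy alignment. */
static bool dma_copy_is_aligned(struct dma_device *dev,
				dma_addr_t src, dma_addr_t dst, size_t len)
{
	size_t align = 1UL << dev->copy_align;	/* e.g. 1 << 5 = 32 bytes */

	return IS_ALIGNED(src | dst | len, align);
}
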
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b4bc15a2c0a..60b81e529b42 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -35,15 +35,15 @@ struct sh_desc {
35 35
36struct sh_dmae_chan { 36struct sh_dmae_chan {
37 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 37 dma_cookie_t completed_cookie; /* The maximum cookie completed */
38 spinlock_t desc_lock; /* Descriptor operation lock */ 38 spinlock_t desc_lock; /* Descriptor operation lock */
39 struct list_head ld_queue; /* Link descriptors queue */ 39 struct list_head ld_queue; /* Link descriptors queue */
40 struct list_head ld_free; /* Link descriptors free */ 40 struct list_head ld_free; /* Link descriptors free */
41 struct dma_chan common; /* DMA common channel */ 41 struct dma_chan common; /* DMA common channel */
42 struct device *dev; /* Channel device */ 42 struct device *dev; /* Channel device */
43 struct tasklet_struct tasklet; /* Tasklet */ 43 struct tasklet_struct tasklet; /* Tasklet */
44 int descs_allocated; /* desc count */ 44 int descs_allocated; /* desc count */
45 int id; /* Raw id of this channel */ 45 int id; /* Raw id of this channel */
46 char dev_id[16]; /* unique name per DMAC of channel */ 46 char dev_id[16]; /* unique name per DMAC of channel */
47 47
48 /* Set chcr */ 48 /* Set chcr */
49 int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs); 49 int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index fb6bb64e8861..3ebc61067e54 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1313,7 +1313,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
1313 1313
1314} 1314}
1315 1315
1316static struct dev_pm_ops txx9dmac_dev_pm_ops = { 1316static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
1317 .suspend_noirq = txx9dmac_suspend_noirq, 1317 .suspend_noirq = txx9dmac_suspend_noirq,
1318 .resume_noirq = txx9dmac_resume_noirq, 1318 .resume_noirq = txx9dmac_resume_noirq,
1319}; 1319};
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index c693fcc2213c..8fc91a019620 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -299,6 +299,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
299 if (!handle_errors) 299 if (!handle_errors)
300 return; 300 return;
301 301
302 /*
303 * GART TLB error reporting is disabled by default. Bail out early.
304 */
305 if (TLB_ERROR(ec) && !report_gart_errors)
306 return;
307
302 pr_emerg(" Northbridge Error, node %d", node_id); 308 pr_emerg(" Northbridge Error, node %d", node_id);
303 309
304 /* 310 /*
@@ -310,10 +316,9 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
310 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) 316 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
311 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); 317 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
312 } else { 318 } else {
313 pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf))); 319 pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1));
314 } 320 }
315 321
316
317 pr_emerg("%s.\n", EXT_ERR_MSG(xec)); 322 pr_emerg("%s.\n", EXT_ERR_MSG(xec));
318 323
319 if (BUS_ERROR(ec) && nb_bus_decoder) 324 if (BUS_ERROR(ec) && nb_bus_decoder)
@@ -333,21 +338,6 @@ static void amd_decode_fr_mce(u64 mc5_status)
333static inline void amd_decode_err_code(unsigned int ec) 338static inline void amd_decode_err_code(unsigned int ec)
334{ 339{
335 if (TLB_ERROR(ec)) { 340 if (TLB_ERROR(ec)) {
336 /*
337 * GART errors are intended to help graphics driver developers
338 * to detect bad GART PTEs. It is recommended by AMD to disable
339 * GART table walk error reporting by default[1] (currently
340 * being disabled in mce_cpu_quirks()) and according to the
341 * comment in mce_cpu_quirks(), such GART errors can be
342 * incorrectly triggered. We may see these errors anyway and
343 * unless requested by the user, they won't be reported.
344 *
345 * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
346 * AMD NPT family 0Fh processors
347 */
348 if (!report_gart_errors)
349 return;
350
351 pr_emerg(" Transaction: %s, Cache Level %s\n", 341 pr_emerg(" Transaction: %s, Cache Level %s\n",
352 TT_MSG(ec), LL_MSG(ec)); 342 TT_MSG(ec), LL_MSG(ec));
353 } else if (MEM_ERROR(ec)) { 343 } else if (MEM_ERROR(ec)) {
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 22db05a67bfb..7785d8ffa404 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -9,6 +9,11 @@
9 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet 9 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
10 * http://download.intel.com/design/chipsets/datashts/318378.pdf 10 * http://download.intel.com/design/chipsets/datashts/318378.pdf
11 * 11 *
12 * The intel 5100 has two independent channels. EDAC core currently
13 * can not reflect this configuration so instead the chip-select
 14 * rows for each respective channel are laid out one after another,
15 * the first half belonging to channel 0, the second half belonging
16 * to channel 1.
12 */ 17 */
13#include <linux/module.h> 18#include <linux/module.h>
14#include <linux/init.h> 19#include <linux/init.h>
@@ -25,6 +30,8 @@
25 30
26/* device 16, func 1 */ 31/* device 16, func 1 */
27#define I5100_MC 0x40 /* Memory Control Register */ 32#define I5100_MC 0x40 /* Memory Control Register */
33#define I5100_MC_SCRBEN_MASK (1 << 7)
34#define I5100_MC_SCRBDONE_MASK (1 << 4)
28#define I5100_MS 0x44 /* Memory Status Register */ 35#define I5100_MS 0x44 /* Memory Status Register */
29#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */ 36#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
30#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */ 37#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
@@ -72,11 +79,21 @@
72 79
73/* bit field accessors */ 80/* bit field accessors */
74 81
82static inline u32 i5100_mc_scrben(u32 mc)
83{
84 return mc >> 7 & 1;
85}
86
75static inline u32 i5100_mc_errdeten(u32 mc) 87static inline u32 i5100_mc_errdeten(u32 mc)
76{ 88{
77 return mc >> 5 & 1; 89 return mc >> 5 & 1;
78} 90}
79 91
92static inline u32 i5100_mc_scrbdone(u32 mc)
93{
94 return mc >> 4 & 1;
95}
96
80static inline u16 i5100_spddata_rdo(u16 a) 97static inline u16 i5100_spddata_rdo(u16 a)
81{ 98{
82 return a >> 15 & 1; 99 return a >> 15 & 1;
@@ -265,42 +282,43 @@ static inline u32 i5100_recmemb_ras(u32 a)
265} 282}
266 283
267/* some generic limits */ 284/* some generic limits */
268#define I5100_MAX_RANKS_PER_CTLR 6 285#define I5100_MAX_RANKS_PER_CHAN 6
269#define I5100_MAX_CTLRS 2 286#define I5100_CHANNELS 2
270#define I5100_MAX_RANKS_PER_DIMM 4 287#define I5100_MAX_RANKS_PER_DIMM 4
271#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */ 288#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
272#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4 289#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
273#define I5100_MAX_RANK_INTERLEAVE 4 290#define I5100_MAX_RANK_INTERLEAVE 4
274#define I5100_MAX_DMIRS 5 291#define I5100_MAX_DMIRS 5
292#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
275 293
276struct i5100_priv { 294struct i5100_priv {
277 /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */ 295 /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
278 int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR]; 296 int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
279 297
280 /* 298 /*
281 * mainboard chip select map -- maps i5100 chip selects to 299 * mainboard chip select map -- maps i5100 chip selects to
282 * DIMM slot chip selects. In the case of only 4 ranks per 300 * DIMM slot chip selects. In the case of only 4 ranks per
283 * controller, the mapping is fairly obvious but not unique. 301 * channel, the mapping is fairly obvious but not unique.
284 * we map -1 -> NC and assume both controllers use the same 302 * we map -1 -> NC and assume both channels use the same
285 * map... 303 * map...
286 * 304 *
287 */ 305 */
288 int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM]; 306 int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
289 307
290 /* memory interleave range */ 308 /* memory interleave range */
291 struct { 309 struct {
292 u64 limit; 310 u64 limit;
293 unsigned way[2]; 311 unsigned way[2];
294 } mir[I5100_MAX_CTLRS]; 312 } mir[I5100_CHANNELS];
295 313
296 /* adjusted memory interleave range register */ 314 /* adjusted memory interleave range register */
297 unsigned amir[I5100_MAX_CTLRS]; 315 unsigned amir[I5100_CHANNELS];
298 316
299 /* dimm interleave range */ 317 /* dimm interleave range */
300 struct { 318 struct {
301 unsigned rank[I5100_MAX_RANK_INTERLEAVE]; 319 unsigned rank[I5100_MAX_RANK_INTERLEAVE];
302 u64 limit; 320 u64 limit;
303 } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS]; 321 } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
304 322
305 /* memory technology registers... */ 323 /* memory technology registers... */
306 struct { 324 struct {
@@ -310,30 +328,33 @@ struct i5100_priv {
310 unsigned numbank; /* 2 or 3 lines */ 328 unsigned numbank; /* 2 or 3 lines */
311 unsigned numrow; /* 13 .. 16 lines */ 329 unsigned numrow; /* 13 .. 16 lines */
312 unsigned numcol; /* 11 .. 12 lines */ 330 unsigned numcol; /* 11 .. 12 lines */
313 } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR]; 331 } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
314 332
315 u64 tolm; /* top of low memory in bytes */ 333 u64 tolm; /* top of low memory in bytes */
316 unsigned ranksperctlr; /* number of ranks per controller */ 334 unsigned ranksperchan; /* number of ranks per channel */
317 335
318 struct pci_dev *mc; /* device 16 func 1 */ 336 struct pci_dev *mc; /* device 16 func 1 */
319 struct pci_dev *ch0mm; /* device 21 func 0 */ 337 struct pci_dev *ch0mm; /* device 21 func 0 */
320 struct pci_dev *ch1mm; /* device 22 func 0 */ 338 struct pci_dev *ch1mm; /* device 22 func 0 */
339
340 struct delayed_work i5100_scrubbing;
341 int scrub_enable;
321}; 342};
322 343
323/* map a rank/ctlr to a slot number on the mainboard */ 344/* map a rank/chan to a slot number on the mainboard */
324static int i5100_rank_to_slot(const struct mem_ctl_info *mci, 345static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
325 int ctlr, int rank) 346 int chan, int rank)
326{ 347{
327 const struct i5100_priv *priv = mci->pvt_info; 348 const struct i5100_priv *priv = mci->pvt_info;
328 int i; 349 int i;
329 350
330 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) { 351 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
331 int j; 352 int j;
332 const int numrank = priv->dimm_numrank[ctlr][i]; 353 const int numrank = priv->dimm_numrank[chan][i];
333 354
334 for (j = 0; j < numrank; j++) 355 for (j = 0; j < numrank; j++)
335 if (priv->dimm_csmap[i][j] == rank) 356 if (priv->dimm_csmap[i][j] == rank)
336 return i * 2 + ctlr; 357 return i * 2 + chan;
337 } 358 }
338 359
339 return -1; 360 return -1;
@@ -374,32 +395,32 @@ static const char *i5100_err_msg(unsigned err)
374 return "none"; 395 return "none";
375} 396}
376 397
377/* convert csrow index into a rank (per controller -- 0..5) */ 398/* convert csrow index into a rank (per channel -- 0..5) */
378static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow) 399static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
379{ 400{
380 const struct i5100_priv *priv = mci->pvt_info; 401 const struct i5100_priv *priv = mci->pvt_info;
381 402
382 return csrow % priv->ranksperctlr; 403 return csrow % priv->ranksperchan;
383} 404}
384 405
385/* convert csrow index into a controller (0..1) */ 406/* convert csrow index into a channel (0..1) */
386static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow) 407static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
387{ 408{
388 const struct i5100_priv *priv = mci->pvt_info; 409 const struct i5100_priv *priv = mci->pvt_info;
389 410
390 return csrow / priv->ranksperctlr; 411 return csrow / priv->ranksperchan;
391} 412}
392 413
393static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci, 414static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
394 int ctlr, int rank) 415 int chan, int rank)
395{ 416{
396 const struct i5100_priv *priv = mci->pvt_info; 417 const struct i5100_priv *priv = mci->pvt_info;
397 418
398 return ctlr * priv->ranksperctlr + rank; 419 return chan * priv->ranksperchan + rank;
399} 420}
400 421
401static void i5100_handle_ce(struct mem_ctl_info *mci, 422static void i5100_handle_ce(struct mem_ctl_info *mci,
402 int ctlr, 423 int chan,
403 unsigned bank, 424 unsigned bank,
404 unsigned rank, 425 unsigned rank,
405 unsigned long syndrome, 426 unsigned long syndrome,
@@ -407,12 +428,12 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
407 unsigned ras, 428 unsigned ras,
408 const char *msg) 429 const char *msg)
409{ 430{
410 const int csrow = i5100_rank_to_csrow(mci, ctlr, rank); 431 const int csrow = i5100_rank_to_csrow(mci, chan, rank);
411 432
412 printk(KERN_ERR 433 printk(KERN_ERR
413 "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, " 434 "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
414 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 435 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
415 ctlr, bank, rank, syndrome, cas, ras, 436 chan, bank, rank, syndrome, cas, ras,
416 csrow, mci->csrows[csrow].channels[0].label, msg); 437 csrow, mci->csrows[csrow].channels[0].label, msg);
417 438
418 mci->ce_count++; 439 mci->ce_count++;
@@ -421,7 +442,7 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
421} 442}
422 443
423static void i5100_handle_ue(struct mem_ctl_info *mci, 444static void i5100_handle_ue(struct mem_ctl_info *mci,
424 int ctlr, 445 int chan,
425 unsigned bank, 446 unsigned bank,
426 unsigned rank, 447 unsigned rank,
427 unsigned long syndrome, 448 unsigned long syndrome,
@@ -429,23 +450,23 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
429 unsigned ras, 450 unsigned ras,
430 const char *msg) 451 const char *msg)
431{ 452{
432 const int csrow = i5100_rank_to_csrow(mci, ctlr, rank); 453 const int csrow = i5100_rank_to_csrow(mci, chan, rank);
433 454
434 printk(KERN_ERR 455 printk(KERN_ERR
435 "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, " 456 "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
436 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 457 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
437 ctlr, bank, rank, syndrome, cas, ras, 458 chan, bank, rank, syndrome, cas, ras,
438 csrow, mci->csrows[csrow].channels[0].label, msg); 459 csrow, mci->csrows[csrow].channels[0].label, msg);
439 460
440 mci->ue_count++; 461 mci->ue_count++;
441 mci->csrows[csrow].ue_count++; 462 mci->csrows[csrow].ue_count++;
442} 463}
443 464
444static void i5100_read_log(struct mem_ctl_info *mci, int ctlr, 465static void i5100_read_log(struct mem_ctl_info *mci, int chan,
445 u32 ferr, u32 nerr) 466 u32 ferr, u32 nerr)
446{ 467{
447 struct i5100_priv *priv = mci->pvt_info; 468 struct i5100_priv *priv = mci->pvt_info;
448 struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm; 469 struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
449 u32 dw; 470 u32 dw;
450 u32 dw2; 471 u32 dw2;
451 unsigned syndrome = 0; 472 unsigned syndrome = 0;
@@ -484,7 +505,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
484 else 505 else
485 msg = i5100_err_msg(nerr); 506 msg = i5100_err_msg(nerr);
486 507
487 i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg); 508 i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
488 } 509 }
489 510
490 if (i5100_validlog_nrecmemvalid(dw)) { 511 if (i5100_validlog_nrecmemvalid(dw)) {
@@ -506,7 +527,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
506 else 527 else
507 msg = i5100_err_msg(nerr); 528 msg = i5100_err_msg(nerr);
508 529
509 i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg); 530 i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
510 } 531 }
511 532
512 pci_write_config_dword(pdev, I5100_VALIDLOG, dw); 533 pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
@@ -534,6 +555,80 @@ static void i5100_check_error(struct mem_ctl_info *mci)
534 } 555 }
535} 556}
536 557
 558/* The i5100 chipset scrubs the entire memory once and then sets a
 559 * done bit. Continuous scrubbing is achieved by queueing delayed
 560 * work that checks every few minutes whether the scrub has
 561 * completed and, if so, restarts it.
 562 */
563
564static void i5100_refresh_scrubbing(struct work_struct *work)
565{
566 struct delayed_work *i5100_scrubbing = container_of(work,
567 struct delayed_work,
568 work);
569 struct i5100_priv *priv = container_of(i5100_scrubbing,
570 struct i5100_priv,
571 i5100_scrubbing);
572 u32 dw;
573
574 pci_read_config_dword(priv->mc, I5100_MC, &dw);
575
576 if (priv->scrub_enable) {
577
578 pci_read_config_dword(priv->mc, I5100_MC, &dw);
579
580 if (i5100_mc_scrbdone(dw)) {
581 dw |= I5100_MC_SCRBEN_MASK;
582 pci_write_config_dword(priv->mc, I5100_MC, dw);
583 pci_read_config_dword(priv->mc, I5100_MC, &dw);
584 }
585
586 schedule_delayed_work(&(priv->i5100_scrubbing),
587 I5100_SCRUB_REFRESH_RATE);
588 }
589}
590/*
591 * The bandwidth is based on experimentation, feel free to refine it.
592 */
593static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
594 u32 *bandwidth)
595{
596 struct i5100_priv *priv = mci->pvt_info;
597 u32 dw;
598
599 pci_read_config_dword(priv->mc, I5100_MC, &dw);
600 if (*bandwidth) {
601 priv->scrub_enable = 1;
602 dw |= I5100_MC_SCRBEN_MASK;
603 schedule_delayed_work(&(priv->i5100_scrubbing),
604 I5100_SCRUB_REFRESH_RATE);
605 } else {
606 priv->scrub_enable = 0;
607 dw &= ~I5100_MC_SCRBEN_MASK;
608 cancel_delayed_work(&(priv->i5100_scrubbing));
609 }
610 pci_write_config_dword(priv->mc, I5100_MC, dw);
611
612 pci_read_config_dword(priv->mc, I5100_MC, &dw);
613
614 *bandwidth = 5900000 * i5100_mc_scrben(dw);
615
616 return 0;
617}
618
619static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
620 u32 *bandwidth)
621{
622 struct i5100_priv *priv = mci->pvt_info;
623 u32 dw;
624
625 pci_read_config_dword(priv->mc, I5100_MC, &dw);
626
627 *bandwidth = 5900000 * i5100_mc_scrben(dw);
628
629 return 0;
630}
631
537static struct pci_dev *pci_get_device_func(unsigned vendor, 632static struct pci_dev *pci_get_device_func(unsigned vendor,
538 unsigned device, 633 unsigned device,
539 unsigned func) 634 unsigned func)
@@ -557,19 +652,19 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
557 int csrow) 652 int csrow)
558{ 653{
559 struct i5100_priv *priv = mci->pvt_info; 654 struct i5100_priv *priv = mci->pvt_info;
560 const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow); 655 const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
561 const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow); 656 const unsigned chan = i5100_csrow_to_chan(mci, csrow);
562 unsigned addr_lines; 657 unsigned addr_lines;
563 658
564 /* dimm present? */ 659 /* dimm present? */
565 if (!priv->mtr[ctlr][ctlr_rank].present) 660 if (!priv->mtr[chan][chan_rank].present)
566 return 0ULL; 661 return 0ULL;
567 662
568 addr_lines = 663 addr_lines =
569 I5100_DIMM_ADDR_LINES + 664 I5100_DIMM_ADDR_LINES +
570 priv->mtr[ctlr][ctlr_rank].numcol + 665 priv->mtr[chan][chan_rank].numcol +
571 priv->mtr[ctlr][ctlr_rank].numrow + 666 priv->mtr[chan][chan_rank].numrow +
572 priv->mtr[ctlr][ctlr_rank].numbank; 667 priv->mtr[chan][chan_rank].numbank;
573 668
574 return (unsigned long) 669 return (unsigned long)
575 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE); 670 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
@@ -581,11 +676,11 @@ static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
581 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; 676 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
582 int i; 677 int i;
583 678
584 for (i = 0; i < I5100_MAX_CTLRS; i++) { 679 for (i = 0; i < I5100_CHANNELS; i++) {
585 int j; 680 int j;
586 struct pci_dev *pdev = mms[i]; 681 struct pci_dev *pdev = mms[i];
587 682
588 for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) { 683 for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
589 const unsigned addr = 684 const unsigned addr =
590 (j < 4) ? I5100_MTR_0 + j * 2 : 685 (j < 4) ? I5100_MTR_0 + j * 2 :
591 I5100_MTR_4 + (j - 4) * 2; 686 I5100_MTR_4 + (j - 4) * 2;
@@ -644,7 +739,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
644 * fill dimm chip select map 739 * fill dimm chip select map
645 * 740 *
646 * FIXME: 741 * FIXME:
647 * o only valid for 4 ranks per controller
 648 * o not the only way to map chip selects to dimm slots 742 * o not the only way to map chip selects to dimm slots
649 * o investigate if there is some way to obtain this map from the bios 743 * o investigate if there is some way to obtain this map from the bios
650 */ 744 */
@@ -653,9 +747,7 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
653 struct i5100_priv *priv = mci->pvt_info; 747 struct i5100_priv *priv = mci->pvt_info;
654 int i; 748 int i;
655 749
656 WARN_ON(priv->ranksperctlr != 4); 750 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
657
658 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
659 int j; 751 int j;
660 752
661 for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++) 753 for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
@@ -663,12 +755,21 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
663 } 755 }
664 756
665 /* only 2 chip selects per slot... */ 757 /* only 2 chip selects per slot... */
666 priv->dimm_csmap[0][0] = 0; 758 if (priv->ranksperchan == 4) {
667 priv->dimm_csmap[0][1] = 3; 759 priv->dimm_csmap[0][0] = 0;
668 priv->dimm_csmap[1][0] = 1; 760 priv->dimm_csmap[0][1] = 3;
669 priv->dimm_csmap[1][1] = 2; 761 priv->dimm_csmap[1][0] = 1;
670 priv->dimm_csmap[2][0] = 2; 762 priv->dimm_csmap[1][1] = 2;
671 priv->dimm_csmap[3][0] = 3; 763 priv->dimm_csmap[2][0] = 2;
764 priv->dimm_csmap[3][0] = 3;
765 } else {
766 priv->dimm_csmap[0][0] = 0;
767 priv->dimm_csmap[0][1] = 1;
768 priv->dimm_csmap[1][0] = 2;
769 priv->dimm_csmap[1][1] = 3;
770 priv->dimm_csmap[2][0] = 4;
771 priv->dimm_csmap[2][1] = 5;
772 }
672} 773}
673 774
674static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev, 775static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
@@ -677,10 +778,10 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
677 struct i5100_priv *priv = mci->pvt_info; 778 struct i5100_priv *priv = mci->pvt_info;
678 int i; 779 int i;
679 780
680 for (i = 0; i < I5100_MAX_CTLRS; i++) { 781 for (i = 0; i < I5100_CHANNELS; i++) {
681 int j; 782 int j;
682 783
683 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) { 784 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
684 u8 rank; 785 u8 rank;
685 786
686 if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0) 787 if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
@@ -720,7 +821,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
720 pci_read_config_word(pdev, I5100_AMIR_1, &w); 821 pci_read_config_word(pdev, I5100_AMIR_1, &w);
721 priv->amir[1] = w; 822 priv->amir[1] = w;
722 823
723 for (i = 0; i < I5100_MAX_CTLRS; i++) { 824 for (i = 0; i < I5100_CHANNELS; i++) {
724 int j; 825 int j;
725 826
726 for (j = 0; j < 5; j++) { 827 for (j = 0; j < 5; j++) {
@@ -747,7 +848,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
747 848
748 for (i = 0; i < mci->nr_csrows; i++) { 849 for (i = 0; i < mci->nr_csrows; i++) {
749 const unsigned long npages = i5100_npages(mci, i); 850 const unsigned long npages = i5100_npages(mci, i);
750 const unsigned cntlr = i5100_csrow_to_cntlr(mci, i); 851 const unsigned chan = i5100_csrow_to_chan(mci, i);
751 const unsigned rank = i5100_csrow_to_rank(mci, i); 852 const unsigned rank = i5100_csrow_to_rank(mci, i);
752 853
753 if (!npages) 854 if (!npages)
@@ -765,7 +866,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
765 mci->csrows[i].grain = 32; 866 mci->csrows[i].grain = 32;
766 mci->csrows[i].csrow_idx = i; 867 mci->csrows[i].csrow_idx = i;
767 mci->csrows[i].dtype = 868 mci->csrows[i].dtype =
768 (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8; 869 (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
769 mci->csrows[i].ue_count = 0; 870 mci->csrows[i].ue_count = 0;
770 mci->csrows[i].ce_count = 0; 871 mci->csrows[i].ce_count = 0;
771 mci->csrows[i].mtype = MEM_RDDR2; 872 mci->csrows[i].mtype = MEM_RDDR2;
@@ -777,7 +878,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
777 mci->csrows[i].channels[0].csrow = mci->csrows + i; 878 mci->csrows[i].channels[0].csrow = mci->csrows + i;
778 snprintf(mci->csrows[i].channels[0].label, 879 snprintf(mci->csrows[i].channels[0].label,
779 sizeof(mci->csrows[i].channels[0].label), 880 sizeof(mci->csrows[i].channels[0].label),
780 "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank)); 881 "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
781 882
782 total_pages += npages; 883 total_pages += npages;
783 } 884 }
@@ -815,13 +916,6 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
815 pci_read_config_dword(pdev, I5100_MS, &dw); 916 pci_read_config_dword(pdev, I5100_MS, &dw);
816 ranksperch = !!(dw & (1 << 8)) * 2 + 4; 917 ranksperch = !!(dw & (1 << 8)) * 2 + 4;
817 918
818 if (ranksperch != 4) {
819 /* FIXME: get 6 ranks / controller to work - need hw... */
820 printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
821 ret = -ENODEV;
822 goto bail_pdev;
823 }
824
825 /* enable error reporting... */ 919 /* enable error reporting... */
826 pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw); 920 pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
827 dw &= ~I5100_FERR_NF_MEM_ANY_MASK; 921 dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
@@ -864,11 +958,21 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
864 mci->dev = &pdev->dev; 958 mci->dev = &pdev->dev;
865 959
866 priv = mci->pvt_info; 960 priv = mci->pvt_info;
867 priv->ranksperctlr = ranksperch; 961 priv->ranksperchan = ranksperch;
868 priv->mc = pdev; 962 priv->mc = pdev;
869 priv->ch0mm = ch0mm; 963 priv->ch0mm = ch0mm;
870 priv->ch1mm = ch1mm; 964 priv->ch1mm = ch1mm;
871 965
966 INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
967
968 /* If scrubbing was already enabled by the bios, start maintaining it */
969 pci_read_config_dword(pdev, I5100_MC, &dw);
970 if (i5100_mc_scrben(dw)) {
971 priv->scrub_enable = 1;
972 schedule_delayed_work(&(priv->i5100_scrubbing),
973 I5100_SCRUB_REFRESH_RATE);
974 }
975
872 i5100_init_dimm_layout(pdev, mci); 976 i5100_init_dimm_layout(pdev, mci);
873 i5100_init_interleaving(pdev, mci); 977 i5100_init_interleaving(pdev, mci);
874 978
@@ -882,6 +986,8 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
882 mci->ctl_page_to_phys = NULL; 986 mci->ctl_page_to_phys = NULL;
883 987
884 mci->edac_check = i5100_check_error; 988 mci->edac_check = i5100_check_error;
989 mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
990 mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
885 991
886 i5100_init_csrows(mci); 992 i5100_init_csrows(mci);
887 993
@@ -897,12 +1003,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
897 1003
898 if (edac_mc_add_mc(mci)) { 1004 if (edac_mc_add_mc(mci)) {
899 ret = -ENODEV; 1005 ret = -ENODEV;
900 goto bail_mc; 1006 goto bail_scrub;
901 } 1007 }
902 1008
903 return ret; 1009 return ret;
904 1010
905bail_mc: 1011bail_scrub:
1012 priv->scrub_enable = 0;
1013 cancel_delayed_work_sync(&(priv->i5100_scrubbing));
906 edac_mc_free(mci); 1014 edac_mc_free(mci);
907 1015
908bail_disable_ch1: 1016bail_disable_ch1:
@@ -935,6 +1043,10 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
935 return; 1043 return;
936 1044
937 priv = mci->pvt_info; 1045 priv = mci->pvt_info;
1046
1047 priv->scrub_enable = 0;
1048 cancel_delayed_work_sync(&(priv->i5100_scrubbing));
1049
938 pci_disable_device(pdev); 1050 pci_disable_device(pdev);
939 pci_disable_device(priv->ch0mm); 1051 pci_disable_device(priv->ch0mm);
940 pci_disable_device(priv->ch1mm); 1052 pci_disable_device(priv->ch1mm);
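
Tying the i5100 changes together: per the new header comment, the chip-select rows of the two channels are laid out back to back, which is exactly what the renamed i5100_csrow_to_chan()/i5100_csrow_to_rank()/i5100_rank_to_csrow() helpers implement. A small stand-alone sketch of that mapping, assuming the 6-ranks-per-channel maximum now supported:

#include <stdio.h>

#define RANKS_PER_CHAN 6	/* I5100_MAX_RANKS_PER_CHAN */

int main(void)
{
	int csrow;

	/* csrows 0..5 -> channel 0, ranks 0..5; csrows 6..11 -> channel 1 */
	for (csrow = 0; csrow < 2 * RANKS_PER_CHAN; csrow++)
		printf("csrow %2d -> chan %d, rank %d\n",
		       csrow, csrow / RANKS_PER_CHAN, csrow % RANKS_PER_CHAN);

	return 0;
}
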
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ebb9e51deb0c..1b03ba1d0834 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -7,7 +7,7 @@ menu "Firmware Drivers"
7 7
8config EDD 8config EDD
9 tristate "BIOS Enhanced Disk Drive calls determine boot disk" 9 tristate "BIOS Enhanced Disk Drive calls determine boot disk"
10 depends on !IA64 10 depends on X86
11 help 11 help
12 Say Y or M here if you want to enable BIOS Enhanced Disk Drive 12 Say Y or M here if you want to enable BIOS Enhanced Disk Drive
13 Services real mode BIOS calls to determine which disk 13 Services real mode BIOS calls to determine which disk
@@ -28,7 +28,7 @@ config EDD_OFF
28 28
29config FIRMWARE_MEMMAP 29config FIRMWARE_MEMMAP
30 bool "Add firmware-provided memory map to sysfs" if EMBEDDED 30 bool "Add firmware-provided memory map to sysfs" if EMBEDDED
31 default (X86_64 || X86_32) 31 default X86
32 help 32 help
33 Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap. 33 Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
34 That memory map is used for example by kexec to set up parameter area 34 That memory map is used for example by kexec to set up parameter area
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 3a2ccb09e2f8..31b983d9462c 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -169,10 +169,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
169 if (!s) 169 if (!s)
170 return; 170 return;
171 171
172 sprintf(s, 172 sprintf(s, "%pUB", d);
173 "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
174 d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
175 d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
176 173
177 dmi_ident[slot] = s; 174 dmi_ident[slot] = s;
178} 175}
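
The %pUB conversion used above emits the same upper-case, big-endian UUID string that the removed open-coded sprintf produced. A user-space sketch of the equivalent formatting, for illustration only (inside the kernel the single "%pUB" specifier does this work):

#include <stdio.h>

/* Format 16 raw bytes the way the replaced sprintf (and "%pUB") does. */
static void format_uuid_be(const unsigned char d[16], char out[37])
{
	snprintf(out, 37,
		 "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
		 d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
		 d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
}

int main(void)
{
	unsigned char d[16] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
				0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	char buf[37];

	format_uuid_be(d, buf);
	printf("%s\n", buf);	/* 12345678-9ABC-DEF0-1122-334455667788 */
	return 0;
}
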
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2ad0128c63c6..a019b49ecc9b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -174,6 +174,16 @@ config GPIO_ADP5520
174 174
175comment "PCI GPIO expanders:" 175comment "PCI GPIO expanders:"
176 176
177config GPIO_CS5535
178 tristate "AMD CS5535/CS5536 GPIO support"
179 depends on PCI && !CS5535_GPIO
180 help
181 The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
182 can be used for quite a number of things. The CS5535/6 is found on
183 AMD Geode and Lemote Yeeloong devices.
184
185 If unsure, say N.
186
177config GPIO_BT8XX 187config GPIO_BT8XX
178 tristate "BT8XX GPIO abuser" 188 tristate "BT8XX GPIO abuser"
179 depends on PCI && VIDEO_BT848=n 189 depends on PCI && VIDEO_BT848=n
@@ -196,6 +206,12 @@ config GPIO_LANGWELL
196 help 206 help
197 Say Y here to support Intel Moorestown platform GPIO. 207 Say Y here to support Intel Moorestown platform GPIO.
198 208
209config GPIO_TIMBERDALE
210 bool "Support for timberdale GPIO IP"
211 depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
212 ---help---
213 Add support for the GPIO IP in the timberdale FPGA.
214
199comment "SPI GPIO expanders:" 215comment "SPI GPIO expanders:"
200 216
201config GPIO_MAX7301 217config GPIO_MAX7301
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 00a532c9a1e2..52fe4cf734c7 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -13,9 +13,11 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
13obj-$(CONFIG_GPIO_PCA953X) += pca953x.o 13obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
14obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o 14obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
15obj-$(CONFIG_GPIO_PL061) += pl061.o 15obj-$(CONFIG_GPIO_PL061) += pl061.o
16obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
16obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o 17obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
17obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o 18obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
18obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o 19obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
20obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
19obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o 21obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
20obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o 22obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
21obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o 23obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
new file mode 100644
index 000000000000..0fdbe94f24a3
--- /dev/null
+++ b/drivers/gpio/cs5535-gpio.c
@@ -0,0 +1,355 @@
1/*
2 * AMD CS5535/CS5536 GPIO driver
3 * Copyright (C) 2006 Advanced Micro Devices, Inc.
4 * Copyright (C) 2007-2009 Andres Salomon <dilinger@collabora.co.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/spinlock.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/gpio.h>
16#include <linux/io.h>
17#include <linux/cs5535.h>
18
19#define DRV_NAME "cs5535-gpio"
20#define GPIO_BAR 1
21
22/*
23 * Some GPIO pins
24 * 31-29,23 : reserved (always mask out)
25 * 28 : Power Button
26 * 26 : PME#
27 * 22-16 : LPC
28 * 14,15 : SMBus
29 * 9,8 : UART1
30 * 7 : PCI INTB
31 * 3,4 : UART2/DDC
32 * 2 : IDE_IRQ0
33 * 1 : AC_BEEP
34 * 0 : PCI INTA
35 *
36 * If a mask was not specified, allow all except
37 * reserved and Power Button
38 */
39#define GPIO_DEFAULT_MASK 0x0F7FFFFF
40
41static ulong mask = GPIO_DEFAULT_MASK;
42module_param_named(mask, mask, ulong, 0444);
43MODULE_PARM_DESC(mask, "GPIO channel mask.");
44
45static struct cs5535_gpio_chip {
46 struct gpio_chip chip;
47 resource_size_t base;
48
49 struct pci_dev *pdev;
50 spinlock_t lock;
51} cs5535_gpio_chip;
52
53/*
54 * The CS5535/CS5536 GPIOs support a number of extra features not defined
55 * by the gpio_chip API, so these are exported. For a full list of the
56 * registers, see include/linux/cs5535.h.
57 */
58
59static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
60 unsigned int reg)
61{
62 if (offset < 16)
63 /* low bank register */
64 outl(1 << offset, chip->base + reg);
65 else
66 /* high bank register */
67 outl(1 << (offset - 16), chip->base + 0x80 + reg);
68}
69
70void cs5535_gpio_set(unsigned offset, unsigned int reg)
71{
72 struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
73 unsigned long flags;
74
75 spin_lock_irqsave(&chip->lock, flags);
76 __cs5535_gpio_set(chip, offset, reg);
77 spin_unlock_irqrestore(&chip->lock, flags);
78}
79EXPORT_SYMBOL_GPL(cs5535_gpio_set);
80
81static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
82 unsigned int reg)
83{
84 if (offset < 16)
85 /* low bank register */
86 outl(1 << (offset + 16), chip->base + reg);
87 else
88 /* high bank register */
89 outl(1 << offset, chip->base + 0x80 + reg);
90}
91
92void cs5535_gpio_clear(unsigned offset, unsigned int reg)
93{
94 struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
95 unsigned long flags;
96
97 spin_lock_irqsave(&chip->lock, flags);
98 __cs5535_gpio_clear(chip, offset, reg);
99 spin_unlock_irqrestore(&chip->lock, flags);
100}
101EXPORT_SYMBOL_GPL(cs5535_gpio_clear);
102
103int cs5535_gpio_isset(unsigned offset, unsigned int reg)
104{
105 struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
106 unsigned long flags;
107 long val;
108
109 spin_lock_irqsave(&chip->lock, flags);
110 if (offset < 16)
111 /* low bank register */
112 val = inl(chip->base + reg);
113 else {
114 /* high bank register */
115 val = inl(chip->base + 0x80 + reg);
116 offset -= 16;
117 }
118 spin_unlock_irqrestore(&chip->lock, flags);
119
120 return (val & (1 << offset)) ? 1 : 0;
121}
122EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
123
124/*
125 * Generic gpio_chip API support.
126 */
127
128static int chip_gpio_request(struct gpio_chip *c, unsigned offset)
129{
130 struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
131 unsigned long flags;
132
133 spin_lock_irqsave(&chip->lock, flags);
134
135 /* check if this pin is available */
136 if ((mask & (1 << offset)) == 0) {
137 dev_info(&chip->pdev->dev,
138 "pin %u is not available (check mask)\n", offset);
139 spin_unlock_irqrestore(&chip->lock, flags);
140 return -EINVAL;
141 }
142
143 /* disable output aux 1 & 2 on this pin */
144 __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX1);
145 __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX2);
146
147 /* disable input aux 1 on this pin */
148 __cs5535_gpio_clear(chip, offset, GPIO_INPUT_AUX1);
149
150 spin_unlock_irqrestore(&chip->lock, flags);
151
152 return 0;
153}
154
155static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
156{
157 return cs5535_gpio_isset(offset, GPIO_OUTPUT_VAL);
158}
159
160static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
161{
162 if (val)
163 cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
164 else
165 cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
166}
167
168static int chip_direction_input(struct gpio_chip *c, unsigned offset)
169{
170 struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
171 unsigned long flags;
172
173 spin_lock_irqsave(&chip->lock, flags);
174 __cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
175 spin_unlock_irqrestore(&chip->lock, flags);
176
177 return 0;
178}
179
180static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
181{
182 struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
183 unsigned long flags;
184
185 spin_lock_irqsave(&chip->lock, flags);
186
187 __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE);
188 if (val)
189 __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL);
190 else
191 __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_VAL);
192
193 spin_unlock_irqrestore(&chip->lock, flags);
194
195 return 0;
196}
197
198static char *cs5535_gpio_names[] = {
199 "GPIO0", "GPIO1", "GPIO2", "GPIO3",
200 "GPIO4", "GPIO5", "GPIO6", "GPIO7",
201 "GPIO8", "GPIO9", "GPIO10", "GPIO11",
202 "GPIO12", "GPIO13", "GPIO14", "GPIO15",
203 "GPIO16", "GPIO17", "GPIO18", "GPIO19",
204 "GPIO20", "GPIO21", "GPIO22", NULL,
205 "GPIO24", "GPIO25", "GPIO26", "GPIO27",
206 "GPIO28", NULL, NULL, NULL,
207};
208
209static struct cs5535_gpio_chip cs5535_gpio_chip = {
210 .chip = {
211 .owner = THIS_MODULE,
212 .label = DRV_NAME,
213
214 .base = 0,
215 .ngpio = 32,
216 .names = cs5535_gpio_names,
217 .request = chip_gpio_request,
218
219 .get = chip_gpio_get,
220 .set = chip_gpio_set,
221
222 .direction_input = chip_direction_input,
223 .direction_output = chip_direction_output,
224 },
225};
226
227static int __init cs5535_gpio_probe(struct pci_dev *pdev,
228 const struct pci_device_id *pci_id)
229{
230 int err;
231 ulong mask_orig = mask;
232
233 /* There are two ways to get the GPIO base address; one is by
234 * fetching it from MSR_LBAR_GPIO, the other is by reading the
235 * PCI BAR info. The latter method is easier (especially across
236 * different architectures), so we'll stick with that for now. If
237 * it turns out to be unreliable in the face of crappy BIOSes, we
 238 * can always go back to using MSRs. */
239
240 err = pci_enable_device_io(pdev);
241 if (err) {
242 dev_err(&pdev->dev, "can't enable device IO\n");
243 goto done;
244 }
245
246 err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
247 if (err) {
248 dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
249 goto done;
250 }
251
252 /* set up the driver-specific struct */
253 cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
254 cs5535_gpio_chip.pdev = pdev;
255 spin_lock_init(&cs5535_gpio_chip.lock);
256
257 dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
258 (unsigned long long) cs5535_gpio_chip.base);
259
260 /* mask out reserved pins */
261 mask &= 0x1F7FFFFF;
262
 263 /* do not allow pin 28, Power Button, as it needs special handling
 264 * in the PMC. (note 12, p. 48) */
265 mask &= ~(1 << 28);
266
267 if (mask_orig != mask)
268 dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n",
269 mask_orig, mask);
270
271 /* finally, register with the generic GPIO API */
272 err = gpiochip_add(&cs5535_gpio_chip.chip);
273 if (err)
274 goto release_region;
275
276 dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
277 return 0;
278
279release_region:
280 pci_release_region(pdev, GPIO_BAR);
281done:
282 return err;
283}
284
285static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
286{
287 int err;
288
289 err = gpiochip_remove(&cs5535_gpio_chip.chip);
290 if (err) {
291 /* uhh? */
292 dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
293 }
294 pci_release_region(pdev, GPIO_BAR);
295}
296
297static struct pci_device_id cs5535_gpio_pci_tbl[] = {
298 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
299 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
300 { 0, },
301};
302MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
303
304/*
305 * We can't use the standard PCI driver registration stuff here, since
306 * that allows only one driver to bind to each PCI device (and we want
307 * multiple drivers to be able to bind to the device). Instead, manually
308 * scan for the PCI device, request a single region, and keep track of the
309 * devices that we're using.
310 */
311
312static int __init cs5535_gpio_scan_pci(void)
313{
314 struct pci_dev *pdev;
315 int err = -ENODEV;
316 int i;
317
318 for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
319 pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
320 cs5535_gpio_pci_tbl[i].device, NULL);
321 if (pdev) {
322 err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
323 if (err)
324 pci_dev_put(pdev);
325
326 /* we only support a single CS5535/6 southbridge */
327 break;
328 }
329 }
330
331 return err;
332}
333
334static void __exit cs5535_gpio_free_pci(void)
335{
336 cs5535_gpio_remove(cs5535_gpio_chip.pdev);
337 pci_dev_put(cs5535_gpio_chip.pdev);
338}
339
340static int __init cs5535_gpio_init(void)
341{
342 return cs5535_gpio_scan_pci();
343}
344
345static void __exit cs5535_gpio_exit(void)
346{
347 cs5535_gpio_free_pci();
348}
349
350module_init(cs5535_gpio_init);
351module_exit(cs5535_gpio_exit);
352
353MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
354MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
355MODULE_LICENSE("GPL");
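
A minimal sketch of how another kernel module might use the extra helpers this driver exports beyond the gpio_chip API (the pin number, module names and error handling are illustrative only; cs5535_gpio_set/clear/isset and the GPIO_* register names come from the code above and the include/linux/cs5535.h it references):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cs5535.h>

#define MY_PIN 7	/* illustrative: GPIO7 / PCI INTB in the pin list above */

static int __init cs5535_gpio_consumer_init(void)
{
	/* drive the pin as a high output, then read back the output latch */
	cs5535_gpio_set(MY_PIN, GPIO_OUTPUT_ENABLE);
	cs5535_gpio_set(MY_PIN, GPIO_OUTPUT_VAL);

	return cs5535_gpio_isset(MY_PIN, GPIO_OUTPUT_VAL) ? 0 : -EIO;
}

static void __exit cs5535_gpio_consumer_exit(void)
{
	cs5535_gpio_clear(MY_PIN, GPIO_OUTPUT_VAL);
}

module_init(cs5535_gpio_consumer_init);
module_exit(cs5535_gpio_consumer_exit);
MODULE_LICENSE("GPL");
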
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 50de0f5750d8..a25ad284a272 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -53,6 +53,7 @@ struct gpio_desc {
53#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */ 53#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
54#define FLAG_TRIG_FALL 5 /* trigger on falling edge */ 54#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
55#define FLAG_TRIG_RISE 6 /* trigger on rising edge */ 55#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
56#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
56 57
57#define PDESC_ID_SHIFT 16 /* add new flags before this one */ 58#define PDESC_ID_SHIFT 16 /* add new flags before this one */
58 59
@@ -210,6 +211,11 @@ static DEFINE_MUTEX(sysfs_lock);
210 * * configures behavior of poll(2) on /value 211 * * configures behavior of poll(2) on /value
211 * * available only if pin can generate IRQs on input 212 * * available only if pin can generate IRQs on input
212 * * is read/write as "none", "falling", "rising", or "both" 213 * * is read/write as "none", "falling", "rising", or "both"
214 * /active_low
215 * * configures polarity of /value
216 * * is read/write as zero/nonzero
217 * * also affects existing and subsequent "falling" and "rising"
218 * /edge configuration
213 */ 219 */
214 220
215static ssize_t gpio_direction_show(struct device *dev, 221static ssize_t gpio_direction_show(struct device *dev,
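
The new active_low attribute documented above inverts the polarity seen through /value and, as the later gpio_setup_irq() change shows, swaps the meaning of the "rising" and "falling" edge settings. A user-space sketch of exercising it through sysfs (paths follow the usual /sys/class/gpio layout; GPIO number 23 is purely illustrative):

#include <stdio.h>

/* Write a short string to a sysfs attribute; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	sysfs_write("/sys/class/gpio/export", "23");
	sysfs_write("/sys/class/gpio/gpio23/active_low", "1");
	sysfs_write("/sys/class/gpio/gpio23/direction", "out");

	/* with active_low set, writing 1 here drives the physical line low */
	sysfs_write("/sys/class/gpio/gpio23/value", "1");

	return 0;
}
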
@@ -255,7 +261,7 @@ static ssize_t gpio_direction_store(struct device *dev,
255 return status ? : size; 261 return status ? : size;
256} 262}
257 263
258static const DEVICE_ATTR(direction, 0644, 264static /* const */ DEVICE_ATTR(direction, 0644,
259 gpio_direction_show, gpio_direction_store); 265 gpio_direction_show, gpio_direction_store);
260 266
261static ssize_t gpio_value_show(struct device *dev, 267static ssize_t gpio_value_show(struct device *dev,
@@ -267,10 +273,17 @@ static ssize_t gpio_value_show(struct device *dev,
267 273
268 mutex_lock(&sysfs_lock); 274 mutex_lock(&sysfs_lock);
269 275
270 if (!test_bit(FLAG_EXPORT, &desc->flags)) 276 if (!test_bit(FLAG_EXPORT, &desc->flags)) {
271 status = -EIO; 277 status = -EIO;
272 else 278 } else {
273 status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio)); 279 int value;
280
281 value = !!gpio_get_value_cansleep(gpio);
282 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
283 value = !value;
284
285 status = sprintf(buf, "%d\n", value);
286 }
274 287
275 mutex_unlock(&sysfs_lock); 288 mutex_unlock(&sysfs_lock);
276 return status; 289 return status;
@@ -294,6 +307,8 @@ static ssize_t gpio_value_store(struct device *dev,
294 307
295 status = strict_strtol(buf, 0, &value); 308 status = strict_strtol(buf, 0, &value);
296 if (status == 0) { 309 if (status == 0) {
310 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
311 value = !value;
297 gpio_set_value_cansleep(gpio, value != 0); 312 gpio_set_value_cansleep(gpio, value != 0);
298 status = size; 313 status = size;
299 } 314 }
@@ -303,7 +318,7 @@ static ssize_t gpio_value_store(struct device *dev,
303 return status; 318 return status;
304} 319}
305 320
306static /*const*/ DEVICE_ATTR(value, 0644, 321static const DEVICE_ATTR(value, 0644,
307 gpio_value_show, gpio_value_store); 322 gpio_value_show, gpio_value_store);
308 323
309static irqreturn_t gpio_sysfs_irq(int irq, void *priv) 324static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -352,9 +367,11 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
352 367
353 irq_flags = IRQF_SHARED; 368 irq_flags = IRQF_SHARED;
354 if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) 369 if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
355 irq_flags |= IRQF_TRIGGER_FALLING; 370 irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
371 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
356 if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) 372 if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
357 irq_flags |= IRQF_TRIGGER_RISING; 373 irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
374 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
358 375
359 if (!pdesc) { 376 if (!pdesc) {
360 pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL); 377 pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
@@ -475,9 +492,79 @@ found:
475 492
476static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); 493static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
477 494
495static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
496 int value)
497{
498 int status = 0;
499
500 if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
501 return 0;
502
503 if (value)
504 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
505 else
506 clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
507
508 /* reconfigure poll(2) support if enabled on one edge only */
509 if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
510 !!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
511 unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
512
513 gpio_setup_irq(desc, dev, 0);
514 status = gpio_setup_irq(desc, dev, trigger_flags);
515 }
516
517 return status;
518}
519
520static ssize_t gpio_active_low_show(struct device *dev,
521 struct device_attribute *attr, char *buf)
522{
523 const struct gpio_desc *desc = dev_get_drvdata(dev);
524 ssize_t status;
525
526 mutex_lock(&sysfs_lock);
527
528 if (!test_bit(FLAG_EXPORT, &desc->flags))
529 status = -EIO;
530 else
531 status = sprintf(buf, "%d\n",
532 !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
533
534 mutex_unlock(&sysfs_lock);
535
536 return status;
537}
538
539static ssize_t gpio_active_low_store(struct device *dev,
540 struct device_attribute *attr, const char *buf, size_t size)
541{
542 struct gpio_desc *desc = dev_get_drvdata(dev);
543 ssize_t status;
544
545 mutex_lock(&sysfs_lock);
546
547 if (!test_bit(FLAG_EXPORT, &desc->flags)) {
548 status = -EIO;
549 } else {
550 long value;
551
552 status = strict_strtol(buf, 0, &value);
553 if (status == 0)
554 status = sysfs_set_active_low(desc, dev, value != 0);
555 }
556
557 mutex_unlock(&sysfs_lock);
558
559 return status ? : size;
560}
561
562static const DEVICE_ATTR(active_low, 0644,
563 gpio_active_low_show, gpio_active_low_store);
564
478static const struct attribute *gpio_attrs[] = { 565static const struct attribute *gpio_attrs[] = {
479 &dev_attr_direction.attr,
480 &dev_attr_value.attr, 566 &dev_attr_value.attr,
567 &dev_attr_active_low.attr,
481 NULL, 568 NULL,
482}; 569};
483 570
@@ -662,12 +749,12 @@ int gpio_export(unsigned gpio, bool direction_may_change)
662 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 749 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
663 desc, ioname ? ioname : "gpio%d", gpio); 750 desc, ioname ? ioname : "gpio%d", gpio);
664 if (!IS_ERR(dev)) { 751 if (!IS_ERR(dev)) {
665 if (direction_may_change) 752 status = sysfs_create_group(&dev->kobj,
666 status = sysfs_create_group(&dev->kobj,
667 &gpio_attr_group); 753 &gpio_attr_group);
668 else 754
755 if (!status && direction_may_change)
669 status = device_create_file(dev, 756 status = device_create_file(dev,
670 &dev_attr_value); 757 &dev_attr_direction);
671 758
672 if (!status && gpio_to_irq(gpio) >= 0 759 if (!status && gpio_to_irq(gpio) >= 0
673 && (direction_may_change 760 && (direction_may_change
@@ -744,6 +831,55 @@ done:
744} 831}
745EXPORT_SYMBOL_GPL(gpio_export_link); 832EXPORT_SYMBOL_GPL(gpio_export_link);
746 833
834
835/**
836 * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
837 * @gpio: gpio to change
838 * @value: non-zero to use active low, i.e. inverted values
839 *
840 * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute.
841 * The GPIO does not have to be exported yet. If poll(2) support has
842 * been enabled for either rising or falling edge, it will be
843 * reconfigured to follow the new polarity.
844 *
845 * Returns zero on success, else an error.
846 */
847int gpio_sysfs_set_active_low(unsigned gpio, int value)
848{
849 struct gpio_desc *desc;
850 struct device *dev = NULL;
851 int status = -EINVAL;
852
853 if (!gpio_is_valid(gpio))
854 goto done;
855
856 mutex_lock(&sysfs_lock);
857
858 desc = &gpio_desc[gpio];
859
860 if (test_bit(FLAG_EXPORT, &desc->flags)) {
861 struct device *dev;
862
863 dev = class_find_device(&gpio_class, NULL, desc, match_export);
864 if (dev == NULL) {
865 status = -ENODEV;
866 goto unlock;
867 }
868 }
869
870 status = sysfs_set_active_low(desc, dev, value);
871
872unlock:
873 mutex_unlock(&sysfs_lock);
874
875done:
876 if (status)
877 pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
878
879 return status;
880}
881EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
882
747/** 883/**
748 * gpio_unexport - reverse effect of gpio_export() 884 * gpio_unexport - reverse effect of gpio_export()
749 * @gpio: gpio to make unavailable 885 * @gpio: gpio to make unavailable
@@ -1094,6 +1230,7 @@ void gpio_free(unsigned gpio)
1094 } 1230 }
1095 desc_set_label(desc, NULL); 1231 desc_set_label(desc, NULL);
1096 module_put(desc->chip->owner); 1232 module_put(desc->chip->owner);
1233 clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
1097 clear_bit(FLAG_REQUESTED, &desc->flags); 1234 clear_bit(FLAG_REQUESTED, &desc->flags);
1098 } else 1235 } else
1099 WARN_ON(extra_checks); 1236 WARN_ON(extra_checks);
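With the new active_low attribute and the exported gpio_sysfs_set_active_low() helper, board or driver code can present an active-low line to user space so that writing 1 to /sys/class/gpio/gpioN/value asserts the signal. A minimal sketch, assuming an arbitrary GPIO number and label and the standard gpiolib request/export calls:

#include <linux/gpio.h>

/* Hypothetical setup for an active-low LED GPIO; number and label are
 * examples only. */
static int example_export_active_low_led(unsigned gpio)
{
	int err;

	err = gpio_request(gpio, "status-led");
	if (err)
		return err;

	err = gpio_direction_output(gpio, 0);
	if (err)
		goto out_free;

	err = gpio_export(gpio, false);	/* direction fixed via sysfs */
	if (err)
		goto out_free;

	/* invert the polarity seen through /sys/class/gpio/gpioN/value */
	err = gpio_sysfs_set_active_low(gpio, 1);
	if (err)
		goto out_free;

	return 0;

out_free:
	gpio_free(gpio);	/* also tears down any sysfs export */
	return err;
}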
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 4baf3d7d0f8e..6c0ebbdc659e 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -123,7 +123,7 @@ static int lnw_irq_type(unsigned irq, unsigned type)
123 void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]); 123 void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
124 void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]); 124 void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
125 125
126 if (gpio < 0 || gpio > lnw->chip.ngpio) 126 if (gpio >= lnw->chip.ngpio)
127 return -EINVAL; 127 return -EINVAL;
128 spin_lock_irqsave(&lnw->lock, flags); 128 spin_lock_irqsave(&lnw->lock, flags);
129 if (type & IRQ_TYPE_EDGE_RISING) 129 if (type & IRQ_TYPE_EDGE_RISING)
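The bounds-check fix above relies on the offset being unsigned: the old gpio < 0 half can never be true for an unsigned value, and gpio > ngpio still lets the one-past-the-end index through. A single unsigned comparison covers both cases, as in this illustrative fragment (names are assumptions, mirroring the hunk):

	/* assuming 'offset' is unsigned (e.g. u32 offset = irq - irq_base):
	 * any wrap-around yields a huge value, so one compare rejects both
	 * "negative" and too-large offsets; the valid range is 0..ngpio-1 */
	if (offset >= chip->ngpio)
		return -EINVAL;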
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
new file mode 100644
index 000000000000..a4d344ba8e5c
--- /dev/null
+++ b/drivers/gpio/timbgpio.c
@@ -0,0 +1,342 @@
1/*
2 * timbgpio.c timberdale FPGA GPIO driver
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * Timberdale FPGA GPIO
21 */
22
23#include <linux/module.h>
24#include <linux/gpio.h>
25#include <linux/platform_device.h>
26#include <linux/io.h>
27#include <linux/timb_gpio.h>
28#include <linux/interrupt.h>
29
30#define DRIVER_NAME "timb-gpio"
31
32#define TGPIOVAL 0x00
33#define TGPIODIR 0x04
34#define TGPIO_IER 0x08
35#define TGPIO_ISR 0x0c
36#define TGPIO_IPR 0x10
37#define TGPIO_ICR 0x14
38#define TGPIO_FLR 0x18
39#define TGPIO_LVR 0x1c
40
41struct timbgpio {
42 void __iomem *membase;
43 spinlock_t lock; /* mutual exclusion */
44 struct gpio_chip gpio;
45 int irq_base;
46};
47
48static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
49 unsigned offset, bool enabled)
50{
51 struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
52 u32 reg;
53
54 spin_lock(&tgpio->lock);
55 reg = ioread32(tgpio->membase + offset);
56
57 if (enabled)
58 reg |= (1 << index);
59 else
60 reg &= ~(1 << index);
61
62 iowrite32(reg, tgpio->membase + offset);
63 spin_unlock(&tgpio->lock);
64
65 return 0;
66}
67
68static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
69{
70 return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
71}
72
73static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
74{
75 struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
76 u32 value;
77
78 value = ioread32(tgpio->membase + TGPIOVAL);
79 return (value & (1 << nr)) ? 1 : 0;
80}
81
82static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
83 unsigned nr, int val)
84{
85 return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
86}
87
88static void timbgpio_gpio_set(struct gpio_chip *gpio,
89 unsigned nr, int val)
90{
91 timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
92}
93
94static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
95{
96 struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
97
98 if (tgpio->irq_base <= 0)
99 return -EINVAL;
100
101 return tgpio->irq_base + offset;
102}
103
104/*
105 * GPIO IRQ
106 */
107static void timbgpio_irq_disable(unsigned irq)
108{
109 struct timbgpio *tgpio = get_irq_chip_data(irq);
110 int offset = irq - tgpio->irq_base;
111
112 timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
113}
114
115static void timbgpio_irq_enable(unsigned irq)
116{
117 struct timbgpio *tgpio = get_irq_chip_data(irq);
118 int offset = irq - tgpio->irq_base;
119
120 timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
121}
122
123static int timbgpio_irq_type(unsigned irq, unsigned trigger)
124{
125 struct timbgpio *tgpio = get_irq_chip_data(irq);
126 int offset = irq - tgpio->irq_base;
127 unsigned long flags;
128 u32 lvr, flr;
129
130 if (offset < 0 || offset > tgpio->gpio.ngpio)
131 return -EINVAL;
132
133 spin_lock_irqsave(&tgpio->lock, flags);
134
135 lvr = ioread32(tgpio->membase + TGPIO_LVR);
136 flr = ioread32(tgpio->membase + TGPIO_FLR);
137
138 if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
139 flr &= ~(1 << offset);
140 if (trigger & IRQ_TYPE_LEVEL_HIGH)
141 lvr |= 1 << offset;
142 else
143 lvr &= ~(1 << offset);
144 }
145
146 if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
147 return -EINVAL;
148 else {
149 flr |= 1 << offset;
150 /* opposite compared to the datasheet, but it mirrors the
151 * reality
152 */
153 if (trigger & IRQ_TYPE_EDGE_FALLING)
154 lvr |= 1 << offset;
155 else
156 lvr &= ~(1 << offset);
157 }
158
159 iowrite32(lvr, tgpio->membase + TGPIO_LVR);
160 iowrite32(flr, tgpio->membase + TGPIO_FLR);
161 iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
162 spin_unlock_irqrestore(&tgpio->lock, flags);
163
164 return 0;
165}
166
167static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
168{
169 struct timbgpio *tgpio = get_irq_data(irq);
170 unsigned long ipr;
171 int offset;
172
173 desc->chip->ack(irq);
174 ipr = ioread32(tgpio->membase + TGPIO_IPR);
175 iowrite32(ipr, tgpio->membase + TGPIO_ICR);
176
177 for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
178 generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
179}
180
181static struct irq_chip timbgpio_irqchip = {
182 .name = "GPIO",
183 .enable = timbgpio_irq_enable,
184 .disable = timbgpio_irq_disable,
185 .set_type = timbgpio_irq_type,
186};
187
188static int __devinit timbgpio_probe(struct platform_device *pdev)
189{
190 int err, i;
191 struct gpio_chip *gc;
192 struct timbgpio *tgpio;
193 struct resource *iomem;
194 struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
195 int irq = platform_get_irq(pdev, 0);
196
197 if (!pdata || pdata->nr_pins > 32) {
198 err = -EINVAL;
199 goto err_mem;
200 }
201
202 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
203 if (!iomem) {
204 err = -EINVAL;
205 goto err_mem;
206 }
207
208 tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
209 if (!tgpio) {
210 err = -EINVAL;
211 goto err_mem;
212 }
213 tgpio->irq_base = pdata->irq_base;
214
215 spin_lock_init(&tgpio->lock);
216
217 if (!request_mem_region(iomem->start, resource_size(iomem),
218 DRIVER_NAME)) {
219 err = -EBUSY;
220 goto err_request;
221 }
222
223 tgpio->membase = ioremap(iomem->start, resource_size(iomem));
224 if (!tgpio->membase) {
225 err = -ENOMEM;
226 goto err_ioremap;
227 }
228
229 gc = &tgpio->gpio;
230
231 gc->label = dev_name(&pdev->dev);
232 gc->owner = THIS_MODULE;
233 gc->dev = &pdev->dev;
234 gc->direction_input = timbgpio_gpio_direction_input;
235 gc->get = timbgpio_gpio_get;
236 gc->direction_output = timbgpio_gpio_direction_output;
237 gc->set = timbgpio_gpio_set;
238 gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
239 gc->dbg_show = NULL;
240 gc->base = pdata->gpio_base;
241 gc->ngpio = pdata->nr_pins;
242 gc->can_sleep = 0;
243
244 err = gpiochip_add(gc);
245 if (err)
246 goto err_chipadd;
247
248 platform_set_drvdata(pdev, tgpio);
249
250 /* make sure to disable interrupts */
251 iowrite32(0x0, tgpio->membase + TGPIO_IER);
252
253 if (irq < 0 || tgpio->irq_base <= 0)
254 return 0;
255
256 for (i = 0; i < pdata->nr_pins; i++) {
257 set_irq_chip_and_handler_name(tgpio->irq_base + i,
258 &timbgpio_irqchip, handle_simple_irq, "mux");
259 set_irq_chip_data(tgpio->irq_base + i, tgpio);
260#ifdef CONFIG_ARM
261 set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
262#endif
263 }
264
265 set_irq_data(irq, tgpio);
266 set_irq_chained_handler(irq, timbgpio_irq);
267
268 return 0;
269
270err_chipadd:
271 iounmap(tgpio->membase);
272err_ioremap:
273 release_mem_region(iomem->start, resource_size(iomem));
274err_request:
275 kfree(tgpio);
276err_mem:
277 printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
278
279 return err;
280}
281
282static int __devexit timbgpio_remove(struct platform_device *pdev)
283{
284 int err;
285 struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
286 struct timbgpio *tgpio = platform_get_drvdata(pdev);
287 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
288 int irq = platform_get_irq(pdev, 0);
289
290 if (irq >= 0 && tgpio->irq_base > 0) {
291 int i;
292 for (i = 0; i < pdata->nr_pins; i++) {
293 set_irq_chip(tgpio->irq_base + i, NULL);
294 set_irq_chip_data(tgpio->irq_base + i, NULL);
295 }
296
297 set_irq_handler(irq, NULL);
298 set_irq_data(irq, NULL);
299 }
300
301 err = gpiochip_remove(&tgpio->gpio);
302 if (err)
303 printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
304
305 iounmap(tgpio->membase);
306 release_mem_region(iomem->start, resource_size(iomem));
307 kfree(tgpio);
308
309 platform_set_drvdata(pdev, NULL);
310
311 return 0;
312}
313
314static struct platform_driver timbgpio_platform_driver = {
315 .driver = {
316 .name = DRIVER_NAME,
317 .owner = THIS_MODULE,
318 },
319 .probe = timbgpio_probe,
320 .remove = timbgpio_remove,
321};
322
323/*--------------------------------------------------------------------------*/
324
325static int __init timbgpio_init(void)
326{
327 return platform_driver_register(&timbgpio_platform_driver);
328}
329
330static void __exit timbgpio_exit(void)
331{
332 platform_driver_unregister(&timbgpio_platform_driver);
333}
334
335module_init(timbgpio_init);
336module_exit(timbgpio_exit);
337
338MODULE_DESCRIPTION("Timberdale GPIO driver");
339MODULE_LICENSE("GPL v2");
340MODULE_AUTHOR("Mocean Laboratories");
341MODULE_ALIAS("platform:"DRIVER_NAME);
342
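timbgpio_probe() above reads gpio_base, nr_pins and irq_base from the platform data plus optional MEM/IRQ resources, so the host side (for instance the Timberdale MFD glue) is expected to hand it a "timb-gpio" platform device carrying them. A minimal sketch with made-up resource values, assuming struct timbgpio_platform_data in <linux/timb_gpio.h> exposes exactly the fields used by the probe:

#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

/* Example values only; real offsets and IRQ lines come from the FPGA map. */
static struct timbgpio_platform_data example_timbgpio_pdata = {
	.gpio_base	= 0,	/* first GPIO number registered with gpiolib */
	.nr_pins	= 32,	/* timbgpio_probe() rejects more than 32 */
	.irq_base	= 200,	/* <= 0 disables the per-pin IRQ support */
};

static struct resource example_timbgpio_resources[] = {
	{
		.start	= 0x00001000,		/* register block, example */
		.end	= 0x00001000 + 0x20 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 40,			/* parent interrupt, example */
		.end	= 40,
		.flags	= IORESOURCE_IRQ,
	},
};

/* ...attached as dev.platform_data of a platform device named "timb-gpio"
 * before platform_device_register(). */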
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d823e6319516..b1bc1ea182b8 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -30,11 +30,12 @@ config DRM_NOUVEAU_DEBUG
30 via debugfs. 30 via debugfs.
31 31
32menu "I2C encoder or helper chips" 32menu "I2C encoder or helper chips"
33 depends on DRM 33 depends on DRM && I2C
34 34
35config DRM_I2C_CH7006 35config DRM_I2C_CH7006
36 tristate "Chrontel ch7006 TV encoder" 36 tristate "Chrontel ch7006 TV encoder"
37 default m if DRM_NOUVEAU 37 depends on DRM_NOUVEAU
38 default m
38 help 39 help
39 Support for Chrontel ch7006 and similar TV encoders, found 40 Support for Chrontel ch7006 and similar TV encoders, found
40 on some nVidia video cards. 41 on some nVidia video cards.
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 320a14bceb99..aa2dfbc3e351 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -311,8 +311,10 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
311 struct drm_device *dev = dev_priv->dev; 311 struct drm_device *dev = dev_priv->dev;
312 312
313 switch (dev_priv->gart_info.type) { 313 switch (dev_priv->gart_info.type) {
314#if __OS_HAS_AGP
314 case NOUVEAU_GART_AGP: 315 case NOUVEAU_GART_AGP:
315 return ttm_agp_backend_init(bdev, dev->agp->bridge); 316 return ttm_agp_backend_init(bdev, dev->agp->bridge);
317#endif
316 case NOUVEAU_GART_SGDMA: 318 case NOUVEAU_GART_SGDMA:
317 return nouveau_sgdma_init_ttm(dev); 319 return nouveau_sgdma_init_ttm(dev);
318 default: 320 default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0cff7eb3690a..dacac9a0842a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -205,7 +205,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
205 schedule_timeout(1); 205 schedule_timeout(1);
206 206
207 if (intr && signal_pending(current)) { 207 if (intr && signal_pending(current)) {
208 ret = -ERESTART; 208 ret = -ERESTARTSYS;
209 break; 209 break;
210 } 210 }
211 } 211 }
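The fence-wait fix above swaps the obsolete -ERESTART for -ERESTARTSYS, the value the syscall layer recognises when it transparently restarts an interrupted ioctl after the signal is handled; the matching -ERESTART-to-EAGAIN translations in nouveau_gem.c below become dead code and are removed. A minimal sketch of the interruptible polling loop this models, using only generic kernel primitives:

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Illustrative wait: poll a completion predicate once per jiffy and bail
 * out with -ERESTARTSYS when a signal is pending, so the caller's ioctl
 * can be restarted after the signal handler runs. */
static int example_wait_interruptible(bool (*done)(void *data), void *data)
{
	while (!done(data)) {
		if (signal_pending(current))
			return -ERESTARTSYS;
		schedule_timeout_interruptible(1);
	}

	return 0;
}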
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 11f831f0ddc5..18fd8ac9fca7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -342,8 +342,6 @@ retry:
342 } 342 }
343 343
344 ret = ttm_bo_wait_cpu(&nvbo->bo, false); 344 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
345 if (ret == -ERESTART)
346 ret = -EAGAIN;
347 if (ret) 345 if (ret)
348 return ret; 346 return ret;
349 goto retry; 347 goto retry;
@@ -915,8 +913,6 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
915 goto out; 913 goto out;
916 914
917 ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait); 915 ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
918 if (ret == -ERESTART)
919 ret = -EAGAIN;
920 if (ret) 916 if (ret)
921 goto out; 917 goto out;
922 } 918 }
@@ -925,9 +921,6 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
925 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); 921 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
926 } else { 922 } else {
927 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); 923 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
928 if (ret == -ERESTART)
929 ret = -EAGAIN;
930 else
931 if (ret == 0) 924 if (ret == 0)
932 nvbo->cpu_filp = file_priv; 925 nvbo->cpu_filp = file_priv;
933 } 926 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 02755712ed3d..5158a12f7844 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
407 return 0; 407 return 0;
408} 408}
409 409
410#if __OS_HAS_AGP
410static void nouveau_mem_reset_agp(struct drm_device *dev) 411static void nouveau_mem_reset_agp(struct drm_device *dev)
411{ 412{
412 uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; 413 uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
@@ -432,10 +433,12 @@ static void nouveau_mem_reset_agp(struct drm_device *dev)
432 nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19); 433 nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
433 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1); 434 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
434} 435}
436#endif
435 437
436int 438int
437nouveau_mem_init_agp(struct drm_device *dev) 439nouveau_mem_init_agp(struct drm_device *dev)
438{ 440{
441#if __OS_HAS_AGP
439 struct drm_nouveau_private *dev_priv = dev->dev_private; 442 struct drm_nouveau_private *dev_priv = dev->dev_private;
440 struct drm_agp_info info; 443 struct drm_agp_info info;
441 struct drm_agp_mode mode; 444 struct drm_agp_mode mode;
@@ -471,6 +474,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
471 dev_priv->gart_info.type = NOUVEAU_GART_AGP; 474 dev_priv->gart_info.type = NOUVEAU_GART_AGP;
472 dev_priv->gart_info.aper_base = info.aperture_base; 475 dev_priv->gart_info.aper_base = info.aperture_base;
473 dev_priv->gart_info.aper_size = info.aperture_size; 476 dev_priv->gart_info.aper_size = info.aperture_size;
477#endif
474 return 0; 478 return 0;
475} 479}
476 480
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index d3e0a2a6acf8..7e8547cb5833 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -252,8 +252,9 @@ nv40_grctx_init(struct drm_device *dev)
252 memcpy(pgraph->ctxprog, fw->data, fw->size); 252 memcpy(pgraph->ctxprog, fw->data, fw->size);
253 253
254 cp = pgraph->ctxprog; 254 cp = pgraph->ctxprog;
255 if (cp->signature != 0x5043564e || cp->version != 0 || 255 if (le32_to_cpu(cp->signature) != 0x5043564e ||
256 cp->length != ((fw->size - 7) / 4)) { 256 cp->version != 0 ||
257 le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
257 NV_ERROR(dev, "ctxprog invalid\n"); 258 NV_ERROR(dev, "ctxprog invalid\n");
258 release_firmware(fw); 259 release_firmware(fw);
259 nv40_grctx_fini(dev); 260 nv40_grctx_fini(dev);
@@ -281,8 +282,9 @@ nv40_grctx_init(struct drm_device *dev)
281 memcpy(pgraph->ctxvals, fw->data, fw->size); 282 memcpy(pgraph->ctxvals, fw->data, fw->size);
282 283
283 cv = (void *)pgraph->ctxvals; 284 cv = (void *)pgraph->ctxvals;
284 if (cv->signature != 0x5643564e || cv->version != 0 || 285 if (le32_to_cpu(cv->signature) != 0x5643564e ||
285 cv->length != ((fw->size - 9) / 8)) { 286 cv->version != 0 ||
287 le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
286 NV_ERROR(dev, "ctxvals invalid\n"); 288 NV_ERROR(dev, "ctxvals invalid\n");
287 release_firmware(fw); 289 release_firmware(fw);
288 nv40_grctx_fini(dev); 290 nv40_grctx_fini(dev);
@@ -294,8 +296,9 @@ nv40_grctx_init(struct drm_device *dev)
294 cp = pgraph->ctxprog; 296 cp = pgraph->ctxprog;
295 297
296 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 298 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
297 for (i = 0; i < cp->length; i++) 299 for (i = 0; i < le16_to_cpu(cp->length); i++)
298 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]); 300 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
301 le32_to_cpu(cp->data[i]));
299 302
300 pgraph->accel_blocked = false; 303 pgraph->accel_blocked = false;
301 return 0; 304 return 0;
@@ -329,8 +332,9 @@ nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
329 if (!cv) 332 if (!cv)
330 return; 333 return;
331 334
332 for (i = 0; i < cv->length; i++) 335 for (i = 0; i < le32_to_cpu(cv->length); i++)
333 nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value); 336 nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
337 le32_to_cpu(cv->data[i].value));
334} 338}
335 339
336/* 340/*
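The nv40 ctxprog/ctxvals headers are stored little-endian in the firmware image, so comparing the raw fields only works on little-endian hosts; wrapping each access in le32_to_cpu()/le16_to_cpu(), as the hunks above do, makes the validation and the later register writes correct on big-endian machines while costing nothing on x86. A sketch of the same pattern with a hypothetical on-disk header:

#include <linux/types.h>
#include <asm/byteorder.h>	/* le32_to_cpu(), le16_to_cpu() */

/* Hypothetical firmware header, always little-endian on disk. */
struct example_fw_header {
	__le32	signature;
	u8	version;
	__le16	length;		/* payload size in 32-bit words */
} __attribute__((packed));

static bool example_fw_header_ok(const struct example_fw_header *hdr,
				 u32 expected_words)
{
	return le32_to_cpu(hdr->signature) == 0x5043564e &&
	       hdr->version == 0 &&
	       le16_to_cpu(hdr->length) == expected_words;
}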
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index feb52eee4314..b5f5fe75e6af 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
49 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 49 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
50 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 50 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
51 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 51 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
52 r600_blit_kms.o radeon_pm.o atombios_dp.o 52 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
53 53
54radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 54radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
55 55
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 824cc6480a06..84e5df766d3f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1374,7 +1374,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1374 case RADEON_TXFORMAT_ARGB4444: 1374 case RADEON_TXFORMAT_ARGB4444:
1375 case RADEON_TXFORMAT_VYUY422: 1375 case RADEON_TXFORMAT_VYUY422:
1376 case RADEON_TXFORMAT_YVYU422: 1376 case RADEON_TXFORMAT_YVYU422:
1377 case RADEON_TXFORMAT_DXT1:
1378 case RADEON_TXFORMAT_SHADOW16: 1377 case RADEON_TXFORMAT_SHADOW16:
1379 case RADEON_TXFORMAT_LDUDV655: 1378 case RADEON_TXFORMAT_LDUDV655:
1380 case RADEON_TXFORMAT_DUDV88: 1379 case RADEON_TXFORMAT_DUDV88:
@@ -1382,12 +1381,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1382 break; 1381 break;
1383 case RADEON_TXFORMAT_ARGB8888: 1382 case RADEON_TXFORMAT_ARGB8888:
1384 case RADEON_TXFORMAT_RGBA8888: 1383 case RADEON_TXFORMAT_RGBA8888:
1385 case RADEON_TXFORMAT_DXT23:
1386 case RADEON_TXFORMAT_DXT45:
1387 case RADEON_TXFORMAT_SHADOW32: 1384 case RADEON_TXFORMAT_SHADOW32:
1388 case RADEON_TXFORMAT_LDUDUV8888: 1385 case RADEON_TXFORMAT_LDUDUV8888:
1389 track->textures[i].cpp = 4; 1386 track->textures[i].cpp = 4;
1390 break; 1387 break;
1388 case RADEON_TXFORMAT_DXT1:
1389 track->textures[i].cpp = 1;
1390 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
1391 break;
1392 case RADEON_TXFORMAT_DXT23:
1393 case RADEON_TXFORMAT_DXT45:
1394 track->textures[i].cpp = 1;
1395 track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
1396 break;
1391 } 1397 }
1392 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1398 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1393 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1399 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
@@ -2731,6 +2737,7 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2731 DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2737 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2732 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2738 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2733 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2739 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2740 DRM_ERROR("compress format %d\n", t->compress_format);
2734} 2741}
2735 2742
2736static int r100_cs_track_cube(struct radeon_device *rdev, 2743static int r100_cs_track_cube(struct radeon_device *rdev,
@@ -2760,6 +2767,36 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
2760 return 0; 2767 return 0;
2761} 2768}
2762 2769
2770static int r100_track_compress_size(int compress_format, int w, int h)
2771{
2772 int block_width, block_height, block_bytes;
2773 int wblocks, hblocks;
2774 int min_wblocks;
2775 int sz;
2776
2777 block_width = 4;
2778 block_height = 4;
2779
2780 switch (compress_format) {
2781 case R100_TRACK_COMP_DXT1:
2782 block_bytes = 8;
2783 min_wblocks = 4;
2784 break;
2785 default:
2786 case R100_TRACK_COMP_DXT35:
2787 block_bytes = 16;
2788 min_wblocks = 2;
2789 break;
2790 }
2791
2792 hblocks = (h + block_height - 1) / block_height;
2793 wblocks = (w + block_width - 1) / block_width;
2794 if (wblocks < min_wblocks)
2795 wblocks = min_wblocks;
2796 sz = wblocks * hblocks * block_bytes;
2797 return sz;
2798}
2799
2763static int r100_cs_track_texture_check(struct radeon_device *rdev, 2800static int r100_cs_track_texture_check(struct radeon_device *rdev,
2764 struct r100_cs_track *track) 2801 struct r100_cs_track *track)
2765{ 2802{
@@ -2797,9 +2834,15 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2797 h = h / (1 << i); 2834 h = h / (1 << i);
2798 if (track->textures[u].roundup_h) 2835 if (track->textures[u].roundup_h)
2799 h = roundup_pow_of_two(h); 2836 h = roundup_pow_of_two(h);
2800 size += w * h; 2837 if (track->textures[u].compress_format) {
2838
2839 size += r100_track_compress_size(track->textures[u].compress_format, w, h);
2840 /* compressed textures are block based */
2841 } else
2842 size += w * h;
2801 } 2843 }
2802 size *= track->textures[u].cpp; 2844 size *= track->textures[u].cpp;
2845
2803 switch (track->textures[u].tex_coord_type) { 2846 switch (track->textures[u].tex_coord_type) {
2804 case 0: 2847 case 0:
2805 break; 2848 break;
@@ -2967,6 +3010,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
2967 track->arrays[i].esize = 0x7F; 3010 track->arrays[i].esize = 0x7F;
2968 } 3011 }
2969 for (i = 0; i < track->num_texture; i++) { 3012 for (i = 0; i < track->num_texture; i++) {
3013 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2970 track->textures[i].pitch = 16536; 3014 track->textures[i].pitch = 16536;
2971 track->textures[i].width = 16536; 3015 track->textures[i].width = 16536;
2972 track->textures[i].height = 16536; 3016 track->textures[i].height = 16536;
@@ -3399,6 +3443,8 @@ int r100_init(struct radeon_device *rdev)
3399 r100_errata(rdev); 3443 r100_errata(rdev);
3400 /* Initialize clocks */ 3444 /* Initialize clocks */
3401 radeon_get_clock_info(rdev->ddev); 3445 radeon_get_clock_info(rdev->ddev);
3446 /* Initialize power management */
3447 radeon_pm_init(rdev);
3402 /* Get vram informations */ 3448 /* Get vram informations */
3403 r100_vram_info(rdev); 3449 r100_vram_info(rdev);
3404 /* Initialize memory controller (also test AGP) */ 3450 /* Initialize memory controller (also test AGP) */
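The new r100_track_compress_size() helper accounts for DXT textures being stored as 4x4 texel blocks: DXT1 uses 8 bytes per block with a minimum row of 4 blocks, DXT3/DXT5 use 16 bytes per block with a minimum of 2 blocks per row. Worked through by hand that gives, for example:

	/* 256x256 DXT1  : 64 x 64 blocks *  8 bytes = 32768 bytes
	 * 256x256 DXT3/5: 64 x 64 blocks * 16 bytes = 65536 bytes
	 *   4x4   DXT1  : 1 x 1 block, padded to 4 blocks wide = 32 bytes
	 * The per-texel cpp is set to 1 for these formats, so the later
	 * "size *= cpp" multiply leaves the block-based size untouched. */
	size = r100_track_compress_size(R100_TRACK_COMP_DXT1, 256, 256);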
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index ca50903dd2bb..7188c3778ee2 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -28,6 +28,10 @@ struct r100_cs_cube_info {
28 unsigned height; 28 unsigned height;
29}; 29};
30 30
31#define R100_TRACK_COMP_NONE 0
32#define R100_TRACK_COMP_DXT1 1
33#define R100_TRACK_COMP_DXT35 2
34
31struct r100_cs_track_texture { 35struct r100_cs_track_texture {
32 struct radeon_bo *robj; 36 struct radeon_bo *robj;
33 struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ 37 struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
@@ -44,6 +48,7 @@ struct r100_cs_track_texture {
44 bool enabled; 48 bool enabled;
45 bool roundup_w; 49 bool roundup_w;
46 bool roundup_h; 50 bool roundup_h;
51 unsigned compress_format;
47}; 52};
48 53
49struct r100_cs_track_limits { 54struct r100_cs_track_limits {
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index eb740fc3549f..20942127c46b 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -401,7 +401,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
401 case R200_TXFORMAT_Y8: 401 case R200_TXFORMAT_Y8:
402 track->textures[i].cpp = 1; 402 track->textures[i].cpp = 1;
403 break; 403 break;
404 case R200_TXFORMAT_DXT1:
405 case R200_TXFORMAT_AI88: 404 case R200_TXFORMAT_AI88:
406 case R200_TXFORMAT_ARGB1555: 405 case R200_TXFORMAT_ARGB1555:
407 case R200_TXFORMAT_RGB565: 406 case R200_TXFORMAT_RGB565:
@@ -418,9 +417,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
418 case R200_TXFORMAT_ABGR8888: 417 case R200_TXFORMAT_ABGR8888:
419 case R200_TXFORMAT_BGR111110: 418 case R200_TXFORMAT_BGR111110:
420 case R200_TXFORMAT_LDVDU8888: 419 case R200_TXFORMAT_LDVDU8888:
420 track->textures[i].cpp = 4;
421 break;
422 case R200_TXFORMAT_DXT1:
423 track->textures[i].cpp = 1;
424 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
425 break;
421 case R200_TXFORMAT_DXT23: 426 case R200_TXFORMAT_DXT23:
422 case R200_TXFORMAT_DXT45: 427 case R200_TXFORMAT_DXT45:
423 track->textures[i].cpp = 4; 428 track->textures[i].cpp = 1;
429 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
424 break; 430 break;
425 } 431 }
426 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 432 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 83378c39d0e3..83490c2b5061 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -686,7 +686,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
686 r100_cs_dump_packet(p, pkt); 686 r100_cs_dump_packet(p, pkt);
687 return r; 687 return r;
688 } 688 }
689 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 689
690 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
691 tile_flags |= R300_TXO_MACRO_TILE;
692 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
693 tile_flags |= R300_TXO_MICRO_TILE;
694
695 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
696 tmp |= tile_flags;
697 ib[idx] = tmp;
690 track->textures[i].robj = reloc->robj; 698 track->textures[i].robj = reloc->robj;
691 break; 699 break;
692 /* Tracked registers */ 700 /* Tracked registers */
@@ -852,7 +860,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
852 case R300_TX_FORMAT_Z6Y5X5: 860 case R300_TX_FORMAT_Z6Y5X5:
853 case R300_TX_FORMAT_W4Z4Y4X4: 861 case R300_TX_FORMAT_W4Z4Y4X4:
854 case R300_TX_FORMAT_W1Z5Y5X5: 862 case R300_TX_FORMAT_W1Z5Y5X5:
855 case R300_TX_FORMAT_DXT1:
856 case R300_TX_FORMAT_D3DMFT_CxV8U8: 863 case R300_TX_FORMAT_D3DMFT_CxV8U8:
857 case R300_TX_FORMAT_B8G8_B8G8: 864 case R300_TX_FORMAT_B8G8_B8G8:
858 case R300_TX_FORMAT_G8R8_G8B8: 865 case R300_TX_FORMAT_G8R8_G8B8:
@@ -866,8 +873,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
866 case 0x17: 873 case 0x17:
867 case R300_TX_FORMAT_FL_I32: 874 case R300_TX_FORMAT_FL_I32:
868 case 0x1e: 875 case 0x1e:
869 case R300_TX_FORMAT_DXT3:
870 case R300_TX_FORMAT_DXT5:
871 track->textures[i].cpp = 4; 876 track->textures[i].cpp = 4;
872 break; 877 break;
873 case R300_TX_FORMAT_W16Z16Y16X16: 878 case R300_TX_FORMAT_W16Z16Y16X16:
@@ -878,6 +883,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
878 case R300_TX_FORMAT_FL_R32G32B32A32: 883 case R300_TX_FORMAT_FL_R32G32B32A32:
879 track->textures[i].cpp = 16; 884 track->textures[i].cpp = 16;
880 break; 885 break;
886 case R300_TX_FORMAT_DXT1:
887 track->textures[i].cpp = 1;
888 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
889 break;
890 case R300_TX_FORMAT_DXT3:
891 case R300_TX_FORMAT_DXT5:
892 track->textures[i].cpp = 1;
893 track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
894 break;
881 default: 895 default:
882 DRM_ERROR("Invalid texture format %u\n", 896 DRM_ERROR("Invalid texture format %u\n",
883 (idx_value & 0x1F)); 897 (idx_value & 0x1F));
@@ -1324,6 +1338,8 @@ int r300_init(struct radeon_device *rdev)
1324 r300_errata(rdev); 1338 r300_errata(rdev);
1325 /* Initialize clocks */ 1339 /* Initialize clocks */
1326 radeon_get_clock_info(rdev->ddev); 1340 radeon_get_clock_info(rdev->ddev);
1341 /* Initialize power management */
1342 radeon_pm_init(rdev);
1327 /* Get vram informations */ 1343 /* Get vram informations */
1328 r300_vram_info(rdev); 1344 r300_vram_info(rdev);
1329 /* Initialize memory controller (also test AGP) */ 1345 /* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 36656bd110bf..a0ac3c134b1b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1863,6 +1863,14 @@ int r600_startup(struct radeon_device *rdev)
1863 } 1863 }
1864 r600_gpu_init(rdev); 1864 r600_gpu_init(rdev);
1865 1865
1866 if (!rdev->r600_blit.shader_obj) {
1867 r = r600_blit_init(rdev);
1868 if (r) {
1869 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1870 return r;
1871 }
1872 }
1873
1866 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1874 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1867 if (unlikely(r != 0)) 1875 if (unlikely(r != 0))
1868 return r; 1876 return r;
@@ -2038,12 +2046,6 @@ int r600_init(struct radeon_device *rdev)
2038 if (r) 2046 if (r)
2039 return r; 2047 return r;
2040 2048
2041 r = r600_blit_init(rdev);
2042 if (r) {
2043 DRM_ERROR("radeon: failed blitter (%d).\n", r);
2044 return r;
2045 }
2046
2047 rdev->accel_working = true; 2049 rdev->accel_working = true;
2048 r = r600_startup(rdev); 2050 r = r600_startup(rdev);
2049 if (r) { 2051 if (r) {
@@ -2065,6 +2067,10 @@ int r600_init(struct radeon_device *rdev)
2065 rdev->accel_working = false; 2067 rdev->accel_working = false;
2066 } 2068 }
2067 } 2069 }
2070
2071 r = r600_audio_init(rdev);
2072 if (r)
2073 return r; /* TODO error handling */
2068 return 0; 2074 return 0;
2069} 2075}
2070 2076
@@ -2073,6 +2079,7 @@ void r600_fini(struct radeon_device *rdev)
2073 /* Suspend operations */ 2079 /* Suspend operations */
2074 r600_suspend(rdev); 2080 r600_suspend(rdev);
2075 2081
2082 r600_audio_fini(rdev);
2076 r600_blit_fini(rdev); 2083 r600_blit_fini(rdev);
2077 r600_irq_fini(rdev); 2084 r600_irq_fini(rdev);
2078 radeon_irq_kms_fini(rdev); 2085 radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
new file mode 100644
index 000000000000..99e2c3891a7d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -0,0 +1,267 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Christian König.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Christian König
25 */
26#include "drmP.h"
27#include "radeon.h"
28#include "radeon_reg.h"
29#include "atom.h"
30
31#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
32
33/*
34 * check if the chipset is supported
35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{
38 return rdev->family >= CHIP_R600
39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740;
42}
43
44/*
45 * current number of channels
46 */
47static int r600_audio_channels(struct radeon_device *rdev)
48{
49 return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
50}
51
52/*
53 * current bits per sample
54 */
55static int r600_audio_bits_per_sample(struct radeon_device *rdev)
56{
57 uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
58 switch (value) {
59 case 0x0: return 8;
60 case 0x1: return 16;
61 case 0x2: return 20;
62 case 0x3: return 24;
63 case 0x4: return 32;
64 }
65
66 DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
67
68 return 16;
69}
70
71/*
72 * current sampling rate in HZ
73 */
74static int r600_audio_rate(struct radeon_device *rdev)
75{
76 uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
77 uint32_t result;
78
79 if (value & 0x4000)
80 result = 44100;
81 else
82 result = 48000;
83
84 result *= ((value >> 11) & 0x7) + 1;
85 result /= ((value >> 8) & 0x7) + 1;
86
87 return result;
88}
89
90/*
91 * iec 60958 status bits
92 */
93static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
94{
95 return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
96}
97
98/*
99 * iec 60958 category code
100 */
101static uint8_t r600_audio_category_code(struct radeon_device *rdev)
102{
103 return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
104}
105
106/*
107 * update all hdmi interfaces with current audio parameters
108 */
109static void r600_audio_update_hdmi(unsigned long param)
110{
111 struct radeon_device *rdev = (struct radeon_device *)param;
112 struct drm_device *dev = rdev->ddev;
113
114 int channels = r600_audio_channels(rdev);
115 int rate = r600_audio_rate(rdev);
116 int bps = r600_audio_bits_per_sample(rdev);
117 uint8_t status_bits = r600_audio_status_bits(rdev);
118 uint8_t category_code = r600_audio_category_code(rdev);
119
120 struct drm_encoder *encoder;
121 int changes = 0;
122
123 changes |= channels != rdev->audio_channels;
124 changes |= rate != rdev->audio_rate;
125 changes |= bps != rdev->audio_bits_per_sample;
126 changes |= status_bits != rdev->audio_status_bits;
127 changes |= category_code != rdev->audio_category_code;
128
129 if (changes) {
130 rdev->audio_channels = channels;
131 rdev->audio_rate = rate;
132 rdev->audio_bits_per_sample = bps;
133 rdev->audio_status_bits = status_bits;
134 rdev->audio_category_code = category_code;
135 }
136
137 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
138 if (changes || r600_hdmi_buffer_status_changed(encoder))
139 r600_hdmi_update_audio_settings(
140 encoder, channels,
141 rate, bps, status_bits,
142 category_code);
143 }
144
145 mod_timer(&rdev->audio_timer,
146 jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
147}
148
149/*
150 * initialize the audio vars and register the update timer
151 */
152int r600_audio_init(struct radeon_device *rdev)
153{
154 if (!r600_audio_chipset_supported(rdev))
155 return 0;
156
157 DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
158 WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
159
160 rdev->audio_channels = -1;
161 rdev->audio_rate = -1;
162 rdev->audio_bits_per_sample = -1;
163 rdev->audio_status_bits = 0;
164 rdev->audio_category_code = 0;
165
166 setup_timer(
167 &rdev->audio_timer,
168 r600_audio_update_hdmi,
169 (unsigned long)rdev);
170
171 mod_timer(&rdev->audio_timer, jiffies + 1);
172
173 return 0;
174}
175
176/*
177 * determine how the encoders and the audio interface are wired together
178 */
179int r600_audio_tmds_index(struct drm_encoder *encoder)
180{
181 struct drm_device *dev = encoder->dev;
182 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
183 struct drm_encoder *other;
184
185 switch (radeon_encoder->encoder_id) {
186 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
187 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
188 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
189 return 0;
190
191 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
192 /* special case: check if a TMDS1 is present */
193 list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
194 if (to_radeon_encoder(other)->encoder_id ==
195 ENCODER_OBJECT_ID_INTERNAL_TMDS1)
196 return 1;
197 }
198 return 0;
199
200 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
201 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
202 return 1;
203
204 default:
205 DRM_ERROR("Unsupported encoder type 0x%02X\n",
206 radeon_encoder->encoder_id);
207 return -1;
208 }
209}
210
211/*
212 * attach the audio codec to the clock source of the encoder
213 */
214void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
215{
216 struct drm_device *dev = encoder->dev;
217 struct radeon_device *rdev = dev->dev_private;
218 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
219 int base_rate = 48000;
220
221 switch (radeon_encoder->encoder_id) {
222 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
223 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
224 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
225 break;
226
227 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
228 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
229 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
230 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
231 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
232 break;
233
234 default:
235 DRM_ERROR("Unsupported encoder type 0x%02X\n",
236 radeon_encoder->encoder_id);
237 return;
238 }
239
240 switch (r600_audio_tmds_index(encoder)) {
241 case 0:
242 WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
243 WREG32(R600_AUDIO_PLL1_DIV, clock*100);
244 WREG32(R600_AUDIO_CLK_SRCSEL, 0);
245 break;
246
247 case 1:
248 WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
249 WREG32(R600_AUDIO_PLL2_DIV, clock*100);
250 WREG32(R600_AUDIO_CLK_SRCSEL, 1);
251 break;
252 }
253}
254
255/*
256 * release the audio timer
257 * TODO: How to do this correctly on SMP systems?
258 */
259void r600_audio_fini(struct radeon_device *rdev)
260{
261 if (!r600_audio_chipset_supported(rdev))
262 return;
263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer);
267}
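The helpers above decode a single status register: going by that code, the low bits of R600_AUDIO_RATE_BPS_CHANNEL carry the channel count, bits 7..4 the bits-per-sample code, bit 14 the 44.1 kHz base-rate select, and bits 13..8 a multiplier/divider pair. A worked decode of one hypothetical register value:

	/* Hypothetical R600_AUDIO_RATE_BPS_CHANNEL value 0x0811:
	 *   bits  2..0  = 0x1          -> 1 + 1              = 2 channels
	 *   bits  7..4  = 0x1          -> 16 bits per sample
	 *   bit  14     = 0            -> 48000 Hz base rate
	 *   bits 13..11 = 0x1, 10..8 = 0x0
	 *                 -> 48000 * (1 + 1) / (0 + 1)        = 96000 Hz */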
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 000000000000..fcc949df0e5d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,506 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Christian König.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Christian König
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29#include "atom.h"
30
31/*
32 * HDMI color format
33 */
34enum r600_hdmi_color_format {
35 RGB = 0,
36 YCC_422 = 1,
37 YCC_444 = 2
38};
39
40/*
41 * IEC60958 status bits
42 */
43enum r600_hdmi_iec_status_bits {
44 AUDIO_STATUS_DIG_ENABLE = 0x01,
45 AUDIO_STATUS_V = 0x02,
46 AUDIO_STATUS_VCFG = 0x04,
47 AUDIO_STATUS_EMPHASIS = 0x08,
48 AUDIO_STATUS_COPYRIGHT = 0x10,
49 AUDIO_STATUS_NONAUDIO = 0x20,
50 AUDIO_STATUS_PROFESSIONAL = 0x40,
51 AUDIO_STATUS_LEVEL = 0x80
52};
53
54struct {
55 uint32_t Clock;
56
57 int N_32kHz;
58 int CTS_32kHz;
59
60 int N_44_1kHz;
61 int CTS_44_1kHz;
62
63 int N_48kHz;
64 int CTS_48kHz;
65
66} r600_hdmi_ACR[] = {
67 /* 32kHz 44.1kHz 48kHz */
68 /* Clock N CTS N CTS N CTS */
69 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
70 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
71 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
72 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
73 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
74 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
75 { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
76 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
77 { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
78 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
79 { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
80};
81
82/*
83 * calculate CTS value if it's not found in the table
84 */
85static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
86{
87 if (*CTS == 0)
88 *CTS = clock*N/(128*freq)*1000;
89 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
90 N, *CTS, freq);
91}
92
93/*
94 * update the N and CTS parameters for a given pixel clock rate
95 */
96static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
97{
98 struct drm_device *dev = encoder->dev;
99 struct radeon_device *rdev = dev->dev_private;
100 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
101 int CTS;
102 int N;
103 int i;
104
105 for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
106
107 CTS = r600_hdmi_ACR[i].CTS_32kHz;
108 N = r600_hdmi_ACR[i].N_32kHz;
109 r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
110 WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
111 WREG32(offset+R600_HDMI_32kHz_N, N);
112
113 CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
114 N = r600_hdmi_ACR[i].N_44_1kHz;
115 r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
116 WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
117 WREG32(offset+R600_HDMI_44_1kHz_N, N);
118
119 CTS = r600_hdmi_ACR[i].CTS_48kHz;
120 N = r600_hdmi_ACR[i].N_48kHz;
121 r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
122 WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
123 WREG32(offset+R600_HDMI_48kHz_N, N);
124}
125
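The ACR table above encodes the HDMI audio clock regeneration rule 128 * f_audio * CTS = f_pixel * N; for pixel clocks that miss the table, r600_hdmi_calc_CTS() derives CTS from that relation, with the pixel clock given in kHz (hence the final * 1000). For example, a 65.000 MHz XGA dot clock is not listed, so at 48 kHz with the table's default N = 6144:

	/* CTS = clock * N / (128 * freq) * 1000
	 *     = 65000 * 6144 / (128 * 48000) * 1000
	 *     = 399360000 / 6144000 * 1000
	 *     = 65 * 1000 = 65000
	 * i.e. 128 * 48000 * 65000 == 65000000 * 6144, as required. */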
126/*
127 * calculate the crc for a given info frame
128 */
129static void r600_hdmi_infoframe_checksum(uint8_t packetType,
130 uint8_t versionNumber,
131 uint8_t length,
132 uint8_t *frame)
133{
134 int i;
135 frame[0] = packetType + versionNumber + length;
136 for (i = 1; i <= length; i++)
137 frame[0] += frame[i];
138 frame[0] = 0x100 - frame[0];
139}
140
141/*
142 * build a HDMI Video Info Frame
143 */
144static void r600_hdmi_videoinfoframe(
145 struct drm_encoder *encoder,
146 enum r600_hdmi_color_format color_format,
147 int active_information_present,
148 uint8_t active_format_aspect_ratio,
149 uint8_t scan_information,
150 uint8_t colorimetry,
151 uint8_t ex_colorimetry,
152 uint8_t quantization,
153 int ITC,
154 uint8_t picture_aspect_ratio,
155 uint8_t video_format_identification,
156 uint8_t pixel_repetition,
157 uint8_t non_uniform_picture_scaling,
158 uint8_t bar_info_data_valid,
159 uint16_t top_bar,
160 uint16_t bottom_bar,
161 uint16_t left_bar,
162 uint16_t right_bar
163)
164{
165 struct drm_device *dev = encoder->dev;
166 struct radeon_device *rdev = dev->dev_private;
167 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
168
169 uint8_t frame[14];
170
171 frame[0x0] = 0;
172 frame[0x1] =
173 (scan_information & 0x3) |
174 ((bar_info_data_valid & 0x3) << 2) |
175 ((active_information_present & 0x1) << 4) |
176 ((color_format & 0x3) << 5);
177 frame[0x2] =
178 (active_format_aspect_ratio & 0xF) |
179 ((picture_aspect_ratio & 0x3) << 4) |
180 ((colorimetry & 0x3) << 6);
181 frame[0x3] =
182 (non_uniform_picture_scaling & 0x3) |
183 ((quantization & 0x3) << 2) |
184 ((ex_colorimetry & 0x7) << 4) |
185 ((ITC & 0x1) << 7);
186 frame[0x4] = (video_format_identification & 0x7F);
187 frame[0x5] = (pixel_repetition & 0xF);
188 frame[0x6] = (top_bar & 0xFF);
189 frame[0x7] = (top_bar >> 8);
190 frame[0x8] = (bottom_bar & 0xFF);
191 frame[0x9] = (bottom_bar >> 8);
192 frame[0xA] = (left_bar & 0xFF);
193 frame[0xB] = (left_bar >> 8);
194 frame[0xC] = (right_bar & 0xFF);
195 frame[0xD] = (right_bar >> 8);
196
197 r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
198
199 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
200 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
201 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
202 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
203 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
204 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
205 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
206 frame[0xC] | (frame[0xD] << 8));
207}
208
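The checksum routine above folds the header bytes (packet type, version, length) and the payload into frame[0] and stores the two's complement, so the byte sum of header plus completed frame is 0 modulo 256. For the AVI frame built by r600_hdmi_videoinfoframe() with an all-zero payload, that works out to:

	/* type 0x82 + version 0x02 + length 0x0D + payload 0x00... = 0x91
	 * frame[0] = 0x100 - 0x91 = 0x6F
	 * check: 0x82 + 0x02 + 0x0D + 0x6F = 0x100 == 0 (mod 256) */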
209/*
210 * build an Audio Info Frame
211 */
212static void r600_hdmi_audioinfoframe(
213 struct drm_encoder *encoder,
214 uint8_t channel_count,
215 uint8_t coding_type,
216 uint8_t sample_size,
217 uint8_t sample_frequency,
218 uint8_t format,
219 uint8_t channel_allocation,
220 uint8_t level_shift,
221 int downmix_inhibit
222)
223{
224 struct drm_device *dev = encoder->dev;
225 struct radeon_device *rdev = dev->dev_private;
226 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
227
228 uint8_t frame[11];
229
230 frame[0x0] = 0;
231 frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
232 frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
233 frame[0x3] = format;
234 frame[0x4] = channel_allocation;
235 frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
236 frame[0x6] = 0;
237 frame[0x7] = 0;
238 frame[0x8] = 0;
239 frame[0x9] = 0;
240 frame[0xA] = 0;
241
242 r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
243
244 WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
245 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
246 WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
247 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
248}
249
250/*
251 * test if audio buffer is filled enough to start playing
252 */
253static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
254{
255 struct drm_device *dev = encoder->dev;
256 struct radeon_device *rdev = dev->dev_private;
257 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
258
259 return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
260}
261
262/*
263 * have buffer status changed since last call?
264 */
265int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
266{
267 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
268 int status, result;
269
270 if (!radeon_encoder->hdmi_offset)
271 return 0;
272
273 status = r600_hdmi_is_audio_buffer_filled(encoder);
274 result = radeon_encoder->hdmi_buffer_status != status;
275 radeon_encoder->hdmi_buffer_status = status;
276
277 return result;
278}
279
280/*
281 * write the audio workaround status to the hardware
282 */
283void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
284{
285 struct drm_device *dev = encoder->dev;
286 struct radeon_device *rdev = dev->dev_private;
287 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
288 uint32_t offset = radeon_encoder->hdmi_offset;
289
290 if (!offset)
291 return;
292
293 if (r600_hdmi_is_audio_buffer_filled(encoder)) {
294 /* disable audio workaround and start delivering of audio frames */
295 WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
296
297 } else if (radeon_encoder->hdmi_audio_workaround) {
298 /* enable audio workaround and start delivering of audio frames */
299 WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
300
301 } else {
302 /* disable audio workaround and stop delivering of audio frames */
303 WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
304 }
305}
306
307
308/*
309 * update the info frames with the data from the current display mode
310 */
311void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
312{
313 struct drm_device *dev = encoder->dev;
314 struct radeon_device *rdev = dev->dev_private;
315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
316
317 if (!offset)
318 return;
319
320 r600_audio_set_clock(encoder, mode->clock);
321
322 WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
323 WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
324 WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
325
326 r600_hdmi_update_ACR(encoder, mode->clock);
327
328 WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
329
330 WREG32(offset+R600_HDMI_VERSION, 0x202);
331
332 r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
333 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
334
335 /* it's unknown what these bits do exactly, but they are indeed quite useful for debugging */
336 WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
337 WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
338 WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
339 WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
340
341 r600_hdmi_audio_workaround(encoder);
342
343 /* audio packets per line, does anyone know how to calculate this? */
344 WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
345
346 /* update? reset? don't really know */
347 WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
348}
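/*
 * A hedged sketch, not part of this patch: r600_hdmi_update_ACR() is defined
 * earlier in this file and is not shown in this hunk. Assuming it programs
 * the R600_HDMI_*_CTS/N registers with HDMI audio clock regeneration values,
 * the relation from the HDMI spec is 128 * f_s = f_TMDS * N / CTS, i.e.
 * CTS = f_TMDS * N / (128 * f_s), with the baseline recommended N values
 * 4096 (32 kHz), 6272 (44.1 kHz) and 6144 (48 kHz). An illustrative helper
 * (hdmi_acr_cts is a made-up name):
 *
 *	static uint32_t hdmi_acr_cts(uint32_t tmds_clock_khz, uint32_t n,
 *				     uint32_t sample_rate)
 *	{
 *		return (uint64_t)tmds_clock_khz * 1000 * n / (128 * sample_rate);
 *	}
 *
 * e.g. a 74.25 MHz mode at 48 kHz with N = 6144 gives CTS = 74250.
 */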
349
350/*
351 * update settings with current parameters from audio engine
352 */
353void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
354 int channels,
355 int rate,
356 int bps,
357 uint8_t status_bits,
358 uint8_t category_code)
359{
360 struct drm_device *dev = encoder->dev;
361 struct radeon_device *rdev = dev->dev_private;
362 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
363
364 uint32_t iec;
365
366 if (!offset)
367 return;
368
369 DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
370 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
371 channels, rate, bps);
372 DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
373 (int)status_bits, (int)category_code);
374
375 iec = 0;
376 if (status_bits & AUDIO_STATUS_PROFESSIONAL)
377 iec |= 1 << 0;
378 if (status_bits & AUDIO_STATUS_NONAUDIO)
379 iec |= 1 << 1;
380 if (status_bits & AUDIO_STATUS_COPYRIGHT)
381 iec |= 1 << 2;
382 if (status_bits & AUDIO_STATUS_EMPHASIS)
383 iec |= 1 << 3;
384
385 iec |= category_code << 8;
386
387 switch (rate) {
388 case 32000: iec |= 0x3 << 24; break;
389 case 44100: iec |= 0x0 << 24; break;
390 case 88200: iec |= 0x8 << 24; break;
391 case 176400: iec |= 0xc << 24; break;
392 case 48000: iec |= 0x2 << 24; break;
393 case 96000: iec |= 0xa << 24; break;
394 case 192000: iec |= 0xe << 24; break;
395 }
396
397 WREG32(offset+R600_HDMI_IEC60958_1, iec);
398
399 iec = 0;
400 switch (bps) {
401 case 16: iec |= 0x2; break;
402 case 20: iec |= 0x3; break;
403 case 24: iec |= 0xb; break;
404 }
405 if (status_bits & AUDIO_STATUS_V)
406 iec |= 0x5 << 16;
407
408 WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
409
410 /* 0x021 or 0x031 sets the audio frame length */
411 WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
412 r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
413
414 r600_hdmi_audio_workaround(encoder);
415
416 /* update? reset? don't really know */
417 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
418}
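/*
 * Worked example (derived from the code above, not part of this patch):
 * plain consumer PCM at 48 kHz and 16 bits per sample, with no status bits
 * set and a category code of 0, ends up writing 0x02000000 to
 * R600_HDMI_IEC60958_1 and programming 0x2 into the 0x0005000f field of
 * R600_HDMI_IEC60958_2.
 */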
419
420/*
421 * enable/disable the HDMI engine
422 */
423void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
424{
425 struct drm_device *dev = encoder->dev;
426 struct radeon_device *rdev = dev->dev_private;
427 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
428 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
429
430 if (!offset)
431 return;
432
433 DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
434
435 /* some versions of atombios ignore the enable HDMI flag
436 * so enabling/disabling HDMI was moved here for TMDS1+2 */
437 switch (radeon_encoder->encoder_id) {
438 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
439 WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
440 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
441 break;
442
443 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
444 WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
445 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
446 break;
447
448 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
449 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
450 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
451 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
452 /* This part is doubtful in my opinion */
453 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
454 break;
455
456 default:
457 DRM_ERROR("unknown HDMI output type\n");
458 break;
459 }
460}
461
462/*
463 * determine at which register offset the HDMI encoder sits
464 */
465void r600_hdmi_init(struct drm_encoder *encoder)
466{
467 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
468
469 switch (radeon_encoder->encoder_id) {
470 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
471 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
472 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
473 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
474 break;
475
476 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
477 switch (r600_audio_tmds_index(encoder)) {
478 case 0:
479 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
480 break;
481 case 1:
482 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
483 break;
484 default:
485 radeon_encoder->hdmi_offset = 0;
486 break;
487 }
break;
488 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
489 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
490 break;
491
492 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
493 radeon_encoder->hdmi_offset = R600_HDMI_DIG;
494 break;
495
496 default:
497 radeon_encoder->hdmi_offset = 0;
498 break;
499 }
500
501 DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
502 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
503
504 /* TODO: make this configurable */
505 radeon_encoder->hdmi_audio_workaround = 0;
506}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index e2d1f5f33f7e..d0e28ffdeda9 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -110,5 +110,79 @@
110#define R600_BIOS_6_SCRATCH 0x173c 110#define R600_BIOS_6_SCRATCH 0x173c
111#define R600_BIOS_7_SCRATCH 0x1740 111#define R600_BIOS_7_SCRATCH 0x1740
112 112
113/* Audio, these regs were reverse engineered,
114 * so the chance is high that the naming is wrong
115 * R6xx+ ??? */
116
117/* Audio clocks */
118#define R600_AUDIO_PLL1_MUL 0x0514
119#define R600_AUDIO_PLL1_DIV 0x0518
120#define R600_AUDIO_PLL2_MUL 0x0524
121#define R600_AUDIO_PLL2_DIV 0x0528
122#define R600_AUDIO_CLK_SRCSEL 0x0534
123
124/* Audio general */
125#define R600_AUDIO_ENABLE 0x7300
126#define R600_AUDIO_TIMING 0x7344
127
128/* Audio params */
129#define R600_AUDIO_VENDOR_ID 0x7380
130#define R600_AUDIO_REVISION_ID 0x7384
131#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
132#define R600_AUDIO_NID1_NODE_COUNT 0x738c
133#define R600_AUDIO_NID1_TYPE 0x7390
134#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
135#define R600_AUDIO_SUPPORTED_CODEC 0x7398
136#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
137#define R600_AUDIO_NID2_CAPS 0x73a0
138#define R600_AUDIO_NID3_CAPS 0x73a4
139#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
140
141/* Audio conn list */
142#define R600_AUDIO_CONN_LIST_LEN 0x73ac
143#define R600_AUDIO_CONN_LIST 0x73b0
144
145/* Audio verbs */
146#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
147#define R600_AUDIO_PLAYING 0x73c4
148#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
149#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
150#define R600_AUDIO_PIN_SENSE 0x73d0
151#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
152#define R600_AUDIO_STATUS_BITS 0x73d8
153
154/* HDMI base register addresses */
155#define R600_HDMI_TMDS1 0x7400
156#define R600_HDMI_TMDS2 0x7700
157#define R600_HDMI_DIG 0x7800
158
159/* HDMI registers */
160#define R600_HDMI_ENABLE 0x00
161#define R600_HDMI_STATUS 0x04
162#define R600_HDMI_CNTL 0x08
163#define R600_HDMI_UNKNOWN_0 0x0C
164#define R600_HDMI_AUDIOCNTL 0x10
165#define R600_HDMI_VIDEOCNTL 0x14
166#define R600_HDMI_VERSION 0x18
167#define R600_HDMI_UNKNOWN_1 0x28
168#define R600_HDMI_VIDEOINFOFRAME_0 0x54
169#define R600_HDMI_VIDEOINFOFRAME_1 0x58
170#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
171#define R600_HDMI_VIDEOINFOFRAME_3 0x60
172#define R600_HDMI_32kHz_CTS 0xac
173#define R600_HDMI_32kHz_N 0xb0
174#define R600_HDMI_44_1kHz_CTS 0xb4
175#define R600_HDMI_44_1kHz_N 0xb8
176#define R600_HDMI_48kHz_CTS 0xbc
177#define R600_HDMI_48kHz_N 0xc0
178#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
179#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
180#define R600_HDMI_IEC60958_1 0xd4
181#define R600_HDMI_IEC60958_2 0xd8
182#define R600_HDMI_UNKNOWN_2 0xdc
183#define R600_HDMI_AUDIO_DEBUG_0 0xe0
184#define R600_HDMI_AUDIO_DEBUG_1 0xe4
185#define R600_HDMI_AUDIO_DEBUG_2 0xe8
186#define R600_HDMI_AUDIO_DEBUG_3 0xec
113 187
114#endif 188#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c938bb54123c..cd650fd3964e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,6 +89,7 @@ extern int radeon_testing;
89extern int radeon_connector_table; 89extern int radeon_connector_table;
90extern int radeon_tv; 90extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_audio;
92 93
93/* 94/*
94 * Copy from radeon_drv.h so we don't have to include both and have conflicting 95 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -814,6 +815,14 @@ struct radeon_device {
814 struct r600_ih ih; /* r6/700 interrupt ring */ 815 struct r600_ih ih; /* r6/700 interrupt ring */
815 struct workqueue_struct *wq; 816 struct workqueue_struct *wq;
816 struct work_struct hotplug_work; 817 struct work_struct hotplug_work;
818
819 /* audio stuff */
820 struct timer_list audio_timer;
821 int audio_channels;
822 int audio_rate;
823 int audio_bits_per_sample;
824 uint8_t audio_status_bits;
825 uint8_t audio_category_code;
817}; 826};
818 827
819int radeon_device_init(struct radeon_device *rdev, 828int radeon_device_init(struct radeon_device *rdev,
@@ -1016,6 +1025,7 @@ extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
1016extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); 1025extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
1017extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 1026extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
1018extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); 1027extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
1028extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
1019 1029
1020/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1030/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1021struct r100_mc_save { 1031struct r100_mc_save {
@@ -1146,6 +1156,21 @@ extern void r600_irq_fini(struct radeon_device *rdev);
1146extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1156extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1147extern int r600_irq_set(struct radeon_device *rdev); 1157extern int r600_irq_set(struct radeon_device *rdev);
1148 1158
1159extern int r600_audio_init(struct radeon_device *rdev);
1160extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1161extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
1162extern void r600_audio_fini(struct radeon_device *rdev);
1163extern void r600_hdmi_init(struct drm_encoder *encoder);
1164extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
1165extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1166extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
1167extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
1168 int channels,
1169 int rate,
1170 int bps,
1171 uint8_t status_bits,
1172 uint8_t category_code);
1173
1149#include "radeon_object.h" 1174#include "radeon_object.h"
1150 1175
1151#endif 1176#endif
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c5c45e626d74..dbd56ef82f9c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -87,6 +87,7 @@ int radeon_testing = 0;
87int radeon_connector_table = 0; 87int radeon_connector_table = 0;
88int radeon_tv = 1; 88int radeon_tv = 1;
89int radeon_new_pll = 1; 89int radeon_new_pll = 1;
90int radeon_audio = 1;
90 91
91MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 92MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
92module_param_named(no_wb, radeon_no_wb, int, 0444); 93module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -124,6 +125,9 @@ module_param_named(tv, radeon_tv, int, 0444);
124MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips"); 125MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
125module_param_named(new_pll, radeon_new_pll, int, 0444); 126module_param_named(new_pll, radeon_new_pll, int, 0444);
126 127
128MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
129module_param_named(audio, radeon_audio, int, 0444);
130
127static int radeon_suspend(struct drm_device *dev, pm_message_t state) 131static int radeon_suspend(struct drm_device *dev, pm_message_t state)
128{ 132{
129 drm_radeon_private_t *dev_priv = dev->dev_private; 133 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b4f23ec93201..0d1d908e5225 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -438,6 +438,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
438 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 438 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
439 union lvds_encoder_control args; 439 union lvds_encoder_control args;
440 int index = 0; 440 int index = 0;
441 int hdmi_detected = 0;
441 uint8_t frev, crev; 442 uint8_t frev, crev;
442 struct radeon_encoder_atom_dig *dig; 443 struct radeon_encoder_atom_dig *dig;
443 struct drm_connector *connector; 444 struct drm_connector *connector;
@@ -458,6 +459,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
458 if (!radeon_connector->con_priv) 459 if (!radeon_connector->con_priv)
459 return; 460 return;
460 461
462 if (drm_detect_hdmi_monitor(radeon_connector->edid))
463 hdmi_detected = 1;
464
461 dig_connector = radeon_connector->con_priv; 465 dig_connector = radeon_connector->con_priv;
462 466
463 memset(&args, 0, sizeof(args)); 467 memset(&args, 0, sizeof(args));
@@ -487,7 +491,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
487 case 1: 491 case 1:
488 args.v1.ucMisc = 0; 492 args.v1.ucMisc = 0;
489 args.v1.ucAction = action; 493 args.v1.ucAction = action;
490 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 494 if (hdmi_detected)
491 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; 495 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
492 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 496 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
493 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 497 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -512,7 +516,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
512 if (dig->coherent_mode) 516 if (dig->coherent_mode)
513 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; 517 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
514 } 518 }
515 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 519 if (hdmi_detected)
516 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; 520 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
517 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 521 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
518 args.v2.ucTruncate = 0; 522 args.v2.ucTruncate = 0;
@@ -552,7 +556,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
552 } 556 }
553 557
554 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 558 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
555 559 r600_hdmi_enable(encoder, hdmi_detected);
556} 560}
557 561
558int 562int
@@ -893,7 +897,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
893 } 897 }
894 898
895 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 899 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
896
897} 900}
898 901
899static void 902static void
@@ -1162,7 +1165,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1162 } 1165 }
1163 1166
1164 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1167 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1165
1166} 1168}
1167 1169
1168static void 1170static void
@@ -1265,6 +1267,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1265 break; 1267 break;
1266 } 1268 }
1267 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1269 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1270
1271 r600_hdmi_setmode(encoder, adjusted_mode);
1268} 1272}
1269 1273
1270static bool 1274static bool
@@ -1510,4 +1514,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1510 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 1514 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1511 break; 1515 break;
1512 } 1516 }
1517
1518 r600_hdmi_init(encoder);
1513} 1519}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 2944486871b0..60df2d7e7e4c 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -66,8 +66,9 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
66 } 66 }
67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); 67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
68 if (r) { 68 if (r) {
69 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", 69 if (r != -ERESTARTSYS)
70 size, initial_domain, alignment); 70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
71 size, initial_domain, alignment, r);
71 mutex_lock(&rdev->ddev->struct_mutex); 72 mutex_lock(&rdev->ddev->struct_mutex);
72 drm_gem_object_unreference(gobj); 73 drm_gem_object_unreference(gobj);
73 mutex_unlock(&rdev->ddev->struct_mutex); 74 mutex_unlock(&rdev->ddev->struct_mutex);
@@ -350,9 +351,10 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
350 rbo = gobj->driver_private; 351 rbo = gobj->driver_private;
351 r = radeon_bo_reserve(rbo, false); 352 r = radeon_bo_reserve(rbo, false);
352 if (unlikely(r != 0)) 353 if (unlikely(r != 0))
353 return r; 354 goto out;
354 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 355 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
355 radeon_bo_unreserve(rbo); 356 radeon_bo_unreserve(rbo);
357out:
356 mutex_lock(&dev->struct_mutex); 358 mutex_lock(&dev->struct_mutex);
357 drm_gem_object_unreference(gobj); 359 drm_gem_object_unreference(gobj);
358 mutex_unlock(&dev->struct_mutex); 360 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 44d4b652ea12..3dcbe130c422 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -334,6 +334,9 @@ struct radeon_encoder {
334 enum radeon_rmx_type rmx_type; 334 enum radeon_rmx_type rmx_type;
335 struct drm_display_mode native_mode; 335 struct drm_display_mode native_mode;
336 void *enc_priv; 336 void *enc_priv;
337 int hdmi_offset;
338 int hdmi_audio_workaround;
339 int hdmi_buffer_status;
337}; 340};
338 341
339struct radeon_connector_atom_dig { 342struct radeon_connector_atom_dig {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 544e18ffaf22..d9ffe1f56e8f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -56,6 +56,13 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
56 kfree(bo); 56 kfree(bo);
57} 57}
58 58
59bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
60{
61 if (bo->destroy == &radeon_ttm_bo_destroy)
62 return true;
63 return false;
64}
65
59void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) 66void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
60{ 67{
61 u32 c = 0; 68 u32 c = 0;
@@ -71,6 +78,8 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
71 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 78 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
72 if (domain & RADEON_GEM_DOMAIN_CPU) 79 if (domain & RADEON_GEM_DOMAIN_CPU)
73 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 80 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
81 if (!c)
82 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
74 rbo->placement.num_placement = c; 83 rbo->placement.num_placement = c;
75 rbo->placement.num_busy_placement = c; 84 rbo->placement.num_busy_placement = c;
76} 85}
@@ -481,14 +490,20 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
481} 490}
482 491
483void radeon_bo_move_notify(struct ttm_buffer_object *bo, 492void radeon_bo_move_notify(struct ttm_buffer_object *bo,
484 struct ttm_mem_reg *mem) 493 struct ttm_mem_reg *mem)
485{ 494{
486 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 495 struct radeon_bo *rbo;
496 if (!radeon_ttm_bo_is_radeon_bo(bo))
497 return;
498 rbo = container_of(bo, struct radeon_bo, tbo);
487 radeon_bo_check_tiling(rbo, 0, 1); 499 radeon_bo_check_tiling(rbo, 0, 1);
488} 500}
489 501
490void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 502void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
491{ 503{
492 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 504 struct radeon_bo *rbo;
505 if (!radeon_ttm_bo_is_radeon_bo(bo))
506 return;
507 rbo = container_of(bo, struct radeon_bo, tbo);
493 radeon_bo_check_tiling(rbo, 0, 0); 508 radeon_bo_check_tiling(rbo, 0, 0);
494} 509}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f6b69c2c0d00..a02f18011ad1 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -59,19 +59,17 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
59 * 59 *
60 * Returns: 60 * Returns:
61 * -EBUSY: buffer is busy and @no_wait is true 61 * -EBUSY: buffer is busy and @no_wait is true
62 * -ERESTART: A wait for the buffer to become unreserved was interrupted by 62 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
63 * a signal. Release all buffer reservations and return to user-space. 63 * a signal. Release all buffer reservations and return to user-space.
64 */ 64 */
65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) 65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
66{ 66{
67 int r; 67 int r;
68 68
69retry:
70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 69 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
71 if (unlikely(r != 0)) { 70 if (unlikely(r != 0)) {
72 if (r == -ERESTART) 71 if (r != -ERESTARTSYS)
73 goto retry; 72 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
74 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
75 return r; 73 return r;
76 } 74 }
77 return 0; 75 return 0;
@@ -125,12 +123,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
125{ 123{
126 int r; 124 int r;
127 125
128retry:
129 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
130 if (unlikely(r != 0)) { 127 if (unlikely(r != 0)) {
131 if (r == -ERESTART) 128 if (r != -ERESTARTSYS)
132 goto retry; 129 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
133 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
134 return r; 130 return r;
135 } 131 }
136 spin_lock(&bo->tbo.lock); 132 spin_lock(&bo->tbo.lock);
@@ -140,8 +136,6 @@ retry:
140 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 136 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
141 spin_unlock(&bo->tbo.lock); 137 spin_unlock(&bo->tbo.lock);
142 ttm_bo_unreserve(&bo->tbo); 138 ttm_bo_unreserve(&bo->tbo);
143 if (unlikely(r == -ERESTART))
144 goto retry;
145 return r; 139 return r;
146} 140}
147 141
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 34b08d307c81..8bce64cdc320 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -44,8 +44,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
44 struct drm_device *dev = node->minor->dev; 44 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private; 45 struct radeon_device *rdev = dev->dev_private;
46 46
47 seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 47 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
48 seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 48 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
49 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
50 if (rdev->asic->get_memory_clock)
51 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
49 52
50 return 0; 53 return 0;
51} 54}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5a19d529d1c0..d7fd160cc671 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -200,7 +200,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
200static void radeon_evict_flags(struct ttm_buffer_object *bo, 200static void radeon_evict_flags(struct ttm_buffer_object *bo,
201 struct ttm_placement *placement) 201 struct ttm_placement *placement)
202{ 202{
203 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 203 struct radeon_bo *rbo;
204 static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
205
206 if (!radeon_ttm_bo_is_radeon_bo(bo)) {
207 placement->fpfn = 0;
208 placement->lpfn = 0;
209 placement->placement = &placements;
210 placement->busy_placement = &placements;
211 placement->num_placement = 1;
212 placement->num_busy_placement = 1;
213 return;
214 }
215 rbo = container_of(bo, struct radeon_bo, tbo);
204 switch (bo->mem.mem_type) { 216 switch (bo->mem.mem_type) {
205 case TTM_PL_VRAM: 217 case TTM_PL_VRAM:
206 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 218 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c1fcdddb6be6..368415df5f3a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -497,6 +497,8 @@ int rs400_init(struct radeon_device *rdev)
497 497
498 /* Initialize clocks */ 498 /* Initialize clocks */
499 radeon_get_clock_info(rdev->ddev); 499 radeon_get_clock_info(rdev->ddev);
500 /* Initialize power management */
501 radeon_pm_init(rdev);
500 /* Get vram informations */ 502 /* Get vram informations */
501 rs400_vram_info(rdev); 503 rs400_vram_info(rdev);
502 /* Initialize memory controller (also test AGP) */ 504 /* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index fbb0357f1ec3..3bcb66e52786 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -892,6 +892,14 @@ static int rv770_startup(struct radeon_device *rdev)
892 } 892 }
893 rv770_gpu_init(rdev); 893 rv770_gpu_init(rdev);
894 894
895 if (!rdev->r600_blit.shader_obj) {
896 r = r600_blit_init(rdev);
897 if (r) {
898 DRM_ERROR("radeon: failed blitter (%d).\n", r);
899 return r;
900 }
901 }
902
895 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 903 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
896 if (unlikely(r != 0)) 904 if (unlikely(r != 0))
897 return r; 905 return r;
@@ -1051,12 +1059,6 @@ int rv770_init(struct radeon_device *rdev)
1051 if (r) 1059 if (r)
1052 return r; 1060 return r;
1053 1061
1054 r = r600_blit_init(rdev);
1055 if (r) {
1056 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1057 return r;
1058 }
1059
1060 rdev->accel_working = true; 1062 rdev->accel_working = true;
1061 r = rv770_startup(rdev); 1063 r = rv770_startup(rdev);
1062 if (r) { 1064 if (r) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1fbb2eea5e88..2920f9a279e1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -71,34 +71,34 @@ static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
71 return -EINVAL; 71 return -EINVAL;
72} 72}
73 73
74static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob, 74static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
75 struct ttm_mem_type_manager *man)
76{ 75{
76 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
77
77 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type); 78 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
78 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); 79 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
79 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); 80 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
80 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); 81 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
81 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset); 82 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
82 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size); 83 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
83 printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size); 84 printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
84 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", 85 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
85 man->available_caching); 86 man->available_caching);
86 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", 87 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
87 man->default_caching); 88 man->default_caching);
88 spin_lock(&glob->lru_lock); 89 if (mem_type != TTM_PL_SYSTEM) {
89 drm_mm_debug_table(&man->manager, TTM_PFX); 90 spin_lock(&bdev->glob->lru_lock);
90 spin_unlock(&glob->lru_lock); 91 drm_mm_debug_table(&man->manager, TTM_PFX);
92 spin_unlock(&bdev->glob->lru_lock);
93 }
91} 94}
92 95
93static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, 96static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
94 struct ttm_placement *placement) 97 struct ttm_placement *placement)
95{ 98{
96 struct ttm_bo_device *bdev = bo->bdev;
97 struct ttm_bo_global *glob = bo->glob;
98 struct ttm_mem_type_manager *man;
99 int i, ret, mem_type; 99 int i, ret, mem_type;
100 100
101 printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n", 101 printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
102 bo, bo->mem.num_pages, bo->mem.size >> 10, 102 bo, bo->mem.num_pages, bo->mem.size >> 10,
103 bo->mem.size >> 20); 103 bo->mem.size >> 20);
104 for (i = 0; i < placement->num_placement; i++) { 104 for (i = 0; i < placement->num_placement; i++) {
@@ -106,10 +106,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
106 &mem_type); 106 &mem_type);
107 if (ret) 107 if (ret)
108 return; 108 return;
109 man = &bdev->man[mem_type];
110 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n", 109 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
111 i, placement->placement[i], mem_type); 110 i, placement->placement[i], mem_type);
112 ttm_mem_type_manager_debug(glob, man); 111 ttm_mem_type_debug(bo->bdev, mem_type);
113 } 112 }
114} 113}
115 114
@@ -465,6 +464,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
465 spin_unlock(&bo->lock); 464 spin_unlock(&bo->lock);
466 465
467 spin_lock(&glob->lru_lock); 466 spin_lock(&glob->lru_lock);
467 put_count = ttm_bo_del_from_lru(bo);
468
468 ret = ttm_bo_reserve_locked(bo, false, false, false, 0); 469 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
469 BUG_ON(ret); 470 BUG_ON(ret);
470 if (bo->ttm) 471 if (bo->ttm)
@@ -472,20 +473,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
472 473
473 if (!list_empty(&bo->ddestroy)) { 474 if (!list_empty(&bo->ddestroy)) {
474 list_del_init(&bo->ddestroy); 475 list_del_init(&bo->ddestroy);
475 kref_put(&bo->list_kref, ttm_bo_ref_bug); 476 ++put_count;
476 } 477 }
477 if (bo->mem.mm_node) { 478 if (bo->mem.mm_node) {
478 bo->mem.mm_node->private = NULL; 479 bo->mem.mm_node->private = NULL;
479 drm_mm_put_block(bo->mem.mm_node); 480 drm_mm_put_block(bo->mem.mm_node);
480 bo->mem.mm_node = NULL; 481 bo->mem.mm_node = NULL;
481 } 482 }
482 put_count = ttm_bo_del_from_lru(bo);
483 spin_unlock(&glob->lru_lock); 483 spin_unlock(&glob->lru_lock);
484 484
485 atomic_set(&bo->reserved, 0); 485 atomic_set(&bo->reserved, 0);
486 486
487 while (put_count--) 487 while (put_count--)
488 kref_put(&bo->list_kref, ttm_bo_release_list); 488 kref_put(&bo->list_kref, ttm_bo_ref_bug);
489 489
490 return 0; 490 return 0;
491 } 491 }
@@ -684,19 +684,45 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
684 struct ttm_buffer_object *bo; 684 struct ttm_buffer_object *bo;
685 int ret, put_count = 0; 685 int ret, put_count = 0;
686 686
687retry:
687 spin_lock(&glob->lru_lock); 688 spin_lock(&glob->lru_lock);
689 if (list_empty(&man->lru)) {
690 spin_unlock(&glob->lru_lock);
691 return -EBUSY;
692 }
693
688 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); 694 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
689 kref_get(&bo->list_kref); 695 kref_get(&bo->list_kref);
690 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0); 696
691 if (likely(ret == 0)) 697 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
692 put_count = ttm_bo_del_from_lru(bo); 698
699 if (unlikely(ret == -EBUSY)) {
700 spin_unlock(&glob->lru_lock);
701 if (likely(!no_wait))
702 ret = ttm_bo_wait_unreserved(bo, interruptible);
703
704 kref_put(&bo->list_kref, ttm_bo_release_list);
705
706 /**
707 * We *need* to retry after releasing the lru lock.
708 */
709
710 if (unlikely(ret != 0))
711 return ret;
712 goto retry;
713 }
714
715 put_count = ttm_bo_del_from_lru(bo);
693 spin_unlock(&glob->lru_lock); 716 spin_unlock(&glob->lru_lock);
694 if (unlikely(ret != 0)) 717
695 return ret; 718 BUG_ON(ret != 0);
719
696 while (put_count--) 720 while (put_count--)
697 kref_put(&bo->list_kref, ttm_bo_ref_bug); 721 kref_put(&bo->list_kref, ttm_bo_ref_bug);
722
698 ret = ttm_bo_evict(bo, interruptible, no_wait); 723 ret = ttm_bo_evict(bo, interruptible, no_wait);
699 ttm_bo_unreserve(bo); 724 ttm_bo_unreserve(bo);
725
700 kref_put(&bo->list_kref, ttm_bo_release_list); 726 kref_put(&bo->list_kref, ttm_bo_release_list);
701 return ret; 727 return ret;
702} 728}
@@ -849,7 +875,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
849 int i, ret; 875 int i, ret;
850 876
851 mem->mm_node = NULL; 877 mem->mm_node = NULL;
852 for (i = 0; i <= placement->num_placement; ++i) { 878 for (i = 0; i < placement->num_placement; ++i) {
853 ret = ttm_mem_type_from_flags(placement->placement[i], 879 ret = ttm_mem_type_from_flags(placement->placement[i],
854 &mem_type); 880 &mem_type);
855 if (ret) 881 if (ret)
@@ -900,8 +926,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
900 if (!type_found) 926 if (!type_found)
901 return -EINVAL; 927 return -EINVAL;
902 928
903 for (i = 0; i <= placement->num_busy_placement; ++i) { 929 for (i = 0; i < placement->num_busy_placement; ++i) {
904 ret = ttm_mem_type_from_flags(placement->placement[i], 930 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
905 &mem_type); 931 &mem_type);
906 if (ret) 932 if (ret)
907 return ret; 933 return ret;
@@ -911,7 +937,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
911 if (!ttm_bo_mt_compatible(man, 937 if (!ttm_bo_mt_compatible(man,
912 bo->type == ttm_bo_type_user, 938 bo->type == ttm_bo_type_user,
913 mem_type, 939 mem_type,
914 placement->placement[i], 940 placement->busy_placement[i],
915 &cur_flags)) 941 &cur_flags))
916 continue; 942 continue;
917 943
@@ -921,7 +947,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
921 * Use the access and other non-mapping-related flag bits from 947 * Use the access and other non-mapping-related flag bits from
922 * the memory placement flags to the current flags 948 * the memory placement flags to the current flags
923 */ 949 */
924 ttm_flag_masked(&cur_flags, placement->placement[i], 950 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
925 ~TTM_PL_MASK_MEMTYPE); 951 ~TTM_PL_MASK_MEMTYPE);
926 952
927 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 953 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
@@ -1115,6 +1141,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1115 bo->glob = bdev->glob; 1141 bo->glob = bdev->glob;
1116 bo->type = type; 1142 bo->type = type;
1117 bo->num_pages = num_pages; 1143 bo->num_pages = num_pages;
1144 bo->mem.size = num_pages << PAGE_SHIFT;
1118 bo->mem.mem_type = TTM_PL_SYSTEM; 1145 bo->mem.mem_type = TTM_PL_SYSTEM;
1119 bo->mem.num_pages = bo->num_pages; 1146 bo->mem.num_pages = bo->num_pages;
1120 bo->mem.mm_node = NULL; 1147 bo->mem.mm_node = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 609a85a4d855..668dbe8b8dd3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
320 return -EFAULT; 320 return -EFAULT;
321 321
322 driver = bo->bdev->driver; 322 driver = bo->bdev->driver;
323 if (unlikely(driver->verify_access)) { 323 if (unlikely(!driver->verify_access)) {
324 ret = -EPERM; 324 ret = -EPERM;
325 goto out_unref; 325 goto out_unref;
326 } 326 }
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9e640c62ebd9..95ccbe377f9c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1046,25 +1046,27 @@ config SENSORS_ATK0110
1046 will be called asus_atk0110. 1046 will be called asus_atk0110.
1047 1047
1048config SENSORS_LIS3LV02D 1048config SENSORS_LIS3LV02D
1049 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer" 1049 tristate "STMicroeletronics LIS3* three-axis digital accelerometer"
1050 depends on INPUT 1050 depends on INPUT
1051 select INPUT_POLLDEV 1051 select INPUT_POLLDEV
1052 select NEW_LEDS 1052 select NEW_LEDS
1053 select LEDS_CLASS 1053 select LEDS_CLASS
1054 default n 1054 default n
1055 help 1055 help
1056 This driver provides support for the LIS3LV02Dx accelerometer. In 1056 This driver provides support for the LIS3* accelerometers, such as the
1057 particular, it can be found in a number of HP laptops, which have the 1057 LIS3LV02DL or the LIS331DL. In particular, it can be found in a number
1058 "Mobile Data Protection System 3D" or "3D DriveGuard" feature. On such 1058 of HP laptops, which have the "Mobile Data Protection System 3D" or
1059 systems the driver should load automatically (via ACPI). The 1059 "3D DriveGuard" feature. On such systems the driver should load
1060 accelerometer might also be found in other systems, connected via SPI 1060 automatically (via ACPI alias). The accelerometer might also be found
1061 or I2C. The accelerometer data is readable via 1061 in other systems, connected via SPI or I2C. The accelerometer data is
1062 /sys/devices/platform/lis3lv02d. 1062 readable via /sys/devices/platform/lis3lv02d.
1063 1063
1064 This driver also provides an absolute input class device, allowing 1064 This driver also provides an absolute input class device, allowing
1065 the laptop to act as a pinball machine-esque joystick. On HP laptops, 1065 a laptop to act as a pinball machine-esque joystick. It provides also
1066 a misc device which can be used to detect free-fall. On HP laptops,
1066 if the led infrastructure is activated, support for a led indicating 1067 if the led infrastructure is activated, support for a led indicating
1067 disk protection will be provided as hp:red:hddprotection. 1068 disk protection will be provided as hp::hddprotect. For more
1069 information on the feature, refer to Documentation/hwmon/lis3lv02d.
1068 1070
1069 This driver can also be built as modules. If so, the core module 1071 This driver can also be built as modules. If so, the core module
1070 will be called lis3lv02d and a specific module for HP laptops will be 1072 will be called lis3lv02d and a specific module for HP laptops will be
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 33acf29531af..1ad0a885c5a5 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -34,9 +34,8 @@
34static const unsigned short normal_i2c[] = { 34static const unsigned short normal_i2c[] = {
35 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; 35 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
36 36
37/* Insmod parameters */ 37enum chips {
38I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, 38 adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 };
39 mc1066);
40 39
41/* adm1021 constants specified below */ 40/* adm1021 constants specified below */
42 41
@@ -97,7 +96,7 @@ struct adm1021_data {
97 96
98static int adm1021_probe(struct i2c_client *client, 97static int adm1021_probe(struct i2c_client *client,
99 const struct i2c_device_id *id); 98 const struct i2c_device_id *id);
100static int adm1021_detect(struct i2c_client *client, int kind, 99static int adm1021_detect(struct i2c_client *client,
101 struct i2c_board_info *info); 100 struct i2c_board_info *info);
102static void adm1021_init_client(struct i2c_client *client); 101static void adm1021_init_client(struct i2c_client *client);
103static int adm1021_remove(struct i2c_client *client); 102static int adm1021_remove(struct i2c_client *client);
@@ -130,7 +129,7 @@ static struct i2c_driver adm1021_driver = {
130 .remove = adm1021_remove, 129 .remove = adm1021_remove,
131 .id_table = adm1021_id, 130 .id_table = adm1021_id,
132 .detect = adm1021_detect, 131 .detect = adm1021_detect,
133 .address_data = &addr_data, 132 .address_list = normal_i2c,
134}; 133};
135 134
136static ssize_t show_temp(struct device *dev, 135static ssize_t show_temp(struct device *dev,
@@ -284,7 +283,7 @@ static const struct attribute_group adm1021_group = {
284}; 283};
285 284
286/* Return 0 if detection is successful, -ENODEV otherwise */ 285/* Return 0 if detection is successful, -ENODEV otherwise */
287static int adm1021_detect(struct i2c_client *client, int kind, 286static int adm1021_detect(struct i2c_client *client,
288 struct i2c_board_info *info) 287 struct i2c_board_info *info)
289{ 288{
290 struct i2c_adapter *adapter = client->adapter; 289 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index db6ac2b04f6f..251b63165e2a 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -64,11 +64,7 @@
64 64
65static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 65static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
66 66
67/* 67enum chips { adm1025, ne1619 };
68 * Insmod parameters
69 */
70
71I2C_CLIENT_INSMOD_2(adm1025, ne1619);
72 68
73/* 69/*
74 * The ADM1025 registers 70 * The ADM1025 registers
@@ -111,7 +107,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
111 107
112static int adm1025_probe(struct i2c_client *client, 108static int adm1025_probe(struct i2c_client *client,
113 const struct i2c_device_id *id); 109 const struct i2c_device_id *id);
114static int adm1025_detect(struct i2c_client *client, int kind, 110static int adm1025_detect(struct i2c_client *client,
115 struct i2c_board_info *info); 111 struct i2c_board_info *info);
116static void adm1025_init_client(struct i2c_client *client); 112static void adm1025_init_client(struct i2c_client *client);
117static int adm1025_remove(struct i2c_client *client); 113static int adm1025_remove(struct i2c_client *client);
@@ -137,7 +133,7 @@ static struct i2c_driver adm1025_driver = {
137 .remove = adm1025_remove, 133 .remove = adm1025_remove,
138 .id_table = adm1025_id, 134 .id_table = adm1025_id,
139 .detect = adm1025_detect, 135 .detect = adm1025_detect,
140 .address_data = &addr_data, 136 .address_list = normal_i2c,
141}; 137};
142 138
143/* 139/*
@@ -409,7 +405,7 @@ static const struct attribute_group adm1025_group_in4 = {
409}; 405};
410 406
411/* Return 0 if detection is successful, -ENODEV otherwise */ 407/* Return 0 if detection is successful, -ENODEV otherwise */
412static int adm1025_detect(struct i2c_client *client, int kind, 408static int adm1025_detect(struct i2c_client *client,
413 struct i2c_board_info *info) 409 struct i2c_board_info *info)
414{ 410{
415 struct i2c_adapter *adapter = client->adapter; 411 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index fb5363985e21..65335b268fa9 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -37,9 +37,6 @@
37/* Addresses to scan */ 37/* Addresses to scan */
38static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 38static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
39 39
40/* Insmod parameters */
41I2C_CLIENT_INSMOD_1(adm1026);
42
43static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, 40static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
44 -1, -1, -1, -1, -1, -1, -1, -1 }; 41 -1, -1, -1, -1, -1, -1, -1, -1 };
45static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, 42static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -293,7 +290,7 @@ struct adm1026_data {
293 290
294static int adm1026_probe(struct i2c_client *client, 291static int adm1026_probe(struct i2c_client *client,
295 const struct i2c_device_id *id); 292 const struct i2c_device_id *id);
296static int adm1026_detect(struct i2c_client *client, int kind, 293static int adm1026_detect(struct i2c_client *client,
297 struct i2c_board_info *info); 294 struct i2c_board_info *info);
298static int adm1026_remove(struct i2c_client *client); 295static int adm1026_remove(struct i2c_client *client);
299static int adm1026_read_value(struct i2c_client *client, u8 reg); 296static int adm1026_read_value(struct i2c_client *client, u8 reg);
@@ -305,7 +302,7 @@ static void adm1026_init_client(struct i2c_client *client);
305 302
306 303
307static const struct i2c_device_id adm1026_id[] = { 304static const struct i2c_device_id adm1026_id[] = {
308 { "adm1026", adm1026 }, 305 { "adm1026", 0 },
309 { } 306 { }
310}; 307};
311MODULE_DEVICE_TABLE(i2c, adm1026_id); 308MODULE_DEVICE_TABLE(i2c, adm1026_id);
@@ -319,7 +316,7 @@ static struct i2c_driver adm1026_driver = {
319 .remove = adm1026_remove, 316 .remove = adm1026_remove,
320 .id_table = adm1026_id, 317 .id_table = adm1026_id,
321 .detect = adm1026_detect, 318 .detect = adm1026_detect,
322 .address_data = &addr_data, 319 .address_list = normal_i2c,
323}; 320};
324 321
325static int adm1026_read_value(struct i2c_client *client, u8 reg) 322static int adm1026_read_value(struct i2c_client *client, u8 reg)
@@ -1650,7 +1647,7 @@ static const struct attribute_group adm1026_group_in8_9 = {
1650}; 1647};
1651 1648
1652/* Return 0 if detection is successful, -ENODEV otherwise */ 1649/* Return 0 if detection is successful, -ENODEV otherwise */
1653static int adm1026_detect(struct i2c_client *client, int kind, 1650static int adm1026_detect(struct i2c_client *client,
1654 struct i2c_board_info *info) 1651 struct i2c_board_info *info)
1655{ 1652{
1656 struct i2c_adapter *adapter = client->adapter; 1653 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index ef91e2a4a567..0b8a3b145bd2 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -44,12 +44,6 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
44}; 44};
45 45
46/* 46/*
47 * Insmod parameters
48 */
49
50I2C_CLIENT_INSMOD_1(adm1029);
51
52/*
53 * The ADM1029 registers 47 * The ADM1029 registers
54 * Manufacturer ID is 0x41 for Analog Devices 48 * Manufacturer ID is 0x41 for Analog Devices
55 */ 49 */
@@ -117,7 +111,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = {
117 111
118static int adm1029_probe(struct i2c_client *client, 112static int adm1029_probe(struct i2c_client *client,
119 const struct i2c_device_id *id); 113 const struct i2c_device_id *id);
120static int adm1029_detect(struct i2c_client *client, int kind, 114static int adm1029_detect(struct i2c_client *client,
121 struct i2c_board_info *info); 115 struct i2c_board_info *info);
122static int adm1029_remove(struct i2c_client *client); 116static int adm1029_remove(struct i2c_client *client);
123static struct adm1029_data *adm1029_update_device(struct device *dev); 117static struct adm1029_data *adm1029_update_device(struct device *dev);
@@ -128,7 +122,7 @@ static int adm1029_init_client(struct i2c_client *client);
128 */ 122 */
129 123
130static const struct i2c_device_id adm1029_id[] = { 124static const struct i2c_device_id adm1029_id[] = {
131 { "adm1029", adm1029 }, 125 { "adm1029", 0 },
132 { } 126 { }
133}; 127};
134MODULE_DEVICE_TABLE(i2c, adm1029_id); 128MODULE_DEVICE_TABLE(i2c, adm1029_id);
@@ -142,7 +136,7 @@ static struct i2c_driver adm1029_driver = {
142 .remove = adm1029_remove, 136 .remove = adm1029_remove,
143 .id_table = adm1029_id, 137 .id_table = adm1029_id,
144 .detect = adm1029_detect, 138 .detect = adm1029_detect,
145 .address_data = &addr_data, 139 .address_list = normal_i2c,
146}; 140};
147 141
148/* 142/*
@@ -297,7 +291,7 @@ static const struct attribute_group adm1029_group = {
297 */ 291 */
298 292
299/* Return 0 if detection is successful, -ENODEV otherwise */ 293/* Return 0 if detection is successful, -ENODEV otherwise */
300static int adm1029_detect(struct i2c_client *client, int kind, 294static int adm1029_detect(struct i2c_client *client,
301 struct i2c_board_info *info) 295 struct i2c_board_info *info)
302{ 296{
303 struct i2c_adapter *adapter = client->adapter; 297 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 0e722175aae0..1644b92e7cc4 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -64,8 +64,7 @@
64/* Addresses to scan */ 64/* Addresses to scan */
65static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 65static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
66 66
67/* Insmod parameters */ 67enum chips { adm1030, adm1031 };
68I2C_CLIENT_INSMOD_2(adm1030, adm1031);
69 68
70typedef u8 auto_chan_table_t[8][2]; 69typedef u8 auto_chan_table_t[8][2];
71 70
@@ -102,7 +101,7 @@ struct adm1031_data {
102 101
103static int adm1031_probe(struct i2c_client *client, 102static int adm1031_probe(struct i2c_client *client,
104 const struct i2c_device_id *id); 103 const struct i2c_device_id *id);
105static int adm1031_detect(struct i2c_client *client, int kind, 104static int adm1031_detect(struct i2c_client *client,
106 struct i2c_board_info *info); 105 struct i2c_board_info *info);
107static void adm1031_init_client(struct i2c_client *client); 106static void adm1031_init_client(struct i2c_client *client);
108static int adm1031_remove(struct i2c_client *client); 107static int adm1031_remove(struct i2c_client *client);
@@ -125,7 +124,7 @@ static struct i2c_driver adm1031_driver = {
125 .remove = adm1031_remove, 124 .remove = adm1031_remove,
126 .id_table = adm1031_id, 125 .id_table = adm1031_id,
127 .detect = adm1031_detect, 126 .detect = adm1031_detect,
128 .address_data = &addr_data, 127 .address_list = normal_i2c,
129}; 128};
130 129
131static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg) 130static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg)
@@ -813,7 +812,7 @@ static const struct attribute_group adm1031_group_opt = {
813}; 812};
814 813
815/* Return 0 if detection is successful, -ENODEV otherwise */ 814/* Return 0 if detection is successful, -ENODEV otherwise */
816static int adm1031_detect(struct i2c_client *client, int kind, 815static int adm1031_detect(struct i2c_client *client,
817 struct i2c_board_info *info) 816 struct i2c_board_info *info)
818{ 817{
819 struct i2c_adapter *adapter = client->adapter; 818 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 20e0481cc206..0727ad250793 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -55,8 +55,7 @@
55static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, 55static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
56 I2C_CLIENT_END }; 56 I2C_CLIENT_END };
57 57
58/* Insmod parameters */ 58enum chips { adm9240, ds1780, lm81 };
59I2C_CLIENT_INSMOD_3(adm9240, ds1780, lm81);
60 59
61/* ADM9240 registers */ 60/* ADM9240 registers */
62#define ADM9240_REG_MAN_ID 0x3e 61#define ADM9240_REG_MAN_ID 0x3e
@@ -132,7 +131,7 @@ static inline unsigned int AOUT_FROM_REG(u8 reg)
132 131
133static int adm9240_probe(struct i2c_client *client, 132static int adm9240_probe(struct i2c_client *client,
134 const struct i2c_device_id *id); 133 const struct i2c_device_id *id);
135static int adm9240_detect(struct i2c_client *client, int kind, 134static int adm9240_detect(struct i2c_client *client,
136 struct i2c_board_info *info); 135 struct i2c_board_info *info);
137static void adm9240_init_client(struct i2c_client *client); 136static void adm9240_init_client(struct i2c_client *client);
138static int adm9240_remove(struct i2c_client *client); 137static int adm9240_remove(struct i2c_client *client);
@@ -156,7 +155,7 @@ static struct i2c_driver adm9240_driver = {
156 .remove = adm9240_remove, 155 .remove = adm9240_remove,
157 .id_table = adm9240_id, 156 .id_table = adm9240_id,
158 .detect = adm9240_detect, 157 .detect = adm9240_detect,
159 .address_data = &addr_data, 158 .address_list = normal_i2c,
160}; 159};
161 160
162/* per client data */ 161/* per client data */
@@ -545,7 +544,7 @@ static const struct attribute_group adm9240_group = {
545/*** sensor chip detect and driver install ***/ 544/*** sensor chip detect and driver install ***/
546 545
547/* Return 0 if detection is successful, -ENODEV otherwise */ 546/* Return 0 if detection is successful, -ENODEV otherwise */
548static int adm9240_detect(struct i2c_client *new_client, int kind, 547static int adm9240_detect(struct i2c_client *new_client,
549 struct i2c_board_info *info) 548 struct i2c_board_info *info)
550{ 549{
551 struct i2c_adapter *adapter = new_client->adapter; 550 struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 451977bca7d6..aac85f3aed50 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -47,10 +47,7 @@
47static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 47static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
48 I2C_CLIENT_END }; 48 I2C_CLIENT_END };
49 49
50/* Insmod parameters */ 50/* Module parameters */
51I2C_CLIENT_INSMOD_1(ads7828);
52
53/* Other module parameters */
54static int se_input = 1; /* Default is SE, 0 == diff */ 51static int se_input = 1; /* Default is SE, 0 == diff */
55static int int_vref = 1; /* Default is internal ref ON */ 52static int int_vref = 1; /* Default is internal ref ON */
56static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */ 53static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */
@@ -72,7 +69,7 @@ struct ads7828_data {
72}; 69};
73 70
74/* Function declaration - necessary due to function dependencies */ 71/* Function declaration - necessary due to function dependencies */
75static int ads7828_detect(struct i2c_client *client, int kind, 72static int ads7828_detect(struct i2c_client *client,
76 struct i2c_board_info *info); 73 struct i2c_board_info *info);
77static int ads7828_probe(struct i2c_client *client, 74static int ads7828_probe(struct i2c_client *client,
78 const struct i2c_device_id *id); 75 const struct i2c_device_id *id);
@@ -168,7 +165,7 @@ static int ads7828_remove(struct i2c_client *client)
168} 165}
169 166
170static const struct i2c_device_id ads7828_id[] = { 167static const struct i2c_device_id ads7828_id[] = {
171 { "ads7828", ads7828 }, 168 { "ads7828", 0 },
172 { } 169 { }
173}; 170};
174MODULE_DEVICE_TABLE(i2c, ads7828_id); 171MODULE_DEVICE_TABLE(i2c, ads7828_id);
@@ -183,11 +180,11 @@ static struct i2c_driver ads7828_driver = {
183 .remove = ads7828_remove, 180 .remove = ads7828_remove,
184 .id_table = ads7828_id, 181 .id_table = ads7828_id,
185 .detect = ads7828_detect, 182 .detect = ads7828_detect,
186 .address_data = &addr_data, 183 .address_list = normal_i2c,
187}; 184};
188 185
189/* Return 0 if detection is successful, -ENODEV otherwise */ 186/* Return 0 if detection is successful, -ENODEV otherwise */
190static int ads7828_detect(struct i2c_client *client, int kind, 187static int ads7828_detect(struct i2c_client *client,
191 struct i2c_board_info *info) 188 struct i2c_board_info *info)
192{ 189{
193 struct i2c_adapter *adapter = client->adapter; 190 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index f9c9562b6a94..a1a7ef14b519 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -32,9 +32,6 @@
32/* Addresses to scan */ 32/* Addresses to scan */
33static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; 33static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
34 34
35/* Insmod parameters */
36I2C_CLIENT_INSMOD_1(adt7462);
37
38/* ADT7462 registers */ 35/* ADT7462 registers */
39#define ADT7462_REG_DEVICE 0x3D 36#define ADT7462_REG_DEVICE 0x3D
40#define ADT7462_REG_VENDOR 0x3E 37#define ADT7462_REG_VENDOR 0x3E
@@ -237,12 +234,12 @@ struct adt7462_data {
237 234
238static int adt7462_probe(struct i2c_client *client, 235static int adt7462_probe(struct i2c_client *client,
239 const struct i2c_device_id *id); 236 const struct i2c_device_id *id);
240static int adt7462_detect(struct i2c_client *client, int kind, 237static int adt7462_detect(struct i2c_client *client,
241 struct i2c_board_info *info); 238 struct i2c_board_info *info);
242static int adt7462_remove(struct i2c_client *client); 239static int adt7462_remove(struct i2c_client *client);
243 240
244static const struct i2c_device_id adt7462_id[] = { 241static const struct i2c_device_id adt7462_id[] = {
245 { "adt7462", adt7462 }, 242 { "adt7462", 0 },
246 { } 243 { }
247}; 244};
248MODULE_DEVICE_TABLE(i2c, adt7462_id); 245MODULE_DEVICE_TABLE(i2c, adt7462_id);
@@ -256,7 +253,7 @@ static struct i2c_driver adt7462_driver = {
256 .remove = adt7462_remove, 253 .remove = adt7462_remove,
257 .id_table = adt7462_id, 254 .id_table = adt7462_id,
258 .detect = adt7462_detect, 255 .detect = adt7462_detect,
259 .address_data = &addr_data, 256 .address_list = normal_i2c,
260}; 257};
261 258
262/* 259/*
@@ -1902,7 +1899,7 @@ static struct attribute *adt7462_attr[] =
1902}; 1899};
1903 1900
1904/* Return 0 if detection is successful, -ENODEV otherwise */ 1901/* Return 0 if detection is successful, -ENODEV otherwise */
1905static int adt7462_detect(struct i2c_client *client, int kind, 1902static int adt7462_detect(struct i2c_client *client,
1906 struct i2c_board_info *info) 1903 struct i2c_board_info *info)
1907{ 1904{
1908 struct i2c_adapter *adapter = client->adapter; 1905 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 32b1750a6890..3445ce1cba81 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -33,9 +33,6 @@
33/* Addresses to scan */ 33/* Addresses to scan */
34static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END }; 34static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
35 35
36/* Insmod parameters */
37I2C_CLIENT_INSMOD_1(adt7470);
38
39/* ADT7470 registers */ 36/* ADT7470 registers */
40#define ADT7470_REG_BASE_ADDR 0x20 37#define ADT7470_REG_BASE_ADDR 0x20
41#define ADT7470_REG_TEMP_BASE_ADDR 0x20 38#define ADT7470_REG_TEMP_BASE_ADDR 0x20
@@ -177,12 +174,12 @@ struct adt7470_data {
177 174
178static int adt7470_probe(struct i2c_client *client, 175static int adt7470_probe(struct i2c_client *client,
179 const struct i2c_device_id *id); 176 const struct i2c_device_id *id);
180static int adt7470_detect(struct i2c_client *client, int kind, 177static int adt7470_detect(struct i2c_client *client,
181 struct i2c_board_info *info); 178 struct i2c_board_info *info);
182static int adt7470_remove(struct i2c_client *client); 179static int adt7470_remove(struct i2c_client *client);
183 180
184static const struct i2c_device_id adt7470_id[] = { 181static const struct i2c_device_id adt7470_id[] = {
185 { "adt7470", adt7470 }, 182 { "adt7470", 0 },
186 { } 183 { }
187}; 184};
188MODULE_DEVICE_TABLE(i2c, adt7470_id); 185MODULE_DEVICE_TABLE(i2c, adt7470_id);
@@ -196,7 +193,7 @@ static struct i2c_driver adt7470_driver = {
196 .remove = adt7470_remove, 193 .remove = adt7470_remove,
197 .id_table = adt7470_id, 194 .id_table = adt7470_id,
198 .detect = adt7470_detect, 195 .detect = adt7470_detect,
199 .address_data = &addr_data, 196 .address_list = normal_i2c,
200}; 197};
201 198
202/* 199/*
@@ -1225,7 +1222,7 @@ static struct attribute *adt7470_attr[] =
1225}; 1222};
1226 1223
1227/* Return 0 if detection is successful, -ENODEV otherwise */ 1224/* Return 0 if detection is successful, -ENODEV otherwise */
1228static int adt7470_detect(struct i2c_client *client, int kind, 1225static int adt7470_detect(struct i2c_client *client,
1229 struct i2c_board_info *info) 1226 struct i2c_board_info *info)
1230{ 1227{
1231 struct i2c_adapter *adapter = client->adapter; 1228 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
index aea244db974e..434576f61c84 100644
--- a/drivers/hwmon/adt7473.c
+++ b/drivers/hwmon/adt7473.c
@@ -32,9 +32,6 @@
32/* Addresses to scan */ 32/* Addresses to scan */
33static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END }; 33static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END };
34 34
35/* Insmod parameters */
36I2C_CLIENT_INSMOD_1(adt7473);
37
38/* ADT7473 registers */ 35/* ADT7473 registers */
39#define ADT7473_REG_BASE_ADDR 0x20 36#define ADT7473_REG_BASE_ADDR 0x20
40 37
@@ -166,12 +163,12 @@ struct adt7473_data {
166 163
167static int adt7473_probe(struct i2c_client *client, 164static int adt7473_probe(struct i2c_client *client,
168 const struct i2c_device_id *id); 165 const struct i2c_device_id *id);
169static int adt7473_detect(struct i2c_client *client, int kind, 166static int adt7473_detect(struct i2c_client *client,
170 struct i2c_board_info *info); 167 struct i2c_board_info *info);
171static int adt7473_remove(struct i2c_client *client); 168static int adt7473_remove(struct i2c_client *client);
172 169
173static const struct i2c_device_id adt7473_id[] = { 170static const struct i2c_device_id adt7473_id[] = {
174 { "adt7473", adt7473 }, 171 { "adt7473", 0 },
175 { } 172 { }
176}; 173};
177 174
@@ -184,7 +181,7 @@ static struct i2c_driver adt7473_driver = {
184 .remove = adt7473_remove, 181 .remove = adt7473_remove,
185 .id_table = adt7473_id, 182 .id_table = adt7473_id,
186 .detect = adt7473_detect, 183 .detect = adt7473_detect,
187 .address_data = &addr_data, 184 .address_list = normal_i2c,
188}; 185};
189 186
190/* 187/*
@@ -1085,7 +1082,7 @@ static struct attribute *adt7473_attr[] =
1085}; 1082};
1086 1083
1087/* Return 0 if detection is successful, -ENODEV otherwise */ 1084/* Return 0 if detection is successful, -ENODEV otherwise */
1088static int adt7473_detect(struct i2c_client *client, int kind, 1085static int adt7473_detect(struct i2c_client *client,
1089 struct i2c_board_info *info) 1086 struct i2c_board_info *info)
1090{ 1087{
1091 struct i2c_adapter *adapter = client->adapter; 1088 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 99abfddedbc3..a0c385145686 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -148,7 +148,7 @@
148 148
149static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 149static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
150 150
151I2C_CLIENT_INSMOD_4(adt7473, adt7475, adt7476, adt7490); 151enum chips { adt7473, adt7475, adt7476, adt7490 };
152 152
153static const struct i2c_device_id adt7475_id[] = { 153static const struct i2c_device_id adt7475_id[] = {
154 { "adt7473", adt7473 }, 154 { "adt7473", adt7473 },
@@ -1172,7 +1172,7 @@ static struct attribute_group in4_attr_group = { .attrs = in4_attrs };
1172static struct attribute_group in5_attr_group = { .attrs = in5_attrs }; 1172static struct attribute_group in5_attr_group = { .attrs = in5_attrs };
1173static struct attribute_group vid_attr_group = { .attrs = vid_attrs }; 1173static struct attribute_group vid_attr_group = { .attrs = vid_attrs };
1174 1174
1175static int adt7475_detect(struct i2c_client *client, int kind, 1175static int adt7475_detect(struct i2c_client *client,
1176 struct i2c_board_info *info) 1176 struct i2c_board_info *info)
1177{ 1177{
1178 struct i2c_adapter *adapter = client->adapter; 1178 struct i2c_adapter *adapter = client->adapter;
@@ -1412,7 +1412,7 @@ static struct i2c_driver adt7475_driver = {
1412 .remove = adt7475_remove, 1412 .remove = adt7475_remove,
1413 .id_table = adt7475_id, 1413 .id_table = adt7475_id,
1414 .detect = adt7475_detect, 1414 .detect = adt7475_detect,
1415 .address_data = &addr_data, 1415 .address_list = normal_i2c,
1416}; 1416};
1417 1417
1418static void adt7475_read_hystersis(struct i2c_client *client) 1418static void adt7475_read_hystersis(struct i2c_client *client)
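
adt7475 keeps its chip enum because it drives several variants; the enum value still travels in i2c_device_id.driver_data exactly as before, so probe can tell the parts apart. Single-chip drivers (ads7828, adt7462 and the others above) simply store 0 there now that their enum is gone. A hedged sketch of how the variant is recovered, with a hypothetical driver standing in for the real adt7475/dme1737 probe paths:

    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
            enum chips kind = id->driver_data;      /* e.g. adt7473 ... adt7490 */

            /* variant-specific feature setup keyed on 'kind' */
            return 0;
    }
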
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 7ea6a8f66056..c1605b528e8f 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -518,7 +518,7 @@ static int applesmc_pm_restore(struct device *dev)
518 return applesmc_pm_resume(dev); 518 return applesmc_pm_resume(dev);
519} 519}
520 520
521static struct dev_pm_ops applesmc_pm_ops = { 521static const struct dev_pm_ops applesmc_pm_ops = {
522 .resume = applesmc_pm_resume, 522 .resume = applesmc_pm_resume,
523 .restore = applesmc_pm_restore, 523 .restore = applesmc_pm_restore,
524}; 524};
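
The applesmc hunk is unrelated to the detect rework: it only constifies the dev_pm_ops table so it can live in read-only data. The usual shape, sketched with a hypothetical driver:

    static const struct dev_pm_ops foo_pm_ops = {
            .resume         = foo_resume,
            .restore        = foo_restore,
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name   = "foo",
                    .pm     = &foo_pm_ops,  /* .pm takes a const pointer */
            },
    };
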
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 480f80ea1fa0..7dada559b3a1 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -51,9 +51,6 @@
51/* I2C addresses to scan */ 51/* I2C addresses to scan */
52static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END }; 52static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };
53 53
54/* Insmod parameters */
55I2C_CLIENT_INSMOD_1(asb100);
56
57static unsigned short force_subclients[4]; 54static unsigned short force_subclients[4];
58module_param_array(force_subclients, short, NULL, 0); 55module_param_array(force_subclients, short, NULL, 0);
59MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " 56MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
@@ -209,14 +206,14 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
209 206
210static int asb100_probe(struct i2c_client *client, 207static int asb100_probe(struct i2c_client *client,
211 const struct i2c_device_id *id); 208 const struct i2c_device_id *id);
212static int asb100_detect(struct i2c_client *client, int kind, 209static int asb100_detect(struct i2c_client *client,
213 struct i2c_board_info *info); 210 struct i2c_board_info *info);
214static int asb100_remove(struct i2c_client *client); 211static int asb100_remove(struct i2c_client *client);
215static struct asb100_data *asb100_update_device(struct device *dev); 212static struct asb100_data *asb100_update_device(struct device *dev);
216static void asb100_init_client(struct i2c_client *client); 213static void asb100_init_client(struct i2c_client *client);
217 214
218static const struct i2c_device_id asb100_id[] = { 215static const struct i2c_device_id asb100_id[] = {
219 { "asb100", asb100 }, 216 { "asb100", 0 },
220 { } 217 { }
221}; 218};
222MODULE_DEVICE_TABLE(i2c, asb100_id); 219MODULE_DEVICE_TABLE(i2c, asb100_id);
@@ -230,7 +227,7 @@ static struct i2c_driver asb100_driver = {
230 .remove = asb100_remove, 227 .remove = asb100_remove,
231 .id_table = asb100_id, 228 .id_table = asb100_id,
232 .detect = asb100_detect, 229 .detect = asb100_detect,
233 .address_data = &addr_data, 230 .address_list = normal_i2c,
234}; 231};
235 232
236/* 7 Voltages */ 233/* 7 Voltages */
@@ -697,7 +694,7 @@ ERROR_SC_2:
697} 694}
698 695
699/* Return 0 if detection is successful, -ENODEV otherwise */ 696/* Return 0 if detection is successful, -ENODEV otherwise */
700static int asb100_detect(struct i2c_client *client, int kind, 697static int asb100_detect(struct i2c_client *client,
701 struct i2c_board_info *info) 698 struct i2c_board_info *info)
702{ 699{
703 struct i2c_adapter *adapter = client->adapter; 700 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index d6b490d3e36f..94cadc19f0c5 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -44,17 +44,14 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
44 44
45static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; 45static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
46 46
47I2C_CLIENT_INSMOD_1(atxp1);
48
49static int atxp1_probe(struct i2c_client *client, 47static int atxp1_probe(struct i2c_client *client,
50 const struct i2c_device_id *id); 48 const struct i2c_device_id *id);
51static int atxp1_remove(struct i2c_client *client); 49static int atxp1_remove(struct i2c_client *client);
52static struct atxp1_data * atxp1_update_device(struct device *dev); 50static struct atxp1_data * atxp1_update_device(struct device *dev);
53static int atxp1_detect(struct i2c_client *client, int kind, 51static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info);
54 struct i2c_board_info *info);
55 52
56static const struct i2c_device_id atxp1_id[] = { 53static const struct i2c_device_id atxp1_id[] = {
57 { "atxp1", atxp1 }, 54 { "atxp1", 0 },
58 { } 55 { }
59}; 56};
60MODULE_DEVICE_TABLE(i2c, atxp1_id); 57MODULE_DEVICE_TABLE(i2c, atxp1_id);
@@ -68,7 +65,7 @@ static struct i2c_driver atxp1_driver = {
68 .remove = atxp1_remove, 65 .remove = atxp1_remove,
69 .id_table = atxp1_id, 66 .id_table = atxp1_id,
70 .detect = atxp1_detect, 67 .detect = atxp1_detect,
71 .address_data = &addr_data, 68 .address_list = normal_i2c,
72}; 69};
73 70
74struct atxp1_data { 71struct atxp1_data {
@@ -275,7 +272,7 @@ static const struct attribute_group atxp1_group = {
275 272
276 273
277/* Return 0 if detection is successful, -ENODEV otherwise */ 274/* Return 0 if detection is successful, -ENODEV otherwise */
278static int atxp1_detect(struct i2c_client *new_client, int kind, 275static int atxp1_detect(struct i2c_client *new_client,
279 struct i2c_board_info *info) 276 struct i2c_board_info *info)
280{ 277{
281 struct i2c_adapter *adapter = new_client->adapter; 278 struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 4377bb0cc526..823dd28a902c 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -57,11 +57,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
57/* Addresses to scan */ 57/* Addresses to scan */
58static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; 58static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
59 59
60/* Insmod parameters */ 60enum chips { dme1737, sch5027, sch311x };
61I2C_CLIENT_INSMOD_2(dme1737, sch5027);
62
63/* ISA chip types */
64enum isa_chips { sch311x = sch5027 + 1 };
65 61
66/* --------------------------------------------------------------------- 62/* ---------------------------------------------------------------------
67 * Registers 63 * Registers
@@ -2208,7 +2204,7 @@ exit:
2208} 2204}
2209 2205
2210/* Return 0 if detection is successful, -ENODEV otherwise */ 2206/* Return 0 if detection is successful, -ENODEV otherwise */
2211static int dme1737_i2c_detect(struct i2c_client *client, int kind, 2207static int dme1737_i2c_detect(struct i2c_client *client,
2212 struct i2c_board_info *info) 2208 struct i2c_board_info *info)
2213{ 2209{
2214 struct i2c_adapter *adapter = client->adapter; 2210 struct i2c_adapter *adapter = client->adapter;
@@ -2318,7 +2314,7 @@ static struct i2c_driver dme1737_i2c_driver = {
2318 .remove = dme1737_i2c_remove, 2314 .remove = dme1737_i2c_remove,
2319 .id_table = dme1737_id, 2315 .id_table = dme1737_id,
2320 .detect = dme1737_i2c_detect, 2316 .detect = dme1737_i2c_detect,
2321 .address_data = &addr_data, 2317 .address_list = normal_i2c,
2322}; 2318};
2323 2319
2324/* --------------------------------------------------------------------- 2320/* ---------------------------------------------------------------------
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 2a4c6a05b14f..e11363467a8d 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -38,7 +38,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
38 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 38 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
39 39
40/* Insmod parameters */ 40/* Insmod parameters */
41I2C_CLIENT_INSMOD_1(ds1621);
42static int polarity = -1; 41static int polarity = -1;
43module_param(polarity, int, 0); 42module_param(polarity, int, 0);
44MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low"); 43MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low");
@@ -224,7 +223,7 @@ static const struct attribute_group ds1621_group = {
224 223
225 224
226/* Return 0 if detection is successful, -ENODEV otherwise */ 225/* Return 0 if detection is successful, -ENODEV otherwise */
227static int ds1621_detect(struct i2c_client *client, int kind, 226static int ds1621_detect(struct i2c_client *client,
228 struct i2c_board_info *info) 227 struct i2c_board_info *info)
229{ 228{
230 struct i2c_adapter *adapter = client->adapter; 229 struct i2c_adapter *adapter = client->adapter;
@@ -305,8 +304,8 @@ static int ds1621_remove(struct i2c_client *client)
305} 304}
306 305
307static const struct i2c_device_id ds1621_id[] = { 306static const struct i2c_device_id ds1621_id[] = {
308 { "ds1621", ds1621 }, 307 { "ds1621", 0 },
309 { "ds1625", ds1621 }, 308 { "ds1625", 0 },
310 { } 309 { }
311}; 310};
312MODULE_DEVICE_TABLE(i2c, ds1621_id); 311MODULE_DEVICE_TABLE(i2c, ds1621_id);
@@ -321,7 +320,7 @@ static struct i2c_driver ds1621_driver = {
321 .remove = ds1621_remove, 320 .remove = ds1621_remove,
322 .id_table = ds1621_id, 321 .id_table = ds1621_id,
323 .detect = ds1621_detect, 322 .detect = ds1621_detect,
324 .address_data = &addr_data, 323 .address_list = normal_i2c,
325}; 324};
326 325
327static int __init ds1621_init(void) 326static int __init ds1621_init(void)
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 40dfbcd3f3f2..277398f9c938 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -39,8 +39,7 @@
39/* Addresses to scan */ 39/* Addresses to scan */
40static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END }; 40static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
41 41
42/* Insmod parameters */ 42enum chips { f75373, f75375 };
43I2C_CLIENT_INSMOD_2(f75373, f75375);
44 43
45/* Fintek F75375 registers */ 44/* Fintek F75375 registers */
46#define F75375_REG_CONFIG0 0x0 45#define F75375_REG_CONFIG0 0x0
@@ -113,7 +112,7 @@ struct f75375_data {
113 s8 temp_max_hyst[2]; 112 s8 temp_max_hyst[2];
114}; 113};
115 114
116static int f75375_detect(struct i2c_client *client, int kind, 115static int f75375_detect(struct i2c_client *client,
117 struct i2c_board_info *info); 116 struct i2c_board_info *info);
118static int f75375_probe(struct i2c_client *client, 117static int f75375_probe(struct i2c_client *client,
119 const struct i2c_device_id *id); 118 const struct i2c_device_id *id);
@@ -135,7 +134,7 @@ static struct i2c_driver f75375_driver = {
135 .remove = f75375_remove, 134 .remove = f75375_remove,
136 .id_table = f75375_id, 135 .id_table = f75375_id,
137 .detect = f75375_detect, 136 .detect = f75375_detect,
138 .address_data = &addr_data, 137 .address_list = normal_i2c,
139}; 138};
140 139
141static inline int f75375_read8(struct i2c_client *client, u8 reg) 140static inline int f75375_read8(struct i2c_client *client, u8 reg)
@@ -677,7 +676,7 @@ static int f75375_remove(struct i2c_client *client)
677} 676}
678 677
679/* Return 0 if detection is successful, -ENODEV otherwise */ 678/* Return 0 if detection is successful, -ENODEV otherwise */
680static int f75375_detect(struct i2c_client *client, int kind, 679static int f75375_detect(struct i2c_client *client,
681 struct i2c_board_info *info) 680 struct i2c_board_info *info)
682{ 681{
683 struct i2c_adapter *adapter = client->adapter; 682 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 281829cd1533..bd0fc67e804b 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -56,7 +56,8 @@ static int nowayout = WATCHDOG_NOWAYOUT;
56module_param(nowayout, int, 0); 56module_param(nowayout, int, 0);
57MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" 57MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
58 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 58 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
59I2C_CLIENT_INSMOD_7(fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl); 59
60enum chips { fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl };
60 61
61/* 62/*
62 * The FSCHMD registers and other defines 63 * The FSCHMD registers and other defines
@@ -221,7 +222,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 };
221 222
222static int fschmd_probe(struct i2c_client *client, 223static int fschmd_probe(struct i2c_client *client,
223 const struct i2c_device_id *id); 224 const struct i2c_device_id *id);
224static int fschmd_detect(struct i2c_client *client, int kind, 225static int fschmd_detect(struct i2c_client *client,
225 struct i2c_board_info *info); 226 struct i2c_board_info *info);
226static int fschmd_remove(struct i2c_client *client); 227static int fschmd_remove(struct i2c_client *client);
227static struct fschmd_data *fschmd_update_device(struct device *dev); 228static struct fschmd_data *fschmd_update_device(struct device *dev);
@@ -251,7 +252,7 @@ static struct i2c_driver fschmd_driver = {
251 .remove = fschmd_remove, 252 .remove = fschmd_remove,
252 .id_table = fschmd_id, 253 .id_table = fschmd_id,
253 .detect = fschmd_detect, 254 .detect = fschmd_detect,
254 .address_data = &addr_data, 255 .address_list = normal_i2c,
255}; 256};
256 257
257/* 258/*
@@ -1000,7 +1001,7 @@ static void fschmd_dmi_decode(const struct dmi_header *header, void *dummy)
1000 } 1001 }
1001} 1002}
1002 1003
1003static int fschmd_detect(struct i2c_client *client, int _kind, 1004static int fschmd_detect(struct i2c_client *client,
1004 struct i2c_board_info *info) 1005 struct i2c_board_info *info)
1005{ 1006{
1006 enum chips kind; 1007 enum chips kind;
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 1d69458aa0b6..e7ae5743e181 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -46,8 +46,7 @@
46/* Addresses to scan */ 46/* Addresses to scan */
47static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 47static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
48 48
49/* Insmod parameters */ 49enum chips { gl518sm_r00, gl518sm_r80 };
50I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80);
51 50
52/* Many GL518 constants specified below */ 51/* Many GL518 constants specified below */
53 52
@@ -139,8 +138,7 @@ struct gl518_data {
139 138
140static int gl518_probe(struct i2c_client *client, 139static int gl518_probe(struct i2c_client *client,
141 const struct i2c_device_id *id); 140 const struct i2c_device_id *id);
142static int gl518_detect(struct i2c_client *client, int kind, 141static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info);
143 struct i2c_board_info *info);
144static void gl518_init_client(struct i2c_client *client); 142static void gl518_init_client(struct i2c_client *client);
145static int gl518_remove(struct i2c_client *client); 143static int gl518_remove(struct i2c_client *client);
146static int gl518_read_value(struct i2c_client *client, u8 reg); 144static int gl518_read_value(struct i2c_client *client, u8 reg);
@@ -163,7 +161,7 @@ static struct i2c_driver gl518_driver = {
163 .remove = gl518_remove, 161 .remove = gl518_remove,
164 .id_table = gl518_id, 162 .id_table = gl518_id,
165 .detect = gl518_detect, 163 .detect = gl518_detect,
166 .address_data = &addr_data, 164 .address_list = normal_i2c,
167}; 165};
168 166
169/* 167/*
@@ -484,8 +482,7 @@ static const struct attribute_group gl518_group_r80 = {
484 */ 482 */
485 483
486/* Return 0 if detection is successful, -ENODEV otherwise */ 484/* Return 0 if detection is successful, -ENODEV otherwise */
487static int gl518_detect(struct i2c_client *client, int kind, 485static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info)
488 struct i2c_board_info *info)
489{ 486{
490 struct i2c_adapter *adapter = client->adapter; 487 struct i2c_adapter *adapter = client->adapter;
491 int rev; 488 int rev;
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 92b5720ceaff..ec588026f0a9 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -41,9 +41,6 @@ MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=tempe
41/* Addresses to scan */ 41/* Addresses to scan */
42static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 42static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
43 43
44/* Insmod parameters */
45I2C_CLIENT_INSMOD_1(gl520sm);
46
47/* Many GL520 constants specified below 44/* Many GL520 constants specified below
48One of the inputs can be configured as either temp or voltage. 45One of the inputs can be configured as either temp or voltage.
49That's why _TEMP2 and _IN4 access the same register 46That's why _TEMP2 and _IN4 access the same register
@@ -81,8 +78,7 @@ static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 };
81 78
82static int gl520_probe(struct i2c_client *client, 79static int gl520_probe(struct i2c_client *client,
83 const struct i2c_device_id *id); 80 const struct i2c_device_id *id);
84static int gl520_detect(struct i2c_client *client, int kind, 81static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info);
85 struct i2c_board_info *info);
86static void gl520_init_client(struct i2c_client *client); 82static void gl520_init_client(struct i2c_client *client);
87static int gl520_remove(struct i2c_client *client); 83static int gl520_remove(struct i2c_client *client);
88static int gl520_read_value(struct i2c_client *client, u8 reg); 84static int gl520_read_value(struct i2c_client *client, u8 reg);
@@ -91,7 +87,7 @@ static struct gl520_data *gl520_update_device(struct device *dev);
91 87
92/* Driver data */ 88/* Driver data */
93static const struct i2c_device_id gl520_id[] = { 89static const struct i2c_device_id gl520_id[] = {
94 { "gl520sm", gl520sm }, 90 { "gl520sm", 0 },
95 { } 91 { }
96}; 92};
97MODULE_DEVICE_TABLE(i2c, gl520_id); 93MODULE_DEVICE_TABLE(i2c, gl520_id);
@@ -105,7 +101,7 @@ static struct i2c_driver gl520_driver = {
105 .remove = gl520_remove, 101 .remove = gl520_remove,
106 .id_table = gl520_id, 102 .id_table = gl520_id,
107 .detect = gl520_detect, 103 .detect = gl520_detect,
108 .address_data = &addr_data, 104 .address_list = normal_i2c,
109}; 105};
110 106
111/* Client data */ 107/* Client data */
@@ -681,8 +677,7 @@ static const struct attribute_group gl520_group_opt = {
681 */ 677 */
682 678
683/* Return 0 if detection is successful, -ENODEV otherwise */ 679/* Return 0 if detection is successful, -ENODEV otherwise */
684static int gl520_detect(struct i2c_client *client, int kind, 680static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info)
685 struct i2c_board_info *info)
686{ 681{
687 struct i2c_adapter *adapter = client->adapter; 682 struct i2c_adapter *adapter = client->adapter;
688 683
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index cf5afb9a10ab..b2f2277cad3c 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -43,13 +43,30 @@
43#define MDPS_POLL_INTERVAL 50 43#define MDPS_POLL_INTERVAL 50
44/* 44/*
45 * The sensor can also generate interrupts (DRDY) but it's pretty pointless 45 * The sensor can also generate interrupts (DRDY) but it's pretty pointless
46 * because their are generated even if the data do not change. So it's better 46 * because they are generated even if the data do not change. So it's better
47 * to keep the interrupt for the free-fall event. The values are updated at 47 * to keep the interrupt for the free-fall event. The values are updated at
48 * 40Hz (at the lowest frequency), but as it can be pretty time consuming on 48 * 40Hz (at the lowest frequency), but as it can be pretty time consuming on
49 * some low processor, we poll the sensor only at 20Hz... enough for the 49 * some low processor, we poll the sensor only at 20Hz... enough for the
50 * joystick. 50 * joystick.
51 */ 51 */
52 52
53#define LIS3_PWRON_DELAY_WAI_12B (5000)
54#define LIS3_PWRON_DELAY_WAI_8B (3000)
55
56/*
57 * LIS3LV02D spec says 1024 LSBs corresponds 1 G -> 1LSB is 1000/1024 mG
58 * LIS302D spec says: 18 mG / digit
59 * LIS3_ACCURACY is used to increase accuracy of the intermediate
60 * calculation results.
61 */
62#define LIS3_ACCURACY 1024
63/* Sensitivity values for -2G +2G scale */
64#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
65#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
66
67#define LIS3_DEFAULT_FUZZ 3
68#define LIS3_DEFAULT_FLAT 3
69
53struct lis3lv02d lis3_dev = { 70struct lis3lv02d lis3_dev = {
54 .misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait), 71 .misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
55}; 72};
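
The new scale/LIS3_ACCURACY pair exists so that both sensor families end up reporting in the same unit, 1/1000th of earth gravity: lis3lv02d_get_xyz(), further down in this patch, multiplies each raw reading by lis3->scale and divides by LIS3_ACCURACY. Working the constants above through that formula (my arithmetic, not text from the patch):

    12-bit parts: scale = (1024 * 1000) / 1024 = 1000
                  value = raw * 1000 / 1024            (1024 LSB per g)
    8-bit parts:  scale = 18 * 1024 = 18432
                  value = raw * 18432 / 1024 = raw * 18    (18 mg per digit)

so a full-scale 12-bit reading of 2048 becomes 2000 (2 g) and a full-scale 8-bit reading of 128 becomes 2304 (about 2.3 g).
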
@@ -65,7 +82,7 @@ static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg)
65 return lo; 82 return lo;
66} 83}
67 84
68static s16 lis3lv02d_read_16(struct lis3lv02d *lis3, int reg) 85static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg)
69{ 86{
70 u8 lo, hi; 87 u8 lo, hi;
71 88
@@ -102,16 +119,106 @@ static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3])
102static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z) 119static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
103{ 120{
104 int position[3]; 121 int position[3];
122 int i;
105 123
124 mutex_lock(&lis3->mutex);
106 position[0] = lis3->read_data(lis3, OUTX); 125 position[0] = lis3->read_data(lis3, OUTX);
107 position[1] = lis3->read_data(lis3, OUTY); 126 position[1] = lis3->read_data(lis3, OUTY);
108 position[2] = lis3->read_data(lis3, OUTZ); 127 position[2] = lis3->read_data(lis3, OUTZ);
128 mutex_unlock(&lis3->mutex);
129
130 for (i = 0; i < 3; i++)
131 position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
109 132
110 *x = lis3lv02d_get_axis(lis3->ac.x, position); 133 *x = lis3lv02d_get_axis(lis3->ac.x, position);
111 *y = lis3lv02d_get_axis(lis3->ac.y, position); 134 *y = lis3lv02d_get_axis(lis3->ac.y, position);
112 *z = lis3lv02d_get_axis(lis3->ac.z, position); 135 *z = lis3lv02d_get_axis(lis3->ac.z, position);
113} 136}
114 137
138/* conversion btw sampling rate and the register values */
139static int lis3_12_rates[4] = {40, 160, 640, 2560};
140static int lis3_8_rates[2] = {100, 400};
141
142/* ODR is Output Data Rate */
143static int lis3lv02d_get_odr(void)
144{
145 u8 ctrl;
146 int shift;
147
148 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
149 ctrl &= lis3_dev.odr_mask;
150 shift = ffs(lis3_dev.odr_mask) - 1;
151 return lis3_dev.odrs[(ctrl >> shift)];
152}
153
154static int lis3lv02d_set_odr(int rate)
155{
156 u8 ctrl;
157 int i, len, shift;
158
159 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
160 ctrl &= ~lis3_dev.odr_mask;
161 len = 1 << hweight_long(lis3_dev.odr_mask); /* # of possible values */
162 shift = ffs(lis3_dev.odr_mask) - 1;
163
164 for (i = 0; i < len; i++)
165 if (lis3_dev.odrs[i] == rate) {
166 lis3_dev.write(&lis3_dev, CTRL_REG1,
167 ctrl | (i << shift));
168 return 0;
169 }
170 return -EINVAL;
171}
172
173static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
174{
175 u8 reg;
176 s16 x, y, z;
177 u8 selftest;
178 int ret;
179
180 mutex_lock(&lis3->mutex);
181 if (lis3_dev.whoami == WAI_12B)
182 selftest = CTRL1_ST;
183 else
184 selftest = CTRL1_STP;
185
186 lis3->read(lis3, CTRL_REG1, &reg);
187 lis3->write(lis3, CTRL_REG1, (reg | selftest));
188 msleep(lis3->pwron_delay / lis3lv02d_get_odr());
189
190 /* Read directly to avoid axis remap */
191 x = lis3->read_data(lis3, OUTX);
192 y = lis3->read_data(lis3, OUTY);
193 z = lis3->read_data(lis3, OUTZ);
194
195 /* back to normal settings */
196 lis3->write(lis3, CTRL_REG1, reg);
197 msleep(lis3->pwron_delay / lis3lv02d_get_odr());
198
199 results[0] = x - lis3->read_data(lis3, OUTX);
200 results[1] = y - lis3->read_data(lis3, OUTY);
201 results[2] = z - lis3->read_data(lis3, OUTZ);
202
203 ret = 0;
204 if (lis3->pdata) {
205 int i;
206 for (i = 0; i < 3; i++) {
207 /* Check against selftest acceptance limits */
208 if ((results[i] < lis3->pdata->st_min_limits[i]) ||
209 (results[i] > lis3->pdata->st_max_limits[i])) {
210 ret = -EIO;
211 goto fail;
212 }
213 }
214 }
215
216 /* test passed */
217fail:
218 mutex_unlock(&lis3->mutex);
219 return ret;
220}
221
115void lis3lv02d_poweroff(struct lis3lv02d *lis3) 222void lis3lv02d_poweroff(struct lis3lv02d *lis3)
116{ 223{
117 /* disable X,Y,Z axis and power down */ 224 /* disable X,Y,Z axis and power down */
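
lis3lv02d_get_odr() and lis3lv02d_set_odr() above index the per-chip odrs[] table with the ODR field extracted from CTRL_REG1: ffs(odr_mask) - 1 gives the field's shift and hweight_long(odr_mask) the number of selectable rates (4 for the 12-bit parts, 2 for the 8-bit ones). A small user-space sketch of just that index arithmetic, reusing the masks and rate tables from the patch (the helper name and the sample register values are made up):

    #include <stdio.h>
    #include <strings.h>            /* ffs() */

    static int odr_from_ctrl(unsigned char ctrl, unsigned char odr_mask,
                             const int *odrs)
    {
            int shift = ffs(odr_mask) - 1;  /* position of the mask's lowest set bit */

            return odrs[(ctrl & odr_mask) >> shift];
    }

    int main(void)
    {
            const int lis3_12_rates[4] = { 40, 160, 640, 2560 };
            const int lis3_8_rates[2] = { 100, 400 };

            /* 12-bit part, CTRL_REG1 = 0x20, mask DF0|DF1 = 0x30:
             * (0x20 & 0x30) >> 4 = 2  ->  640 Hz */
            printf("%d\n", odr_from_ctrl(0x20, 0x30, lis3_12_rates));
            /* 8-bit part, CTRL_REG1 = 0x80, mask DR = 0x80: index 1 -> 400 Hz */
            printf("%d\n", odr_from_ctrl(0x80, 0x80, lis3_8_rates));
            return 0;
    }
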
@@ -125,14 +232,19 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
125 232
126 lis3->init(lis3); 233 lis3->init(lis3);
127 234
235 /* LIS3 power on delay is quite long */
236 msleep(lis3->pwron_delay / lis3lv02d_get_odr());
237
128 /* 238 /*
129 * Common configuration 239 * Common configuration
130 * BDU: LSB and MSB values are not updated until both have been read. 240 * BDU: (12 bits sensors only) LSB and MSB values are not updated until
131 * So the value read will always be correct. 241 * both have been read. So the value read will always be correct.
132 */ 242 */
133 lis3->read(lis3, CTRL_REG2, &reg); 243 if (lis3->whoami == WAI_12B) {
134 reg |= CTRL2_BDU; 244 lis3->read(lis3, CTRL_REG2, &reg);
135 lis3->write(lis3, CTRL_REG2, reg); 245 reg |= CTRL2_BDU;
246 lis3->write(lis3, CTRL_REG2, reg);
247 }
136} 248}
137EXPORT_SYMBOL_GPL(lis3lv02d_poweron); 249EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
138 250
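
The new wait in lis3lv02d_poweron() is expressed as pwron_delay / ODR because the part's turn-on time shrinks as the output data rate rises. With the constants introduced earlier this works out to, for example, 5000 / 40 = 125 ms for a 12-bit sensor running at its lowest 40 Hz rate, and 3000 / 100 = 30 ms for an 8-bit sensor at 100 Hz (my arithmetic; the datasheet figures themselves are not quoted in the patch).
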
@@ -273,22 +385,17 @@ static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
273 int x, y, z; 385 int x, y, z;
274 386
275 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); 387 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
276 input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib); 388 input_report_abs(pidev->input, ABS_X, x);
277 input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib); 389 input_report_abs(pidev->input, ABS_Y, y);
278 input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib); 390 input_report_abs(pidev->input, ABS_Z, z);
279} 391 input_sync(pidev->input);
280
281
282static inline void lis3lv02d_calibrate_joystick(void)
283{
284 lis3lv02d_get_xyz(&lis3_dev,
285 &lis3_dev.xcalib, &lis3_dev.ycalib, &lis3_dev.zcalib);
286} 392}
287 393
288int lis3lv02d_joystick_enable(void) 394int lis3lv02d_joystick_enable(void)
289{ 395{
290 struct input_dev *input_dev; 396 struct input_dev *input_dev;
291 int err; 397 int err;
398 int max_val, fuzz, flat;
292 399
293 if (lis3_dev.idev) 400 if (lis3_dev.idev)
294 return -EINVAL; 401 return -EINVAL;
@@ -301,8 +408,6 @@ int lis3lv02d_joystick_enable(void)
301 lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL; 408 lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
302 input_dev = lis3_dev.idev->input; 409 input_dev = lis3_dev.idev->input;
303 410
304 lis3lv02d_calibrate_joystick();
305
306 input_dev->name = "ST LIS3LV02DL Accelerometer"; 411 input_dev->name = "ST LIS3LV02DL Accelerometer";
307 input_dev->phys = DRIVER_NAME "/input0"; 412 input_dev->phys = DRIVER_NAME "/input0";
308 input_dev->id.bustype = BUS_HOST; 413 input_dev->id.bustype = BUS_HOST;
@@ -310,9 +415,12 @@ int lis3lv02d_joystick_enable(void)
310 input_dev->dev.parent = &lis3_dev.pdev->dev; 415 input_dev->dev.parent = &lis3_dev.pdev->dev;
311 416
312 set_bit(EV_ABS, input_dev->evbit); 417 set_bit(EV_ABS, input_dev->evbit);
313 input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 418 max_val = (lis3_dev.mdps_max_val * lis3_dev.scale) / LIS3_ACCURACY;
314 input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 419 fuzz = (LIS3_DEFAULT_FUZZ * lis3_dev.scale) / LIS3_ACCURACY;
315 input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 420 flat = (LIS3_DEFAULT_FLAT * lis3_dev.scale) / LIS3_ACCURACY;
421 input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
422 input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
423 input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
316 424
317 err = input_register_polled_device(lis3_dev.idev); 425 err = input_register_polled_device(lis3_dev.idev);
318 if (err) { 426 if (err) {
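
With the calibration offsets gone, the joystick now reports absolute acceleration, so the axis limits and the fuzz/flat thresholds are scaled into the same 1/1000 g unit as the position interface. Using the same conversion worked out earlier (my arithmetic): the ABS_X/Y/Z range becomes ±2000 for 12-bit parts and ±2304 for 8-bit parts, while the default fuzz and flat of 3 become 3 * 1000 / 1024 = 2 and 3 * 18432 / 1024 = 54 respectively.
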
@@ -332,11 +440,23 @@ void lis3lv02d_joystick_disable(void)
332 if (lis3_dev.irq) 440 if (lis3_dev.irq)
333 misc_deregister(&lis3lv02d_misc_device); 441 misc_deregister(&lis3lv02d_misc_device);
334 input_unregister_polled_device(lis3_dev.idev); 442 input_unregister_polled_device(lis3_dev.idev);
443 input_free_polled_device(lis3_dev.idev);
335 lis3_dev.idev = NULL; 444 lis3_dev.idev = NULL;
336} 445}
337EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable); 446EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
338 447
339/* Sysfs stuff */ 448/* Sysfs stuff */
449static ssize_t lis3lv02d_selftest_show(struct device *dev,
450 struct device_attribute *attr, char *buf)
451{
452 int result;
453 s16 values[3];
454
455 result = lis3lv02d_selftest(&lis3_dev, values);
456 return sprintf(buf, "%s %d %d %d\n", result == 0 ? "OK" : "FAIL",
457 values[0], values[1], values[2]);
458}
459
340static ssize_t lis3lv02d_position_show(struct device *dev, 460static ssize_t lis3lv02d_position_show(struct device *dev,
341 struct device_attribute *attr, char *buf) 461 struct device_attribute *attr, char *buf)
342{ 462{
@@ -346,41 +466,35 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
346 return sprintf(buf, "(%d,%d,%d)\n", x, y, z); 466 return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
347} 467}
348 468
349static ssize_t lis3lv02d_calibrate_show(struct device *dev, 469static ssize_t lis3lv02d_rate_show(struct device *dev,
350 struct device_attribute *attr, char *buf) 470 struct device_attribute *attr, char *buf)
351{ 471{
352 return sprintf(buf, "(%d,%d,%d)\n", lis3_dev.xcalib, lis3_dev.ycalib, lis3_dev.zcalib); 472 return sprintf(buf, "%d\n", lis3lv02d_get_odr());
353} 473}
354 474
355static ssize_t lis3lv02d_calibrate_store(struct device *dev, 475static ssize_t lis3lv02d_rate_set(struct device *dev,
356 struct device_attribute *attr, 476 struct device_attribute *attr, const char *buf,
357 const char *buf, size_t count) 477 size_t count)
358{ 478{
359 lis3lv02d_calibrate_joystick(); 479 unsigned long rate;
360 return count;
361}
362 480
363/* conversion btw sampling rate and the register values */ 481 if (strict_strtoul(buf, 0, &rate))
364static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560}; 482 return -EINVAL;
365static ssize_t lis3lv02d_rate_show(struct device *dev,
366 struct device_attribute *attr, char *buf)
367{
368 u8 ctrl;
369 int val;
370 483
371 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl); 484 if (lis3lv02d_set_odr(rate))
372 val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4; 485 return -EINVAL;
373 return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]); 486
487 return count;
374} 488}
375 489
490static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL);
376static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL); 491static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
377static DEVICE_ATTR(calibrate, S_IRUGO|S_IWUSR, lis3lv02d_calibrate_show, 492static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show,
378 lis3lv02d_calibrate_store); 493 lis3lv02d_rate_set);
379static DEVICE_ATTR(rate, S_IRUGO, lis3lv02d_rate_show, NULL);
380 494
381static struct attribute *lis3lv02d_attributes[] = { 495static struct attribute *lis3lv02d_attributes[] = {
496 &dev_attr_selftest.attr,
382 &dev_attr_position.attr, 497 &dev_attr_position.attr,
383 &dev_attr_calibrate.attr,
384 &dev_attr_rate.attr, 498 &dev_attr_rate.attr,
385 NULL 499 NULL
386}; 500};
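
After this hunk the platform device exposes three attributes: position (read-only), rate (now writable as well as readable) and the new root-only selftest. Assuming the usual platform-device sysfs path, which this hunk does not spell out, usage looks roughly like: reading position yields "(x,y,z)" in 1/1000 g, writing a supported rate such as "echo 160 > /sys/devices/platform/lis3lv02d/rate" selects that ODR (values not in odrs[] get -EINVAL), and reading selftest prints "OK" or "FAIL" followed by the three measured deltas. The concrete path and values here are illustrative.
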
@@ -409,22 +523,30 @@ EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
409 523
410/* 524/*
411 * Initialise the accelerometer and the various subsystems. 525 * Initialise the accelerometer and the various subsystems.
412 * Should be rather independant of the bus system. 526 * Should be rather independent of the bus system.
413 */ 527 */
414int lis3lv02d_init_device(struct lis3lv02d *dev) 528int lis3lv02d_init_device(struct lis3lv02d *dev)
415{ 529{
416 dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I); 530 dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
417 531
418 switch (dev->whoami) { 532 switch (dev->whoami) {
419 case LIS_DOUBLE_ID: 533 case WAI_12B:
420 printk(KERN_INFO DRIVER_NAME ": 2-byte sensor found\n"); 534 printk(KERN_INFO DRIVER_NAME ": 12 bits sensor found\n");
421 dev->read_data = lis3lv02d_read_16; 535 dev->read_data = lis3lv02d_read_12;
422 dev->mdps_max_val = 2048; 536 dev->mdps_max_val = 2048;
537 dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
538 dev->odrs = lis3_12_rates;
539 dev->odr_mask = CTRL1_DF0 | CTRL1_DF1;
540 dev->scale = LIS3_SENSITIVITY_12B;
423 break; 541 break;
424 case LIS_SINGLE_ID: 542 case WAI_8B:
425 printk(KERN_INFO DRIVER_NAME ": 1-byte sensor found\n"); 543 printk(KERN_INFO DRIVER_NAME ": 8 bits sensor found\n");
426 dev->read_data = lis3lv02d_read_8; 544 dev->read_data = lis3lv02d_read_8;
427 dev->mdps_max_val = 128; 545 dev->mdps_max_val = 128;
546 dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
547 dev->odrs = lis3_8_rates;
548 dev->odr_mask = CTRL1_DR;
549 dev->scale = LIS3_SENSITIVITY_8B;
428 break; 550 break;
429 default: 551 default:
430 printk(KERN_ERR DRIVER_NAME 552 printk(KERN_ERR DRIVER_NAME
@@ -432,6 +554,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
432 return -EINVAL; 554 return -EINVAL;
433 } 555 }
434 556
557 mutex_init(&dev->mutex);
558
435 lis3lv02d_add_fs(dev); 559 lis3lv02d_add_fs(dev);
436 lis3lv02d_poweron(dev); 560 lis3lv02d_poweron(dev);
437 561
@@ -443,7 +567,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
443 if (dev->pdata) { 567 if (dev->pdata) {
444 struct lis3lv02d_platform_data *p = dev->pdata; 568 struct lis3lv02d_platform_data *p = dev->pdata;
445 569
446 if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) { 570 if (p->click_flags && (dev->whoami == WAI_8B)) {
447 dev->write(dev, CLICK_CFG, p->click_flags); 571 dev->write(dev, CLICK_CFG, p->click_flags);
448 dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit); 572 dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
449 dev->write(dev, CLICK_LATENCY, p->click_latency); 573 dev->write(dev, CLICK_LATENCY, p->click_latency);
@@ -454,7 +578,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
454 (p->click_thresh_y << 4)); 578 (p->click_thresh_y << 4));
455 } 579 }
456 580
457 if (p->wakeup_flags && (dev->whoami == LIS_SINGLE_ID)) { 581 if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
458 dev->write(dev, FF_WU_CFG_1, p->wakeup_flags); 582 dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
459 dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f); 583 dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
460 /* default to 2.5ms for now */ 584 /* default to 2.5ms for now */
@@ -484,4 +608,3 @@ EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
484MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver"); 608MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver");
485MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek"); 609MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
486MODULE_LICENSE("GPL"); 610MODULE_LICENSE("GPL");
487
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 3e1ff46f72d3..e6a01f44709b 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -2,7 +2,7 @@
2 * lis3lv02d.h - ST LIS3LV02DL accelerometer driver 2 * lis3lv02d.h - ST LIS3LV02DL accelerometer driver
3 * 3 *
4 * Copyright (C) 2007-2008 Yan Burman 4 * Copyright (C) 2007-2008 Yan Burman
5 * Copyright (C) 2008 Eric Piel 5 * Copyright (C) 2008-2009 Eric Piel
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -22,20 +22,18 @@
22#include <linux/input-polldev.h> 22#include <linux/input-polldev.h>
23 23
24/* 24/*
25 * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to 25 * This driver tries to support the "digital" accelerometer chips from
26 * be connected via SPI. There exists also several similar chips (such as LIS302DL or 26 * STMicroelectronics such as LIS3LV02DL, LIS302DL, LIS3L02DQ, LIS331DL,
27 * LIS3L02DQ) and they have slightly different registers, but we can provide a 27 * LIS35DE, or LIS202DL. They are very similar in terms of programming, with
28 * common interface for all of them. 28 * almost the same registers. In addition to differing on physical properties,
29 * They can also be connected via I²C. 29 * they differ on the number of axes (2/3), precision (8/12 bits), and special
30 * features (freefall detection, click...). Unfortunately, not all the
31 * differences can be probed via a register.
32 * They can be connected either via I²C or SPI.
30 */ 33 */
31 34
32#include <linux/lis3lv02d.h> 35#include <linux/lis3lv02d.h>
33 36
34/* 2-byte registers */
35#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */
36/* 1-byte registers */
37#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */
38
39enum lis3_reg { 37enum lis3_reg {
40 WHO_AM_I = 0x0F, 38 WHO_AM_I = 0x0F,
41 OFFSET_X = 0x16, 39 OFFSET_X = 0x16,
@@ -94,7 +92,13 @@ enum lis3lv02d_reg {
94 DD_THSE_H = 0x3F, 92 DD_THSE_H = 0x3F,
95}; 93};
96 94
97enum lis3lv02d_ctrl1 { 95enum lis3_who_am_i {
96 WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */
97 WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */
98 WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */
99};
100
101enum lis3lv02d_ctrl1_12b {
98 CTRL1_Xen = 0x01, 102 CTRL1_Xen = 0x01,
99 CTRL1_Yen = 0x02, 103 CTRL1_Yen = 0x02,
100 CTRL1_Zen = 0x04, 104 CTRL1_Zen = 0x04,
@@ -104,6 +108,16 @@ enum lis3lv02d_ctrl1 {
104 CTRL1_PD0 = 0x40, 108 CTRL1_PD0 = 0x40,
105 CTRL1_PD1 = 0x80, 109 CTRL1_PD1 = 0x80,
106}; 110};
111
112/* Delta to ctrl1_12b version */
113enum lis3lv02d_ctrl1_8b {
114 CTRL1_STM = 0x08,
115 CTRL1_STP = 0x10,
116 CTRL1_FS = 0x20,
117 CTRL1_PD = 0x40,
118 CTRL1_DR = 0x80,
119};
120
107enum lis3lv02d_ctrl2 { 121enum lis3lv02d_ctrl2 {
108 CTRL2_DAS = 0x01, 122 CTRL2_DAS = 0x01,
109 CTRL2_SIM = 0x02, 123 CTRL2_SIM = 0x02,
@@ -194,16 +208,20 @@ struct lis3lv02d {
194 int (*write) (struct lis3lv02d *lis3, int reg, u8 val); 208 int (*write) (struct lis3lv02d *lis3, int reg, u8 val);
195 int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret); 209 int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret);
196 210
197 u8 whoami; /* 3Ah: 2-byte registries, 3Bh: 1-byte registries */ 211 int *odrs; /* Supported output data rates */
212 u8 odr_mask; /* ODR bit mask */
213 u8 whoami; /* indicates measurement precision */
198 s16 (*read_data) (struct lis3lv02d *lis3, int reg); 214 s16 (*read_data) (struct lis3lv02d *lis3, int reg);
199 int mdps_max_val; 215 int mdps_max_val;
216 int pwron_delay;
217 int scale; /*
218 * relationship between 1 LBS and mG
219 * (1/1000th of earth gravity)
220 */
200 221
201 struct input_polled_dev *idev; /* input device */ 222 struct input_polled_dev *idev; /* input device */
202 struct platform_device *pdev; /* platform device */ 223 struct platform_device *pdev; /* platform device */
203 atomic_t count; /* interrupt count after last read */ 224 atomic_t count; /* interrupt count after last read */
204 int xcalib; /* calibrated null value for x */
205 int ycalib; /* calibrated null value for y */
206 int zcalib; /* calibrated null value for z */
207 struct axis_conversion ac; /* hw -> logical axis */ 225 struct axis_conversion ac; /* hw -> logical axis */
208 226
209 u32 irq; /* IRQ number */ 227 u32 irq; /* IRQ number */
@@ -212,6 +230,7 @@ struct lis3lv02d {
212 unsigned long misc_opened; /* bit0: whether the device is open */ 230 unsigned long misc_opened; /* bit0: whether the device is open */
213 231
214 struct lis3lv02d_platform_data *pdata; /* for passing board config */ 232 struct lis3lv02d_platform_data *pdata; /* for passing board config */
233 struct mutex mutex; /* Serialize poll and selftest */
215}; 234};
216 235
217int lis3lv02d_init_device(struct lis3lv02d *lis3); 236int lis3lv02d_init_device(struct lis3lv02d *lis3);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 5da66ab04f74..bf81aff7051d 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -56,12 +56,6 @@
56static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; 56static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
57 57
58/* 58/*
59 * Insmod parameters
60 */
61
62I2C_CLIENT_INSMOD_1(lm63);
63
64/*
65 * The LM63 registers 59 * The LM63 registers
66 */ 60 */
67 61
@@ -134,8 +128,7 @@ static int lm63_remove(struct i2c_client *client);
134 128
135static struct lm63_data *lm63_update_device(struct device *dev); 129static struct lm63_data *lm63_update_device(struct device *dev);
136 130
137static int lm63_detect(struct i2c_client *client, int kind, 131static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
138 struct i2c_board_info *info);
139static void lm63_init_client(struct i2c_client *client); 132static void lm63_init_client(struct i2c_client *client);
140 133
141/* 134/*
@@ -143,7 +136,7 @@ static void lm63_init_client(struct i2c_client *client);
143 */ 136 */
144 137
145static const struct i2c_device_id lm63_id[] = { 138static const struct i2c_device_id lm63_id[] = {
146 { "lm63", lm63 }, 139 { "lm63", 0 },
147 { } 140 { }
148}; 141};
149MODULE_DEVICE_TABLE(i2c, lm63_id); 142MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -157,7 +150,7 @@ static struct i2c_driver lm63_driver = {
157 .remove = lm63_remove, 150 .remove = lm63_remove,
158 .id_table = lm63_id, 151 .id_table = lm63_id,
159 .detect = lm63_detect, 152 .detect = lm63_detect,
160 .address_data = &addr_data, 153 .address_list = normal_i2c,
161}; 154};
162 155
163/* 156/*
@@ -423,7 +416,7 @@ static const struct attribute_group lm63_group_fan1 = {
423 */ 416 */
424 417
425/* Return 0 if detection is successful, -ENODEV otherwise */ 418/* Return 0 if detection is successful, -ENODEV otherwise */
426static int lm63_detect(struct i2c_client *new_client, int kind, 419static int lm63_detect(struct i2c_client *new_client,
427 struct i2c_board_info *info) 420 struct i2c_board_info *info)
428{ 421{
429 struct i2c_adapter *adapter = new_client->adapter; 422 struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 0bf8b2a8e9f0..c5f39ba103c0 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -27,9 +27,6 @@
27static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 27static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
28 0x4d, 0x4e, I2C_CLIENT_END }; 28 0x4d, 0x4e, I2C_CLIENT_END };
29 29
30/* Insmod parameters */
31I2C_CLIENT_INSMOD_1(lm73);
32
33/* LM73 registers */ 30/* LM73 registers */
34#define LM73_REG_INPUT 0x00 31#define LM73_REG_INPUT 0x00
35#define LM73_REG_CONF 0x01 32#define LM73_REG_CONF 0x01
@@ -145,13 +142,13 @@ static int lm73_remove(struct i2c_client *client)
145} 142}
146 143
147static const struct i2c_device_id lm73_ids[] = { 144static const struct i2c_device_id lm73_ids[] = {
148 { "lm73", lm73 }, 145 { "lm73", 0 },
149 { /* LIST END */ } 146 { /* LIST END */ }
150}; 147};
151MODULE_DEVICE_TABLE(i2c, lm73_ids); 148MODULE_DEVICE_TABLE(i2c, lm73_ids);
152 149
153/* Return 0 if detection is successful, -ENODEV otherwise */ 150/* Return 0 if detection is successful, -ENODEV otherwise */
154static int lm73_detect(struct i2c_client *new_client, int kind, 151static int lm73_detect(struct i2c_client *new_client,
155 struct i2c_board_info *info) 152 struct i2c_board_info *info)
156{ 153{
157 struct i2c_adapter *adapter = new_client->adapter; 154 struct i2c_adapter *adapter = new_client->adapter;
@@ -182,7 +179,7 @@ static struct i2c_driver lm73_driver = {
182 .remove = lm73_remove, 179 .remove = lm73_remove,
183 .id_table = lm73_ids, 180 .id_table = lm73_ids,
184 .detect = lm73_detect, 181 .detect = lm73_detect,
185 .address_data = &addr_data, 182 .address_list = normal_i2c,
186}; 183};
187 184
188/* module glue */ 185/* module glue */
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e392548cccb8..8ae2cfe2d827 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -32,15 +32,12 @@
32 32
33/* 33/*
34 * This driver handles the LM75 and compatible digital temperature sensors. 34 * This driver handles the LM75 and compatible digital temperature sensors.
35 * Only types which are _not_ listed in I2C_CLIENT_INSMOD_*() need to be
36 * listed here. We start at 9 since I2C_CLIENT_INSMOD_*() currently allow
37 * definition of up to 8 chip types (plus zero).
38 */ 35 */
39 36
40enum lm75_type { /* keep sorted in alphabetical order */ 37enum lm75_type { /* keep sorted in alphabetical order */
41 ds1775 = 9, 38 ds1775,
42 ds75, 39 ds75,
43 /* lm75 -- in I2C_CLIENT_INSMOD_1() */ 40 lm75,
44 lm75a, 41 lm75a,
45 max6625, 42 max6625,
46 max6626, 43 max6626,
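
The renumbering of lm75_type is a direct consequence of the macro removal: as the deleted comment explains, I2C_CLIENT_INSMOD_*() reserved up to eight chip-type values plus zero, which is why this enum had to start at 9 and why lm75 itself could not appear in it. With the macro gone the enum starts at 0 and lm75 is listed like every other supported part.
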
@@ -58,9 +55,6 @@ enum lm75_type { /* keep sorted in alphabetical order */
58static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 55static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
59 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 56 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
60 57
61/* Insmod parameters */
62I2C_CLIENT_INSMOD_1(lm75);
63
64 58
65/* The LM75 registers */ 59/* The LM75 registers */
66#define LM75_REG_CONF 0x01 60#define LM75_REG_CONF 0x01
@@ -234,7 +228,7 @@ static const struct i2c_device_id lm75_ids[] = {
234MODULE_DEVICE_TABLE(i2c, lm75_ids); 228MODULE_DEVICE_TABLE(i2c, lm75_ids);
235 229
236/* Return 0 if detection is successful, -ENODEV otherwise */ 230/* Return 0 if detection is successful, -ENODEV otherwise */
237static int lm75_detect(struct i2c_client *new_client, int kind, 231static int lm75_detect(struct i2c_client *new_client,
238 struct i2c_board_info *info) 232 struct i2c_board_info *info)
239{ 233{
240 struct i2c_adapter *adapter = new_client->adapter; 234 struct i2c_adapter *adapter = new_client->adapter;
@@ -295,7 +289,7 @@ static struct i2c_driver lm75_driver = {
295 .remove = lm75_remove, 289 .remove = lm75_remove,
296 .id_table = lm75_ids, 290 .id_table = lm75_ids,
297 .detect = lm75_detect, 291 .detect = lm75_detect,
298 .address_data = &addr_data, 292 .address_list = normal_i2c,
299}; 293};
300 294
301/*-----------------------------------------------------------------------*/ 295/*-----------------------------------------------------------------------*/
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index ac067fd19482..b28a297be50c 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -39,9 +39,6 @@
39static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 39static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
40 I2C_CLIENT_END }; 40 I2C_CLIENT_END };
41 41
42/* Insmod parameters */
43I2C_CLIENT_INSMOD_1(lm77);
44
45/* The LM77 registers */ 42/* The LM77 registers */
46#define LM77_REG_TEMP 0x00 43#define LM77_REG_TEMP 0x00
47#define LM77_REG_CONF 0x01 44#define LM77_REG_CONF 0x01
@@ -66,8 +63,7 @@ struct lm77_data {
66 63
67static int lm77_probe(struct i2c_client *client, 64static int lm77_probe(struct i2c_client *client,
68 const struct i2c_device_id *id); 65 const struct i2c_device_id *id);
69static int lm77_detect(struct i2c_client *client, int kind, 66static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info);
70 struct i2c_board_info *info);
71static void lm77_init_client(struct i2c_client *client); 67static void lm77_init_client(struct i2c_client *client);
72static int lm77_remove(struct i2c_client *client); 68static int lm77_remove(struct i2c_client *client);
73static u16 lm77_read_value(struct i2c_client *client, u8 reg); 69static u16 lm77_read_value(struct i2c_client *client, u8 reg);
@@ -77,7 +73,7 @@ static struct lm77_data *lm77_update_device(struct device *dev);
77 73
78 74
79static const struct i2c_device_id lm77_id[] = { 75static const struct i2c_device_id lm77_id[] = {
80 { "lm77", lm77 }, 76 { "lm77", 0 },
81 { } 77 { }
82}; 78};
83MODULE_DEVICE_TABLE(i2c, lm77_id); 79MODULE_DEVICE_TABLE(i2c, lm77_id);
@@ -92,7 +88,7 @@ static struct i2c_driver lm77_driver = {
92 .remove = lm77_remove, 88 .remove = lm77_remove,
93 .id_table = lm77_id, 89 .id_table = lm77_id,
94 .detect = lm77_detect, 90 .detect = lm77_detect,
95 .address_data = &addr_data, 91 .address_list = normal_i2c,
96}; 92};
97 93
98/* straight from the datasheet */ 94/* straight from the datasheet */
@@ -245,7 +241,7 @@ static const struct attribute_group lm77_group = {
245}; 241};
246 242
247/* Return 0 if detection is successful, -ENODEV otherwise */ 243/* Return 0 if detection is successful, -ENODEV otherwise */
248static int lm77_detect(struct i2c_client *new_client, int kind, 244static int lm77_detect(struct i2c_client *new_client,
249 struct i2c_board_info *info) 245 struct i2c_board_info *info)
250{ 246{
251 struct i2c_adapter *adapter = new_client->adapter; 247 struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 5978291cebb3..cadcbd90ff3b 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -41,8 +41,7 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
41 0x2e, 0x2f, I2C_CLIENT_END }; 41 0x2e, 0x2f, I2C_CLIENT_END };
42static unsigned short isa_address = 0x290; 42static unsigned short isa_address = 0x290;
43 43
44/* Insmod parameters */ 44enum chips { lm78, lm79 };
45I2C_CLIENT_INSMOD_2(lm78, lm79);
46 45
47/* Many LM78 constants specified below */ 46/* Many LM78 constants specified below */
48 47
@@ -142,7 +141,7 @@ struct lm78_data {
142}; 141};
143 142
144 143
145static int lm78_i2c_detect(struct i2c_client *client, int kind, 144static int lm78_i2c_detect(struct i2c_client *client,
146 struct i2c_board_info *info); 145 struct i2c_board_info *info);
147static int lm78_i2c_probe(struct i2c_client *client, 146static int lm78_i2c_probe(struct i2c_client *client,
148 const struct i2c_device_id *id); 147 const struct i2c_device_id *id);
@@ -173,7 +172,7 @@ static struct i2c_driver lm78_driver = {
173 .remove = lm78_i2c_remove, 172 .remove = lm78_i2c_remove,
174 .id_table = lm78_i2c_id, 173 .id_table = lm78_i2c_id,
175 .detect = lm78_i2c_detect, 174 .detect = lm78_i2c_detect,
176 .address_data = &addr_data, 175 .address_list = normal_i2c,
177}; 176};
178 177
179static struct platform_driver lm78_isa_driver = { 178static struct platform_driver lm78_isa_driver = {
@@ -558,7 +557,7 @@ static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
558 return 1; 557 return 1;
559} 558}
560 559
561static int lm78_i2c_detect(struct i2c_client *client, int kind, 560static int lm78_i2c_detect(struct i2c_client *client,
562 struct i2c_board_info *info) 561 struct i2c_board_info *info)
563{ 562{
564 int i; 563 int i;
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index bcffc1899403..18a0e6c5fe88 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -35,9 +35,6 @@
35static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 35static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
36 0x2e, 0x2f, I2C_CLIENT_END }; 36 0x2e, 0x2f, I2C_CLIENT_END };
37 37
38/* Insmod parameters */
39I2C_CLIENT_INSMOD_1(lm80);
40
41/* Many LM80 constants specified below */ 38/* Many LM80 constants specified below */
42 39
43/* The LM80 registers */ 40/* The LM80 registers */
@@ -133,8 +130,7 @@ struct lm80_data {
133 130
134static int lm80_probe(struct i2c_client *client, 131static int lm80_probe(struct i2c_client *client,
135 const struct i2c_device_id *id); 132 const struct i2c_device_id *id);
136static int lm80_detect(struct i2c_client *client, int kind, 133static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info);
137 struct i2c_board_info *info);
138static void lm80_init_client(struct i2c_client *client); 134static void lm80_init_client(struct i2c_client *client);
139static int lm80_remove(struct i2c_client *client); 135static int lm80_remove(struct i2c_client *client);
140static struct lm80_data *lm80_update_device(struct device *dev); 136static struct lm80_data *lm80_update_device(struct device *dev);
@@ -146,7 +142,7 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
146 */ 142 */
147 143
148static const struct i2c_device_id lm80_id[] = { 144static const struct i2c_device_id lm80_id[] = {
149 { "lm80", lm80 }, 145 { "lm80", 0 },
150 { } 146 { }
151}; 147};
152MODULE_DEVICE_TABLE(i2c, lm80_id); 148MODULE_DEVICE_TABLE(i2c, lm80_id);
@@ -160,7 +156,7 @@ static struct i2c_driver lm80_driver = {
160 .remove = lm80_remove, 156 .remove = lm80_remove,
161 .id_table = lm80_id, 157 .id_table = lm80_id,
162 .detect = lm80_detect, 158 .detect = lm80_detect,
163 .address_data = &addr_data, 159 .address_list = normal_i2c,
164}; 160};
165 161
166/* 162/*
@@ -447,8 +443,7 @@ static const struct attribute_group lm80_group = {
447}; 443};
448 444
449/* Return 0 if detection is successful, -ENODEV otherwise */ 445/* Return 0 if detection is successful, -ENODEV otherwise */
450static int lm80_detect(struct i2c_client *client, int kind, 446static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
451 struct i2c_board_info *info)
452{ 447{
453 struct i2c_adapter *adapter = client->adapter; 448 struct i2c_adapter *adapter = client->adapter;
454 int i, cur; 449 int i, cur;
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 08b03e6ed0b7..8290476aee4a 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -51,11 +51,7 @@
51static const unsigned short normal_i2c[] = { 51static const unsigned short normal_i2c[] = {
52 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; 52 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
53 53
54/* 54enum chips { lm83, lm82 };
55 * Insmod parameters
56 */
57
58I2C_CLIENT_INSMOD_2(lm83, lm82);
59 55
60/* 56/*
61 * The LM83 registers 57 * The LM83 registers
@@ -118,7 +114,7 @@ static const u8 LM83_REG_W_HIGH[] = {
118 * Functions declaration 114 * Functions declaration
119 */ 115 */
120 116
121static int lm83_detect(struct i2c_client *new_client, int kind, 117static int lm83_detect(struct i2c_client *new_client,
122 struct i2c_board_info *info); 118 struct i2c_board_info *info);
123static int lm83_probe(struct i2c_client *client, 119static int lm83_probe(struct i2c_client *client,
124 const struct i2c_device_id *id); 120 const struct i2c_device_id *id);
@@ -145,7 +141,7 @@ static struct i2c_driver lm83_driver = {
145 .remove = lm83_remove, 141 .remove = lm83_remove,
146 .id_table = lm83_id, 142 .id_table = lm83_id,
147 .detect = lm83_detect, 143 .detect = lm83_detect,
148 .address_data = &addr_data, 144 .address_list = normal_i2c,
149}; 145};
150 146
151/* 147/*
@@ -291,7 +287,7 @@ static const struct attribute_group lm83_group_opt = {
291 */ 287 */
292 288
293/* Return 0 if detection is successful, -ENODEV otherwise */ 289/* Return 0 if detection is successful, -ENODEV otherwise */
294static int lm83_detect(struct i2c_client *new_client, int kind, 290static int lm83_detect(struct i2c_client *new_client,
295 struct i2c_board_info *info) 291 struct i2c_board_info *info)
296{ 292{
297 struct i2c_adapter *adapter = new_client->adapter; 293 struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d56da2e74708..b3841a615595 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -38,9 +38,11 @@
38/* Addresses to scan */ 38/* Addresses to scan */
39static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 39static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
40 40
41/* Insmod parameters */ 41enum chips {
42I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100, 42 any_chip, lm85b, lm85c,
43 emc6d102); 43 adm1027, adt7463, adt7468,
44 emc6d100, emc6d102
45};
44 46
45/* The LM85 registers */ 47/* The LM85 registers */
46 48
@@ -323,8 +325,7 @@ struct lm85_data {
323 struct lm85_zone zone[3]; 325 struct lm85_zone zone[3];
324}; 326};
325 327
326static int lm85_detect(struct i2c_client *client, int kind, 328static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info);
327 struct i2c_board_info *info);
328static int lm85_probe(struct i2c_client *client, 329static int lm85_probe(struct i2c_client *client,
329 const struct i2c_device_id *id); 330 const struct i2c_device_id *id);
330static int lm85_remove(struct i2c_client *client); 331static int lm85_remove(struct i2c_client *client);
@@ -357,7 +358,7 @@ static struct i2c_driver lm85_driver = {
357 .remove = lm85_remove, 358 .remove = lm85_remove,
358 .id_table = lm85_id, 359 .id_table = lm85_id,
359 .detect = lm85_detect, 360 .detect = lm85_detect,
360 .address_data = &addr_data, 361 .address_list = normal_i2c,
361}; 362};
362 363
363 364
@@ -1156,8 +1157,7 @@ static int lm85_is_fake(struct i2c_client *client)
1156} 1157}
1157 1158
1158/* Return 0 if detection is successful, -ENODEV otherwise */ 1159/* Return 0 if detection is successful, -ENODEV otherwise */
1159static int lm85_detect(struct i2c_client *client, int kind, 1160static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
1160 struct i2c_board_info *info)
1161{ 1161{
1162 struct i2c_adapter *adapter = client->adapter; 1162 struct i2c_adapter *adapter = client->adapter;
1163 int address = client->addr; 1163 int address = client->addr;
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 4929b1815eee..f1e6e7512ffa 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -74,11 +74,7 @@
74 74
75static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 75static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
76 76
77/* 77enum chips { lm87, adm1024 };
78 * Insmod parameters
79 */
80
81I2C_CLIENT_INSMOD_2(lm87, adm1024);
82 78
83/* 79/*
84 * The LM87 registers 80 * The LM87 registers
@@ -158,7 +154,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
158 154
159static int lm87_probe(struct i2c_client *client, 155static int lm87_probe(struct i2c_client *client,
160 const struct i2c_device_id *id); 156 const struct i2c_device_id *id);
161static int lm87_detect(struct i2c_client *new_client, int kind, 157static int lm87_detect(struct i2c_client *new_client,
162 struct i2c_board_info *info); 158 struct i2c_board_info *info);
163static void lm87_init_client(struct i2c_client *client); 159static void lm87_init_client(struct i2c_client *client);
164static int lm87_remove(struct i2c_client *client); 160static int lm87_remove(struct i2c_client *client);
@@ -184,7 +180,7 @@ static struct i2c_driver lm87_driver = {
184 .remove = lm87_remove, 180 .remove = lm87_remove,
185 .id_table = lm87_id, 181 .id_table = lm87_id,
186 .detect = lm87_detect, 182 .detect = lm87_detect,
187 .address_data = &addr_data, 183 .address_list = normal_i2c,
188}; 184};
189 185
190/* 186/*
@@ -662,7 +658,7 @@ static const struct attribute_group lm87_group_opt = {
662}; 658};
663 659
664/* Return 0 if detection is successful, -ENODEV otherwise */ 660/* Return 0 if detection is successful, -ENODEV otherwise */
665static int lm87_detect(struct i2c_client *new_client, int kind, 661static int lm87_detect(struct i2c_client *new_client,
666 struct i2c_board_info *info) 662 struct i2c_board_info *info)
667{ 663{
668 struct i2c_adapter *adapter = new_client->adapter; 664 struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index b7c905f50ed4..7c9bdc167426 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -93,12 +93,7 @@
93static const unsigned short normal_i2c[] = { 93static const unsigned short normal_i2c[] = {
94 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; 94 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
95 95
96/* 96enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646 };
97 * Insmod parameters
98 */
99
100I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680,
101 max6646);
102 97
103/* 98/*
104 * The LM90 registers 99 * The LM90 registers
@@ -152,8 +147,7 @@ I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680,
152 * Functions declaration 147 * Functions declaration
153 */ 148 */
154 149
155static int lm90_detect(struct i2c_client *client, int kind, 150static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info);
156 struct i2c_board_info *info);
157static int lm90_probe(struct i2c_client *client, 151static int lm90_probe(struct i2c_client *client,
158 const struct i2c_device_id *id); 152 const struct i2c_device_id *id);
159static void lm90_init_client(struct i2c_client *client); 153static void lm90_init_client(struct i2c_client *client);
@@ -192,7 +186,7 @@ static struct i2c_driver lm90_driver = {
192 .remove = lm90_remove, 186 .remove = lm90_remove,
193 .id_table = lm90_id, 187 .id_table = lm90_id,
194 .detect = lm90_detect, 188 .detect = lm90_detect,
195 .address_data = &addr_data, 189 .address_list = normal_i2c,
196}; 190};
197 191
198/* 192/*
@@ -656,7 +650,7 @@ static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value)
656} 650}
657 651
658/* Return 0 if detection is successful, -ENODEV otherwise */ 652/* Return 0 if detection is successful, -ENODEV otherwise */
659static int lm90_detect(struct i2c_client *new_client, int kind, 653static int lm90_detect(struct i2c_client *new_client,
660 struct i2c_board_info *info) 654 struct i2c_board_info *info)
661{ 655{
662 struct i2c_adapter *adapter = new_client->adapter; 656 struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 47ac698709dc..7c31e6205f85 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -54,9 +54,6 @@
54static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 54static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
55 I2C_CLIENT_END }; 55 I2C_CLIENT_END };
56 56
57/* Insmod parameters */
58I2C_CLIENT_INSMOD_1(lm92);
59
60/* The LM92 registers */ 57/* The LM92 registers */
61#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */ 58#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
62#define LM92_REG_TEMP 0x00 /* 16-bit, RO */ 59#define LM92_REG_TEMP 0x00 /* 16-bit, RO */
@@ -319,7 +316,7 @@ static const struct attribute_group lm92_group = {
319}; 316};
320 317
321/* Return 0 if detection is successful, -ENODEV otherwise */ 318/* Return 0 if detection is successful, -ENODEV otherwise */
322static int lm92_detect(struct i2c_client *new_client, int kind, 319static int lm92_detect(struct i2c_client *new_client,
323 struct i2c_board_info *info) 320 struct i2c_board_info *info)
324{ 321{
325 struct i2c_adapter *adapter = new_client->adapter; 322 struct i2c_adapter *adapter = new_client->adapter;
@@ -401,7 +398,7 @@ static int lm92_remove(struct i2c_client *client)
401 */ 398 */
402 399
403static const struct i2c_device_id lm92_id[] = { 400static const struct i2c_device_id lm92_id[] = {
404 { "lm92", lm92 }, 401 { "lm92", 0 },
405 /* max6635 could be added here */ 402 /* max6635 could be added here */
406 { } 403 { }
407}; 404};
@@ -416,7 +413,7 @@ static struct i2c_driver lm92_driver = {
416 .remove = lm92_remove, 413 .remove = lm92_remove,
417 .id_table = lm92_id, 414 .id_table = lm92_id,
418 .detect = lm92_detect, 415 .detect = lm92_detect,
419 .address_data = &addr_data, 416 .address_list = normal_i2c,
420}; 417};
421 418
422static int __init sensors_lm92_init(void) 419static int __init sensors_lm92_init(void)
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 124dd7cea54c..6669255aadcf 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -145,7 +145,6 @@
145static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 145static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
146 146
147/* Insmod parameters */ 147/* Insmod parameters */
148I2C_CLIENT_INSMOD_1(lm93);
149 148
150static int disable_block; 149static int disable_block;
151module_param(disable_block, bool, 0); 150module_param(disable_block, bool, 0);
@@ -2501,8 +2500,7 @@ static void lm93_init_client(struct i2c_client *client)
2501} 2500}
2502 2501
2503/* Return 0 if detection is successful, -ENODEV otherwise */ 2502/* Return 0 if detection is successful, -ENODEV otherwise */
2504static int lm93_detect(struct i2c_client *client, int kind, 2503static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
2505 struct i2c_board_info *info)
2506{ 2504{
2507 struct i2c_adapter *adapter = client->adapter; 2505 struct i2c_adapter *adapter = client->adapter;
2508 int mfr, ver; 2506 int mfr, ver;
@@ -2603,7 +2601,7 @@ static int lm93_remove(struct i2c_client *client)
2603} 2601}
2604 2602
2605static const struct i2c_device_id lm93_id[] = { 2603static const struct i2c_device_id lm93_id[] = {
2606 { "lm93", lm93 }, 2604 { "lm93", 0 },
2607 { } 2605 { }
2608}; 2606};
2609MODULE_DEVICE_TABLE(i2c, lm93_id); 2607MODULE_DEVICE_TABLE(i2c, lm93_id);
@@ -2617,7 +2615,7 @@ static struct i2c_driver lm93_driver = {
2617 .remove = lm93_remove, 2615 .remove = lm93_remove,
2618 .id_table = lm93_id, 2616 .id_table = lm93_id,
2619 .detect = lm93_detect, 2617 .detect = lm93_detect,
2620 .address_data = &addr_data, 2618 .address_list = normal_i2c,
2621}; 2619};
2622 2620
2623static int __init lm93_init(void) 2621static int __init lm93_init(void)
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 906b896cf1d0..8fc8eb8cba47 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -39,9 +39,6 @@
39static const unsigned short normal_i2c[] = { 39static const unsigned short normal_i2c[] = {
40 0x19, 0x2a, 0x2b, I2C_CLIENT_END}; 40 0x19, 0x2a, 0x2b, I2C_CLIENT_END};
41 41
42/* Insmod parameters */
43I2C_CLIENT_INSMOD_1(lm95241);
44
45/* LM95241 registers */ 42/* LM95241 registers */
46#define LM95241_REG_R_MAN_ID 0xFE 43#define LM95241_REG_R_MAN_ID 0xFE
47#define LM95241_REG_R_CHIP_ID 0xFF 44#define LM95241_REG_R_CHIP_ID 0xFF
@@ -310,7 +307,7 @@ static const struct attribute_group lm95241_group = {
310}; 307};
311 308
312/* Return 0 if detection is successful, -ENODEV otherwise */ 309/* Return 0 if detection is successful, -ENODEV otherwise */
313static int lm95241_detect(struct i2c_client *new_client, int kind, 310static int lm95241_detect(struct i2c_client *new_client,
314 struct i2c_board_info *info) 311 struct i2c_board_info *info)
315{ 312{
316 struct i2c_adapter *adapter = new_client->adapter; 313 struct i2c_adapter *adapter = new_client->adapter;
@@ -446,7 +443,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
446 443
447/* Driver data (common to all clients) */ 444/* Driver data (common to all clients) */
448static const struct i2c_device_id lm95241_id[] = { 445static const struct i2c_device_id lm95241_id[] = {
449 { "lm95241", lm95241 }, 446 { "lm95241", 0 },
450 { } 447 { }
451}; 448};
452MODULE_DEVICE_TABLE(i2c, lm95241_id); 449MODULE_DEVICE_TABLE(i2c, lm95241_id);
@@ -460,7 +457,7 @@ static struct i2c_driver lm95241_driver = {
460 .remove = lm95241_remove, 457 .remove = lm95241_remove,
461 .id_table = lm95241_id, 458 .id_table = lm95241_id,
462 .detect = lm95241_detect, 459 .detect = lm95241_detect,
463 .address_data = &addr_data, 460 .address_list = normal_i2c,
464}; 461};
465 462
466static int __init sensors_lm95241_init(void) 463static int __init sensors_lm95241_init(void)
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 7fcf5ff89e7f..022ded098100 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -41,12 +41,6 @@ static const unsigned short normal_i2c[] = {
41 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; 41 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
42 42
43/* 43/*
44 * Insmod parameters
45 */
46
47I2C_CLIENT_INSMOD_1(max1619);
48
49/*
50 * The MAX1619 registers 44 * The MAX1619 registers
51 */ 45 */
52 46
@@ -88,7 +82,7 @@ static int temp_to_reg(int val)
88 82
89static int max1619_probe(struct i2c_client *client, 83static int max1619_probe(struct i2c_client *client,
90 const struct i2c_device_id *id); 84 const struct i2c_device_id *id);
91static int max1619_detect(struct i2c_client *client, int kind, 85static int max1619_detect(struct i2c_client *client,
92 struct i2c_board_info *info); 86 struct i2c_board_info *info);
93static void max1619_init_client(struct i2c_client *client); 87static void max1619_init_client(struct i2c_client *client);
94static int max1619_remove(struct i2c_client *client); 88static int max1619_remove(struct i2c_client *client);
@@ -99,7 +93,7 @@ static struct max1619_data *max1619_update_device(struct device *dev);
99 */ 93 */
100 94
101static const struct i2c_device_id max1619_id[] = { 95static const struct i2c_device_id max1619_id[] = {
102 { "max1619", max1619 }, 96 { "max1619", 0 },
103 { } 97 { }
104}; 98};
105MODULE_DEVICE_TABLE(i2c, max1619_id); 99MODULE_DEVICE_TABLE(i2c, max1619_id);
@@ -113,7 +107,7 @@ static struct i2c_driver max1619_driver = {
113 .remove = max1619_remove, 107 .remove = max1619_remove,
114 .id_table = max1619_id, 108 .id_table = max1619_id,
115 .detect = max1619_detect, 109 .detect = max1619_detect,
116 .address_data = &addr_data, 110 .address_list = normal_i2c,
117}; 111};
118 112
119/* 113/*
@@ -226,7 +220,7 @@ static const struct attribute_group max1619_group = {
226 */ 220 */
227 221
228/* Return 0 if detection is successful, -ENODEV otherwise */ 222/* Return 0 if detection is successful, -ENODEV otherwise */
229static int max1619_detect(struct i2c_client *client, int kind, 223static int max1619_detect(struct i2c_client *client,
230 struct i2c_board_info *info) 224 struct i2c_board_info *info)
231{ 225{
232 struct i2c_adapter *adapter = client->adapter; 226 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 1da561e0cb37..a0160ee5caef 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -62,8 +62,6 @@ module_param(fan_voltage, int, S_IRUGO);
62module_param(prescaler, int, S_IRUGO); 62module_param(prescaler, int, S_IRUGO);
63module_param(clock, int, S_IRUGO); 63module_param(clock, int, S_IRUGO);
64 64
65I2C_CLIENT_INSMOD_1(max6650);
66
67/* 65/*
68 * MAX 6650/6651 registers 66 * MAX 6650/6651 registers
69 */ 67 */
@@ -116,7 +114,7 @@ I2C_CLIENT_INSMOD_1(max6650);
116 114
117static int max6650_probe(struct i2c_client *client, 115static int max6650_probe(struct i2c_client *client,
118 const struct i2c_device_id *id); 116 const struct i2c_device_id *id);
119static int max6650_detect(struct i2c_client *client, int kind, 117static int max6650_detect(struct i2c_client *client,
120 struct i2c_board_info *info); 118 struct i2c_board_info *info);
121static int max6650_init_client(struct i2c_client *client); 119static int max6650_init_client(struct i2c_client *client);
122static int max6650_remove(struct i2c_client *client); 120static int max6650_remove(struct i2c_client *client);
@@ -127,7 +125,7 @@ static struct max6650_data *max6650_update_device(struct device *dev);
127 */ 125 */
128 126
129static const struct i2c_device_id max6650_id[] = { 127static const struct i2c_device_id max6650_id[] = {
130 { "max6650", max6650 }, 128 { "max6650", 0 },
131 { } 129 { }
132}; 130};
133MODULE_DEVICE_TABLE(i2c, max6650_id); 131MODULE_DEVICE_TABLE(i2c, max6650_id);
@@ -141,7 +139,7 @@ static struct i2c_driver max6650_driver = {
141 .remove = max6650_remove, 139 .remove = max6650_remove,
142 .id_table = max6650_id, 140 .id_table = max6650_id,
143 .detect = max6650_detect, 141 .detect = max6650_detect,
144 .address_data = &addr_data, 142 .address_list = normal_i2c,
145}; 143};
146 144
147/* 145/*
@@ -528,7 +526,7 @@ static struct attribute_group max6650_attr_grp = {
528 */ 526 */
529 527
530/* Return 0 if detection is successful, -ENODEV otherwise */ 528/* Return 0 if detection is successful, -ENODEV otherwise */
531static int max6650_detect(struct i2c_client *client, int kind, 529static int max6650_detect(struct i2c_client *client,
532 struct i2c_board_info *info) 530 struct i2c_board_info *info)
533{ 531{
534 struct i2c_adapter *adapter = client->adapter; 532 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 1d7ffebd679d..d44787949851 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -29,7 +29,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
29 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 29 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
30 30
31/* Insmod parameters */ 31/* Insmod parameters */
32I2C_CLIENT_INSMOD_1(pcf8591);
33 32
34static int input_mode; 33static int input_mode;
35module_param(input_mode, int, 0); 34module_param(input_mode, int, 0);
@@ -169,7 +168,7 @@ static const struct attribute_group pcf8591_attr_group_opt = {
169 */ 168 */
170 169
171/* Return 0 if detection is successful, -ENODEV otherwise */ 170/* Return 0 if detection is successful, -ENODEV otherwise */
172static int pcf8591_detect(struct i2c_client *client, int kind, 171static int pcf8591_detect(struct i2c_client *client,
173 struct i2c_board_info *info) 172 struct i2c_board_info *info)
174{ 173{
175 struct i2c_adapter *adapter = client->adapter; 174 struct i2c_adapter *adapter = client->adapter;
@@ -299,7 +298,7 @@ static struct i2c_driver pcf8591_driver = {
299 298
300 .class = I2C_CLASS_HWMON, /* Nearest choice */ 299 .class = I2C_CLASS_HWMON, /* Nearest choice */
301 .detect = pcf8591_detect, 300 .detect = pcf8591_detect,
302 .address_data = &addr_data, 301 .address_list = normal_i2c,
303}; 302};
304 303
305static int __init pcf8591_init(void) 304static int __init pcf8591_init(void)
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 4d88c045781c..40b26673d87f 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -36,9 +36,6 @@
36/* Addresses to scan */ 36/* Addresses to scan */
37static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 37static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
38 38
39/* Insmod parameters */
40I2C_CLIENT_INSMOD_1(smsc47m192);
41
42/* SMSC47M192 registers */ 39/* SMSC47M192 registers */
43#define SMSC47M192_REG_IN(nr) ((nr)<6 ? (0x20 + (nr)) : \ 40#define SMSC47M192_REG_IN(nr) ((nr)<6 ? (0x20 + (nr)) : \
44 (0x50 + (nr) - 6)) 41 (0x50 + (nr) - 6))
@@ -115,13 +112,13 @@ struct smsc47m192_data {
115 112
116static int smsc47m192_probe(struct i2c_client *client, 113static int smsc47m192_probe(struct i2c_client *client,
117 const struct i2c_device_id *id); 114 const struct i2c_device_id *id);
118static int smsc47m192_detect(struct i2c_client *client, int kind, 115static int smsc47m192_detect(struct i2c_client *client,
119 struct i2c_board_info *info); 116 struct i2c_board_info *info);
120static int smsc47m192_remove(struct i2c_client *client); 117static int smsc47m192_remove(struct i2c_client *client);
121static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); 118static struct smsc47m192_data *smsc47m192_update_device(struct device *dev);
122 119
123static const struct i2c_device_id smsc47m192_id[] = { 120static const struct i2c_device_id smsc47m192_id[] = {
124 { "smsc47m192", smsc47m192 }, 121 { "smsc47m192", 0 },
125 { } 122 { }
126}; 123};
127MODULE_DEVICE_TABLE(i2c, smsc47m192_id); 124MODULE_DEVICE_TABLE(i2c, smsc47m192_id);
@@ -135,7 +132,7 @@ static struct i2c_driver smsc47m192_driver = {
135 .remove = smsc47m192_remove, 132 .remove = smsc47m192_remove,
136 .id_table = smsc47m192_id, 133 .id_table = smsc47m192_id,
137 .detect = smsc47m192_detect, 134 .detect = smsc47m192_detect,
138 .address_data = &addr_data, 135 .address_list = normal_i2c,
139}; 136};
140 137
141/* Voltages */ 138/* Voltages */
@@ -481,7 +478,7 @@ static void smsc47m192_init_client(struct i2c_client *client)
481} 478}
482 479
483/* Return 0 if detection is successful, -ENODEV otherwise */ 480/* Return 0 if detection is successful, -ENODEV otherwise */
484static int smsc47m192_detect(struct i2c_client *client, int kind, 481static int smsc47m192_detect(struct i2c_client *client,
485 struct i2c_board_info *info) 482 struct i2c_board_info *info)
486{ 483{
487 struct i2c_adapter *adapter = client->adapter; 484 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 4b793849c738..7dfb4dec4c5f 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
35static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 35static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
36 36
37/* Insmod parameters */ 37/* Insmod parameters */
38I2C_CLIENT_INSMOD_2(thmc50, adm1022); 38enum chips { thmc50, adm1022 };
39 39
40static unsigned short adm1022_temp3[16]; 40static unsigned short adm1022_temp3[16];
41static unsigned int adm1022_temp3_num; 41static unsigned int adm1022_temp3_num;
@@ -84,7 +84,7 @@ struct thmc50_data {
84 u8 alarms; 84 u8 alarms;
85}; 85};
86 86
87static int thmc50_detect(struct i2c_client *client, int kind, 87static int thmc50_detect(struct i2c_client *client,
88 struct i2c_board_info *info); 88 struct i2c_board_info *info);
89static int thmc50_probe(struct i2c_client *client, 89static int thmc50_probe(struct i2c_client *client,
90 const struct i2c_device_id *id); 90 const struct i2c_device_id *id);
@@ -108,7 +108,7 @@ static struct i2c_driver thmc50_driver = {
108 .remove = thmc50_remove, 108 .remove = thmc50_remove,
109 .id_table = thmc50_id, 109 .id_table = thmc50_id,
110 .detect = thmc50_detect, 110 .detect = thmc50_detect,
111 .address_data = &addr_data, 111 .address_list = normal_i2c,
112}; 112};
113 113
114static ssize_t show_analog_out(struct device *dev, 114static ssize_t show_analog_out(struct device *dev,
@@ -286,7 +286,7 @@ static const struct attribute_group temp3_group = {
286}; 286};
287 287
288/* Return 0 if detection is successful, -ENODEV otherwise */ 288/* Return 0 if detection is successful, -ENODEV otherwise */
289static int thmc50_detect(struct i2c_client *client, int kind, 289static int thmc50_detect(struct i2c_client *client,
290 struct i2c_board_info *info) 290 struct i2c_board_info *info)
291{ 291{
292 unsigned company; 292 unsigned company;
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ee9673467c4a..a13b30e8d8d8 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -42,8 +42,7 @@
42/* Addresses to scan */ 42/* Addresses to scan */
43static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; 43static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
44 44
45/* Insmod parameters */ 45enum chips { tmp401, tmp411 };
46I2C_CLIENT_INSMOD_2(tmp401, tmp411);
47 46
48/* 47/*
49 * The TMP401 registers, note some registers have different addresses for 48 * The TMP401 registers, note some registers have different addresses for
@@ -98,7 +97,7 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
98 97
99static int tmp401_probe(struct i2c_client *client, 98static int tmp401_probe(struct i2c_client *client,
100 const struct i2c_device_id *id); 99 const struct i2c_device_id *id);
101static int tmp401_detect(struct i2c_client *client, int kind, 100static int tmp401_detect(struct i2c_client *client,
102 struct i2c_board_info *info); 101 struct i2c_board_info *info);
103static int tmp401_remove(struct i2c_client *client); 102static int tmp401_remove(struct i2c_client *client);
104static struct tmp401_data *tmp401_update_device(struct device *dev); 103static struct tmp401_data *tmp401_update_device(struct device *dev);
@@ -123,7 +122,7 @@ static struct i2c_driver tmp401_driver = {
123 .remove = tmp401_remove, 122 .remove = tmp401_remove,
124 .id_table = tmp401_id, 123 .id_table = tmp401_id,
125 .detect = tmp401_detect, 124 .detect = tmp401_detect,
126 .address_data = &addr_data, 125 .address_list = normal_i2c,
127}; 126};
128 127
129/* 128/*
@@ -488,7 +487,7 @@ static void tmp401_init_client(struct i2c_client *client)
488 i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config); 487 i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config);
489} 488}
490 489
491static int tmp401_detect(struct i2c_client *client, int _kind, 490static int tmp401_detect(struct i2c_client *client,
492 struct i2c_board_info *info) 491 struct i2c_board_info *info)
493{ 492{
494 enum chips kind; 493 enum chips kind;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index bb5464a289ca..4f7c051e2d7b 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -39,8 +39,7 @@
39static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f, 39static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
40 I2C_CLIENT_END }; 40 I2C_CLIENT_END };
41 41
42/* Insmod parameters */ 42enum chips { tmp421, tmp422, tmp423 };
43I2C_CLIENT_INSMOD_3(tmp421, tmp422, tmp423);
44 43
45/* The TMP421 registers */ 44/* The TMP421 registers */
46#define TMP421_CONFIG_REG_1 0x09 45#define TMP421_CONFIG_REG_1 0x09
@@ -223,7 +222,7 @@ static int tmp421_init_client(struct i2c_client *client)
223 return 0; 222 return 0;
224} 223}
225 224
226static int tmp421_detect(struct i2c_client *client, int _kind, 225static int tmp421_detect(struct i2c_client *client,
227 struct i2c_board_info *info) 226 struct i2c_board_info *info)
228{ 227{
229 enum chips kind; 228 enum chips kind;
@@ -322,7 +321,7 @@ static struct i2c_driver tmp421_driver = {
322 .remove = tmp421_remove, 321 .remove = tmp421_remove,
323 .id_table = tmp421_id, 322 .id_table = tmp421_id,
324 .detect = tmp421_detect, 323 .detect = tmp421_detect,
325 .address_data = &addr_data, 324 .address_list = normal_i2c,
326}; 325};
327 326
328static int __init tmp421_init(void) 327static int __init tmp421_init(void)
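For drivers that still distinguish several chips (thmc50/adm1022, tmp401/tmp411, tmp421/tmp422/tmp423 above), the information that used to arrive through the removed kind argument is now derived entirely inside detect(). A rough sketch of that shape; the example_* names, the register address and the ID values are invented, not taken from the patch:

#include <linux/i2c.h>

#define EXAMPLE_REG_CHIP_ID	0xff	/* invented register, for the sketch only */

enum chips { example_a, example_b };

static const char * const example_names[] = {
	[example_a]	= "example_a",
	[example_b]	= "example_b",
};

static int example_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	enum chips kind;
	int id;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	id = i2c_smbus_read_byte_data(client, EXAMPLE_REG_CHIP_ID);
	switch (id) {
	case 0x11:
		kind = example_a;
		break;
	case 0x21:
		kind = example_b;
		break;
	default:
		return -ENODEV;	/* unknown or unreadable ID */
	}

	strlcpy(info->type, example_names[kind], I2C_NAME_SIZE);
	return 0;
}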
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index bb5e78748783..0dcaba9b7189 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -5,6 +5,7 @@
5 Copyright (C) 2006 Yuan Mu (Winbond), 5 Copyright (C) 2006 Yuan Mu (Winbond),
6 Rudolf Marek <r.marek@assembler.cz> 6 Rudolf Marek <r.marek@assembler.cz>
7 David Hubbard <david.c.hubbard@gmail.com> 7 David Hubbard <david.c.hubbard@gmail.com>
8 Daniel J Blueman <daniel.blueman@gmail.com>
8 9
9 Shamelessly ripped from the w83627hf driver 10 Shamelessly ripped from the w83627hf driver
10 Copyright (C) 2003 Mark Studebaker 11 Copyright (C) 2003 Mark Studebaker
@@ -177,12 +178,15 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
177#define W83627EHF_REG_ALARM3 0x45B 178#define W83627EHF_REG_ALARM3 0x45B
178 179
179/* SmartFan registers */ 180/* SmartFan registers */
181#define W83627EHF_REG_FAN_STEPUP_TIME 0x0f
182#define W83627EHF_REG_FAN_STEPDOWN_TIME 0x0e
183
180/* DC or PWM output fan configuration */ 184/* DC or PWM output fan configuration */
181static const u8 W83627EHF_REG_PWM_ENABLE[] = { 185static const u8 W83627EHF_REG_PWM_ENABLE[] = {
182 0x04, /* SYS FAN0 output mode and PWM mode */ 186 0x04, /* SYS FAN0 output mode and PWM mode */
183 0x04, /* CPU FAN0 output mode and PWM mode */ 187 0x04, /* CPU FAN0 output mode and PWM mode */
184 0x12, /* AUX FAN mode */ 188 0x12, /* AUX FAN mode */
185 0x62, /* CPU fan1 mode */ 189 0x62, /* CPU FAN1 mode */
186}; 190};
187 191
188static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 }; 192static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 };
@@ -193,10 +197,12 @@ static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
193static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 }; 197static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
194static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 }; 198static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 };
195 199
196
197/* Advanced Fan control, some values are common for all fans */ 200/* Advanced Fan control, some values are common for all fans */
198static const u8 W83627EHF_REG_FAN_MIN_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 }; 201static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
199static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0C, 0x0D, 0x17, 0x66 }; 202static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
203static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
204static const u8 W83627EHF_REG_FAN_MAX_OUTPUT[] = { 0xff, 0x67, 0xff, 0x69 };
205static const u8 W83627EHF_REG_FAN_STEP_OUTPUT[] = { 0xff, 0x68, 0xff, 0x6a };
200 206
201/* 207/*
202 * Conversions 208 * Conversions
@@ -295,14 +301,19 @@ struct w83627ehf_data {
295 301
296 u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */ 302 u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */
297 u8 pwm_enable[4]; /* 1->manual 303 u8 pwm_enable[4]; /* 1->manual
298 2->thermal cruise (also called SmartFan I) */ 304 2->thermal cruise mode (also called SmartFan I)
305 3->fan speed cruise mode
306 4->variable thermal cruise (also called SmartFan III) */
299 u8 pwm_num; /* number of pwm */ 307 u8 pwm_num; /* number of pwm */
300 u8 pwm[4]; 308 u8 pwm[4];
301 u8 target_temp[4]; 309 u8 target_temp[4];
302 u8 tolerance[4]; 310 u8 tolerance[4];
303 311
304 u8 fan_min_output[4]; /* minimum fan speed */ 312 u8 fan_start_output[4]; /* minimum fan speed when spinning up */
305 u8 fan_stop_time[4]; 313 u8 fan_stop_output[4]; /* minimum fan speed when spinning down */
314 u8 fan_stop_time[4]; /* time at minimum before disabling fan */
315 u8 fan_max_output[4]; /* maximum fan speed */
316 u8 fan_step_output[4]; /* rate of change output value */
306 317
307 u8 vid; 318 u8 vid;
308 u8 vrm; 319 u8 vrm;
@@ -529,8 +540,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
529 & 3) + 1; 540 & 3) + 1;
530 data->pwm[i] = w83627ehf_read_value(data, 541 data->pwm[i] = w83627ehf_read_value(data,
531 W83627EHF_REG_PWM[i]); 542 W83627EHF_REG_PWM[i]);
532 data->fan_min_output[i] = w83627ehf_read_value(data, 543 data->fan_start_output[i] = w83627ehf_read_value(data,
533 W83627EHF_REG_FAN_MIN_OUTPUT[i]); 544 W83627EHF_REG_FAN_START_OUTPUT[i]);
545 data->fan_stop_output[i] = w83627ehf_read_value(data,
546 W83627EHF_REG_FAN_STOP_OUTPUT[i]);
534 data->fan_stop_time[i] = w83627ehf_read_value(data, 547 data->fan_stop_time[i] = w83627ehf_read_value(data,
535 W83627EHF_REG_FAN_STOP_TIME[i]); 548 W83627EHF_REG_FAN_STOP_TIME[i]);
536 data->target_temp[i] = 549 data->target_temp[i] =
@@ -976,7 +989,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
976 u32 val = simple_strtoul(buf, NULL, 10); 989 u32 val = simple_strtoul(buf, NULL, 10);
977 u16 reg; 990 u16 reg;
978 991
979 if (!val || (val > 2)) /* only modes 1 and 2 are supported */ 992 if (!val || (val > 4))
980 return -EINVAL; 993 return -EINVAL;
981 mutex_lock(&data->update_lock); 994 mutex_lock(&data->update_lock);
982 reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); 995 reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
@@ -1118,7 +1131,10 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
1118 return count; \ 1131 return count; \
1119} 1132}
1120 1133
1121fan_functions(fan_min_output, FAN_MIN_OUTPUT) 1134fan_functions(fan_start_output, FAN_START_OUTPUT)
1135fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
1136fan_functions(fan_max_output, FAN_MAX_OUTPUT)
1137fan_functions(fan_step_output, FAN_STEP_OUTPUT)
1122 1138
1123#define fan_time_functions(reg, REG) \ 1139#define fan_time_functions(reg, REG) \
1124static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ 1140static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
@@ -1161,8 +1177,14 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
1161static struct sensor_device_attribute sda_sf3_arrays_fan4[] = { 1177static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
1162 SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, 1178 SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
1163 store_fan_stop_time, 3), 1179 store_fan_stop_time, 3),
1164 SENSOR_ATTR(pwm4_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, 1180 SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
1165 store_fan_min_output, 3), 1181 store_fan_start_output, 3),
1182 SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
1183 store_fan_stop_output, 3),
1184 SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
1185 store_fan_max_output, 3),
1186 SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
1187 store_fan_step_output, 3),
1166}; 1188};
1167 1189
1168static struct sensor_device_attribute sda_sf3_arrays[] = { 1190static struct sensor_device_attribute sda_sf3_arrays[] = {
@@ -1172,12 +1194,24 @@ static struct sensor_device_attribute sda_sf3_arrays[] = {
1172 store_fan_stop_time, 1), 1194 store_fan_stop_time, 1),
1173 SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, 1195 SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
1174 store_fan_stop_time, 2), 1196 store_fan_stop_time, 2),
1175 SENSOR_ATTR(pwm1_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, 1197 SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
1176 store_fan_min_output, 0), 1198 store_fan_start_output, 0),
1177 SENSOR_ATTR(pwm2_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, 1199 SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
1178 store_fan_min_output, 1), 1200 store_fan_start_output, 1),
1179 SENSOR_ATTR(pwm3_min_output, S_IWUSR | S_IRUGO, show_fan_min_output, 1201 SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
1180 store_fan_min_output, 2), 1202 store_fan_start_output, 2),
1203 SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
1204 store_fan_stop_output, 0),
1205 SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
1206 store_fan_stop_output, 1),
1207 SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
1208 store_fan_stop_output, 2),
1209
1210 /* pwm1 and pwm3 don't support max and step settings */
1211 SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
1212 store_fan_max_output, 1),
1213 SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
1214 store_fan_step_output, 1),
1181}; 1215};
1182 1216
1183static ssize_t 1217static ssize_t
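The w83627ehf changes above split the old pwmN_min_output control into separate start and stop output levels and, where the chip has the registers (pwm2 and pwm4 only, per the added comment), expose maximum and step output settings. As a userspace illustration only, not part of the patch: the sysfs location, the hwmon0 index and the 0-255 duty-cycle values below are assumptions:

/* Illustration only: path and values are assumed, not taken from the patch. */
#include <stdio.h>

static int write_attr(const char *base, const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	fclose(f);
	return 0;
}

int main(void)
{
	/* assumed location of the w83627ehf hwmon device */
	const char *base = "/sys/class/hwmon/hwmon0/device";

	write_attr(base, "pwm2_start_output", "100");	/* duty used when spinning up */
	write_attr(base, "pwm2_stop_output", "60");	/* floor before the fan is stopped */
	write_attr(base, "pwm2_max_output", "200");	/* never drive above this duty */
	write_attr(base, "pwm2_step_output", "10");	/* output change per update */
	return 0;
}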
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 7ab7967da0a0..05f9225b6f94 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -56,9 +56,10 @@
56/* Addresses to scan */ 56/* Addresses to scan */
57static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 57static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
58 0x2e, 0x2f, I2C_CLIENT_END }; 58 0x2e, 0x2f, I2C_CLIENT_END };
59/* Insmod parameters */
60I2C_CLIENT_INSMOD_4(w83781d, w83782d, w83783s, as99127f);
61 59
60enum chips { w83781d, w83782d, w83783s, as99127f };
61
62/* Insmod parameters */
62static unsigned short force_subclients[4]; 63static unsigned short force_subclients[4];
63module_param_array(force_subclients, short, NULL, 0); 64module_param_array(force_subclients, short, NULL, 0);
64MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " 65MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
@@ -1051,8 +1052,7 @@ w83781d_create_files(struct device *dev, int kind, int is_isa)
1051 1052
1052/* Return 0 if detection is successful, -ENODEV otherwise */ 1053/* Return 0 if detection is successful, -ENODEV otherwise */
1053static int 1054static int
1054w83781d_detect(struct i2c_client *client, int kind, 1055w83781d_detect(struct i2c_client *client, struct i2c_board_info *info)
1055 struct i2c_board_info *info)
1056{ 1056{
1057 int val1, val2; 1057 int val1, val2;
1058 struct w83781d_data *isa = w83781d_data_if_isa(); 1058 struct w83781d_data *isa = w83781d_data_if_isa();
@@ -1537,7 +1537,7 @@ static struct i2c_driver w83781d_driver = {
1537 .remove = w83781d_remove, 1537 .remove = w83781d_remove,
1538 .id_table = w83781d_ids, 1538 .id_table = w83781d_ids,
1539 .detect = w83781d_detect, 1539 .detect = w83781d_detect,
1540 .address_data = &addr_data, 1540 .address_list = normal_i2c,
1541}; 1541};
1542 1542
1543/* 1543/*
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 0410bf12c521..400a88bde278 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -52,7 +52,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
52 I2C_CLIENT_END }; 52 I2C_CLIENT_END };
53 53
54/* Insmod parameters */ 54/* Insmod parameters */
55I2C_CLIENT_INSMOD_1(w83791d);
56 55
57static unsigned short force_subclients[4]; 56static unsigned short force_subclients[4];
58module_param_array(force_subclients, short, NULL, 0); 57module_param_array(force_subclients, short, NULL, 0);
@@ -326,7 +325,7 @@ struct w83791d_data {
326 325
327static int w83791d_probe(struct i2c_client *client, 326static int w83791d_probe(struct i2c_client *client,
328 const struct i2c_device_id *id); 327 const struct i2c_device_id *id);
329static int w83791d_detect(struct i2c_client *client, int kind, 328static int w83791d_detect(struct i2c_client *client,
330 struct i2c_board_info *info); 329 struct i2c_board_info *info);
331static int w83791d_remove(struct i2c_client *client); 330static int w83791d_remove(struct i2c_client *client);
332 331
@@ -341,7 +340,7 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
341static void w83791d_init_client(struct i2c_client *client); 340static void w83791d_init_client(struct i2c_client *client);
342 341
343static const struct i2c_device_id w83791d_id[] = { 342static const struct i2c_device_id w83791d_id[] = {
344 { "w83791d", w83791d }, 343 { "w83791d", 0 },
345 { } 344 { }
346}; 345};
347MODULE_DEVICE_TABLE(i2c, w83791d_id); 346MODULE_DEVICE_TABLE(i2c, w83791d_id);
@@ -355,7 +354,7 @@ static struct i2c_driver w83791d_driver = {
355 .remove = w83791d_remove, 354 .remove = w83791d_remove,
356 .id_table = w83791d_id, 355 .id_table = w83791d_id,
357 .detect = w83791d_detect, 356 .detect = w83791d_detect,
358 .address_data = &addr_data, 357 .address_list = normal_i2c,
359}; 358};
360 359
361/* following are the sysfs callback functions */ 360/* following are the sysfs callback functions */
@@ -1259,7 +1258,7 @@ error_sc_0:
1259 1258
1260 1259
1261/* Return 0 if detection is successful, -ENODEV otherwise */ 1260/* Return 0 if detection is successful, -ENODEV otherwise */
1262static int w83791d_detect(struct i2c_client *client, int kind, 1261static int w83791d_detect(struct i2c_client *client,
1263 struct i2c_board_info *info) 1262 struct i2c_board_info *info)
1264{ 1263{
1265 struct i2c_adapter *adapter = client->adapter; 1264 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 38978851333f..679718e6b017 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -50,7 +50,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
50 I2C_CLIENT_END }; 50 I2C_CLIENT_END };
51 51
52/* Insmod parameters */ 52/* Insmod parameters */
53I2C_CLIENT_INSMOD_1(w83792d);
54 53
55static unsigned short force_subclients[4]; 54static unsigned short force_subclients[4];
56module_param_array(force_subclients, short, NULL, 0); 55module_param_array(force_subclients, short, NULL, 0);
@@ -302,7 +301,7 @@ struct w83792d_data {
302 301
303static int w83792d_probe(struct i2c_client *client, 302static int w83792d_probe(struct i2c_client *client,
304 const struct i2c_device_id *id); 303 const struct i2c_device_id *id);
305static int w83792d_detect(struct i2c_client *client, int kind, 304static int w83792d_detect(struct i2c_client *client,
306 struct i2c_board_info *info); 305 struct i2c_board_info *info);
307static int w83792d_remove(struct i2c_client *client); 306static int w83792d_remove(struct i2c_client *client);
308static struct w83792d_data *w83792d_update_device(struct device *dev); 307static struct w83792d_data *w83792d_update_device(struct device *dev);
@@ -314,7 +313,7 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
314static void w83792d_init_client(struct i2c_client *client); 313static void w83792d_init_client(struct i2c_client *client);
315 314
316static const struct i2c_device_id w83792d_id[] = { 315static const struct i2c_device_id w83792d_id[] = {
317 { "w83792d", w83792d }, 316 { "w83792d", 0 },
318 { } 317 { }
319}; 318};
320MODULE_DEVICE_TABLE(i2c, w83792d_id); 319MODULE_DEVICE_TABLE(i2c, w83792d_id);
@@ -328,7 +327,7 @@ static struct i2c_driver w83792d_driver = {
328 .remove = w83792d_remove, 327 .remove = w83792d_remove,
329 .id_table = w83792d_id, 328 .id_table = w83792d_id,
330 .detect = w83792d_detect, 329 .detect = w83792d_detect,
331 .address_data = &addr_data, 330 .address_list = normal_i2c,
332}; 331};
333 332
334static inline long in_count_from_reg(int nr, struct w83792d_data *data) 333static inline long in_count_from_reg(int nr, struct w83792d_data *data)
@@ -1263,7 +1262,7 @@ static const struct attribute_group w83792d_group = {
1263 1262
1264/* Return 0 if detection is successful, -ENODEV otherwise */ 1263/* Return 0 if detection is successful, -ENODEV otherwise */
1265static int 1264static int
1266w83792d_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) 1265w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
1267{ 1266{
1268 struct i2c_adapter *adapter = client->adapter; 1267 struct i2c_adapter *adapter = client->adapter;
1269 int val1, val2; 1268 int val1, val2;
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 80a2191bf127..9a2022b67495 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -41,7 +41,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
41 I2C_CLIENT_END }; 41 I2C_CLIENT_END };
42 42
43/* Insmod parameters */ 43/* Insmod parameters */
44I2C_CLIENT_INSMOD_1(w83793);
45 44
46static unsigned short force_subclients[4]; 45static unsigned short force_subclients[4];
47module_param_array(force_subclients, short, NULL, 0); 46module_param_array(force_subclients, short, NULL, 0);
@@ -230,7 +229,7 @@ static u8 w83793_read_value(struct i2c_client *client, u16 reg);
230static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value); 229static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
231static int w83793_probe(struct i2c_client *client, 230static int w83793_probe(struct i2c_client *client,
232 const struct i2c_device_id *id); 231 const struct i2c_device_id *id);
233static int w83793_detect(struct i2c_client *client, int kind, 232static int w83793_detect(struct i2c_client *client,
234 struct i2c_board_info *info); 233 struct i2c_board_info *info);
235static int w83793_remove(struct i2c_client *client); 234static int w83793_remove(struct i2c_client *client);
236static void w83793_init_client(struct i2c_client *client); 235static void w83793_init_client(struct i2c_client *client);
@@ -238,7 +237,7 @@ static void w83793_update_nonvolatile(struct device *dev);
238static struct w83793_data *w83793_update_device(struct device *dev); 237static struct w83793_data *w83793_update_device(struct device *dev);
239 238
240static const struct i2c_device_id w83793_id[] = { 239static const struct i2c_device_id w83793_id[] = {
241 { "w83793", w83793 }, 240 { "w83793", 0 },
242 { } 241 { }
243}; 242};
244MODULE_DEVICE_TABLE(i2c, w83793_id); 243MODULE_DEVICE_TABLE(i2c, w83793_id);
@@ -252,7 +251,7 @@ static struct i2c_driver w83793_driver = {
252 .remove = w83793_remove, 251 .remove = w83793_remove,
253 .id_table = w83793_id, 252 .id_table = w83793_id,
254 .detect = w83793_detect, 253 .detect = w83793_detect,
255 .address_data = &addr_data, 254 .address_list = normal_i2c,
256}; 255};
257 256
258static ssize_t 257static ssize_t
@@ -1161,7 +1160,7 @@ ERROR_SC_0:
1161} 1160}
1162 1161
1163/* Return 0 if detection is successful, -ENODEV otherwise */ 1162/* Return 0 if detection is successful, -ENODEV otherwise */
1164static int w83793_detect(struct i2c_client *client, int kind, 1163static int w83793_detect(struct i2c_client *client,
1165 struct i2c_board_info *info) 1164 struct i2c_board_info *info)
1166{ 1165{
1167 u8 tmp, bank, chip_id; 1166 u8 tmp, bank, chip_id;
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 9b6c4c10fba7..20781def65ed 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -52,12 +52,6 @@
52static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END }; 52static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
53 53
54/* 54/*
55 * Insmod parameters
56 */
57
58I2C_CLIENT_INSMOD_1(w83l785ts);
59
60/*
61 * The W83L785TS-S registers 55 * The W83L785TS-S registers
62 * Manufacturer ID is 0x5CA3 for Winbond. 56 * Manufacturer ID is 0x5CA3 for Winbond.
63 */ 57 */
@@ -83,7 +77,7 @@ I2C_CLIENT_INSMOD_1(w83l785ts);
83 77
84static int w83l785ts_probe(struct i2c_client *client, 78static int w83l785ts_probe(struct i2c_client *client,
85 const struct i2c_device_id *id); 79 const struct i2c_device_id *id);
86static int w83l785ts_detect(struct i2c_client *client, int kind, 80static int w83l785ts_detect(struct i2c_client *client,
87 struct i2c_board_info *info); 81 struct i2c_board_info *info);
88static int w83l785ts_remove(struct i2c_client *client); 82static int w83l785ts_remove(struct i2c_client *client);
89static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval); 83static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval);
@@ -94,7 +88,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
94 */ 88 */
95 89
96static const struct i2c_device_id w83l785ts_id[] = { 90static const struct i2c_device_id w83l785ts_id[] = {
97 { "w83l785ts", w83l785ts }, 91 { "w83l785ts", 0 },
98 { } 92 { }
99}; 93};
100MODULE_DEVICE_TABLE(i2c, w83l785ts_id); 94MODULE_DEVICE_TABLE(i2c, w83l785ts_id);
@@ -108,7 +102,7 @@ static struct i2c_driver w83l785ts_driver = {
108 .remove = w83l785ts_remove, 102 .remove = w83l785ts_remove,
109 .id_table = w83l785ts_id, 103 .id_table = w83l785ts_id,
110 .detect = w83l785ts_detect, 104 .detect = w83l785ts_detect,
111 .address_data = &addr_data, 105 .address_list = normal_i2c,
112}; 106};
113 107
114/* 108/*
@@ -146,7 +140,7 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 1);
146 */ 140 */
147 141
148/* Return 0 if detection is successful, -ENODEV otherwise */ 142/* Return 0 if detection is successful, -ENODEV otherwise */
149static int w83l785ts_detect(struct i2c_client *client, int kind, 143static int w83l785ts_detect(struct i2c_client *client,
150 struct i2c_board_info *info) 144 struct i2c_board_info *info)
151{ 145{
152 struct i2c_adapter *adapter = client->adapter; 146 struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 27da7d2b15fb..0254e181893d 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -38,7 +38,6 @@
38static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END }; 38static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };
39 39
40/* Insmod parameters */ 40/* Insmod parameters */
41I2C_CLIENT_INSMOD_1(w83l786ng);
42 41
43static int reset; 42static int reset;
44module_param(reset, bool, 0); 43module_param(reset, bool, 0);
@@ -147,14 +146,14 @@ struct w83l786ng_data {
147 146
148static int w83l786ng_probe(struct i2c_client *client, 147static int w83l786ng_probe(struct i2c_client *client,
149 const struct i2c_device_id *id); 148 const struct i2c_device_id *id);
150static int w83l786ng_detect(struct i2c_client *client, int kind, 149static int w83l786ng_detect(struct i2c_client *client,
151 struct i2c_board_info *info); 150 struct i2c_board_info *info);
152static int w83l786ng_remove(struct i2c_client *client); 151static int w83l786ng_remove(struct i2c_client *client);
153static void w83l786ng_init_client(struct i2c_client *client); 152static void w83l786ng_init_client(struct i2c_client *client);
154static struct w83l786ng_data *w83l786ng_update_device(struct device *dev); 153static struct w83l786ng_data *w83l786ng_update_device(struct device *dev);
155 154
156static const struct i2c_device_id w83l786ng_id[] = { 155static const struct i2c_device_id w83l786ng_id[] = {
157 { "w83l786ng", w83l786ng }, 156 { "w83l786ng", 0 },
158 { } 157 { }
159}; 158};
160MODULE_DEVICE_TABLE(i2c, w83l786ng_id); 159MODULE_DEVICE_TABLE(i2c, w83l786ng_id);
@@ -168,7 +167,7 @@ static struct i2c_driver w83l786ng_driver = {
168 .remove = w83l786ng_remove, 167 .remove = w83l786ng_remove,
169 .id_table = w83l786ng_id, 168 .id_table = w83l786ng_id,
170 .detect = w83l786ng_detect, 169 .detect = w83l786ng_detect,
171 .address_data = &addr_data, 170 .address_list = normal_i2c,
172}; 171};
173 172
174static u8 173static u8
@@ -586,8 +585,7 @@ static const struct attribute_group w83l786ng_group = {
586}; 585};
587 586
588static int 587static int
589w83l786ng_detect(struct i2c_client *client, int kind, 588w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info)
590 struct i2c_board_info *info)
591{ 589{
592 struct i2c_adapter *adapter = client->adapter; 590 struct i2c_adapter *adapter = client->adapter;
593 u16 man_id; 591 u16 man_id;
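The two hwmon conversions above (w83l785ts, w83l786ng) follow the new I2C detection interface: the I2C_CLIENT_INSMOD_* macros and the generated addr_data go away, the driver points .address_list at its normal_i2c array, and the .detect callback loses the "kind" argument. A minimal sketch of a driver using the new interface; the foo_* names and the 0x2e address are placeholders, not code from either driver:

#include <linux/module.h>
#include <linux/i2c.h>

/* Addresses to scan; the array is terminated by I2C_CLIENT_END. */
static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	return 0;	/* real drivers register sysfs attributes etc. here */
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

/* New-style detect callback: no more "int kind" argument. */
static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* Read identification registers here; return -ENODEV on mismatch. */

	strlcpy(info->type, "foo", I2C_NAME_SIZE);
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },		/* driver-private data; no more enum kind */
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.class		= I2C_CLASS_HWMON,	/* detect runs only on matching adapters */
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
	.detect		= foo_detect,
	.address_list	= normal_i2c,		/* replaces .address_data = &addr_data */
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");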
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 049555777f67..7647a20523a0 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1155,7 +1155,7 @@ static int i2c_pxa_resume_noirq(struct device *dev)
1155 return 0; 1155 return 0;
1156} 1156}
1157 1157
1158static struct dev_pm_ops i2c_pxa_dev_pm_ops = { 1158static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
1159 .suspend_noirq = i2c_pxa_suspend_noirq, 1159 .suspend_noirq = i2c_pxa_suspend_noirq,
1160 .resume_noirq = i2c_pxa_resume_noirq, 1160 .resume_noirq = i2c_pxa_resume_noirq,
1161}; 1161};
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 96aafb91b69a..1d8c98613fa0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -967,7 +967,7 @@ static int s3c24xx_i2c_resume(struct device *dev)
967 return 0; 967 return 0;
968} 968}
969 969
970static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { 970static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
971 .suspend_noirq = s3c24xx_i2c_suspend_noirq, 971 .suspend_noirq = s3c24xx_i2c_suspend_noirq,
972 .resume = s3c24xx_i2c_resume, 972 .resume = s3c24xx_i2c_resume,
973}; 973};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 86a9d4e81472..ccc46418ef7f 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -647,7 +647,7 @@ static int sh_mobile_i2c_runtime_nop(struct device *dev)
647 return 0; 647 return 0;
648} 648}
649 649
650static struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = { 650static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
651 .runtime_suspend = sh_mobile_i2c_runtime_nop, 651 .runtime_suspend = sh_mobile_i2c_runtime_nop,
652 .runtime_resume = sh_mobile_i2c_runtime_nop, 652 .runtime_resume = sh_mobile_i2c_runtime_nop,
653}; 653};
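The three bus-driver hunks above make the same one-line change: a dev_pm_ops table is only ever read, so it can be declared const and placed in read-only data. A minimal sketch with placeholder callbacks:

#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend_noirq(struct device *dev)
{
	return 0;	/* placeholder; a real driver quiesces the hardware here */
}

static int foo_resume_noirq(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_dev_pm_ops = {
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};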
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 4f34823e86b1..0ac2f90ab840 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -155,6 +155,35 @@ static void i2c_device_shutdown(struct device *dev)
155 driver->shutdown(client); 155 driver->shutdown(client);
156} 156}
157 157
158#ifdef CONFIG_SUSPEND
159static int i2c_device_pm_suspend(struct device *dev)
160{
161 const struct dev_pm_ops *pm;
162
163 if (!dev->driver)
164 return 0;
165 pm = dev->driver->pm;
166 if (!pm || !pm->suspend)
167 return 0;
168 return pm->suspend(dev);
169}
170
171static int i2c_device_pm_resume(struct device *dev)
172{
173 const struct dev_pm_ops *pm;
174
175 if (!dev->driver)
176 return 0;
177 pm = dev->driver->pm;
178 if (!pm || !pm->resume)
179 return 0;
180 return pm->resume(dev);
181}
182#else
183#define i2c_device_pm_suspend NULL
184#define i2c_device_pm_resume NULL
185#endif
186
158static int i2c_device_suspend(struct device *dev, pm_message_t mesg) 187static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
159{ 188{
160 struct i2c_client *client = i2c_verify_client(dev); 189 struct i2c_client *client = i2c_verify_client(dev);
@@ -219,6 +248,11 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
219 NULL 248 NULL
220}; 249};
221 250
251const static struct dev_pm_ops i2c_device_pm_ops = {
252 .suspend = i2c_device_pm_suspend,
253 .resume = i2c_device_pm_resume,
254};
255
222struct bus_type i2c_bus_type = { 256struct bus_type i2c_bus_type = {
223 .name = "i2c", 257 .name = "i2c",
224 .match = i2c_device_match, 258 .match = i2c_device_match,
@@ -227,6 +261,7 @@ struct bus_type i2c_bus_type = {
227 .shutdown = i2c_device_shutdown, 261 .shutdown = i2c_device_shutdown,
228 .suspend = i2c_device_suspend, 262 .suspend = i2c_device_suspend,
229 .resume = i2c_device_resume, 263 .resume = i2c_device_resume,
264 .pm = &i2c_device_pm_ops,
230}; 265};
231EXPORT_SYMBOL_GPL(i2c_bus_type); 266EXPORT_SYMBOL_GPL(i2c_bus_type);
232 267
@@ -1184,7 +1219,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
1184 /* Finally call the custom detection function */ 1219 /* Finally call the custom detection function */
1185 memset(&info, 0, sizeof(struct i2c_board_info)); 1220 memset(&info, 0, sizeof(struct i2c_board_info));
1186 info.addr = addr; 1221 info.addr = addr;
1187 err = driver->detect(temp_client, -1, &info); 1222 err = driver->detect(temp_client, &info);
1188 if (err) { 1223 if (err) {
1189 /* -ENODEV is returned if the detection fails. We catch it 1224 /* -ENODEV is returned if the detection fails. We catch it
1190 here as this isn't an error. */ 1225 here as this isn't an error. */
@@ -1214,13 +1249,13 @@ static int i2c_detect_address(struct i2c_client *temp_client,
1214 1249
1215static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) 1250static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
1216{ 1251{
1217 const struct i2c_client_address_data *address_data; 1252 const unsigned short *address_list;
1218 struct i2c_client *temp_client; 1253 struct i2c_client *temp_client;
1219 int i, err = 0; 1254 int i, err = 0;
1220 int adap_id = i2c_adapter_id(adapter); 1255 int adap_id = i2c_adapter_id(adapter);
1221 1256
1222 address_data = driver->address_data; 1257 address_list = driver->address_list;
1223 if (!driver->detect || !address_data) 1258 if (!driver->detect || !address_list)
1224 return 0; 1259 return 0;
1225 1260
1226 /* Set up a temporary client to help detect callback */ 1261 /* Set up a temporary client to help detect callback */
@@ -1235,7 +1270,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
1235 1270
1236 /* Stop here if we can't use SMBUS_QUICK */ 1271 /* Stop here if we can't use SMBUS_QUICK */
1237 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) { 1272 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) {
1238 if (address_data->normal_i2c[0] == I2C_CLIENT_END) 1273 if (address_list[0] == I2C_CLIENT_END)
1239 goto exit_free; 1274 goto exit_free;
1240 1275
1241 dev_warn(&adapter->dev, "SMBus Quick command not supported, " 1276 dev_warn(&adapter->dev, "SMBus Quick command not supported, "
@@ -1244,11 +1279,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
1244 goto exit_free; 1279 goto exit_free;
1245 } 1280 }
1246 1281
1247 for (i = 0; address_data->normal_i2c[i] != I2C_CLIENT_END; i += 1) { 1282 for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
1248 dev_dbg(&adapter->dev, "found normal entry for adapter %d, " 1283 dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
1249 "addr 0x%02x\n", adap_id, 1284 "addr 0x%02x\n", adap_id, address_list[i]);
1250 address_data->normal_i2c[i]); 1285 temp_client->addr = address_list[i];
1251 temp_client->addr = address_data->normal_i2c[i];
1252 err = i2c_detect_address(temp_client, driver); 1286 err = i2c_detect_address(temp_client, driver);
1253 if (err) 1287 if (err)
1254 goto exit_free; 1288 goto exit_free;
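In i2c-core the per-driver scan data shrinks from the i2c_client_address_data structure to a bare array of addresses terminated by I2C_CLIENT_END, and the bus additionally gains dev_pm_ops callbacks that forward to the client driver's pm methods when present. A sketch of the sentinel-terminated walk, written as a hypothetical stand-alone helper rather than the exact i2c_detect() loop:

#include <linux/i2c.h>

static void for_each_scan_addr(const unsigned short *address_list,
			       void (*fn)(unsigned short addr, void *data),
			       void *data)
{
	int i;

	/* The array carries no explicit length; I2C_CLIENT_END marks its end. */
	for (i = 0; address_list[i] != I2C_CLIENT_END; i++)
		fn(address_list[i], data);
}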
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index bd07803e9183..abbb06996f9e 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -36,7 +36,6 @@
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/inetdevice.h> 37#include <linux/inetdevice.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/if_arp.h>
40#include <net/arp.h> 39#include <net/arp.h>
41#include <net/neighbour.h> 40#include <net/neighbour.h>
42#include <net/route.h> 41#include <net/route.h>
@@ -92,22 +91,12 @@ EXPORT_SYMBOL(rdma_addr_unregister_client);
92int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, 91int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
93 const unsigned char *dst_dev_addr) 92 const unsigned char *dst_dev_addr)
94{ 93{
95 switch (dev->type) { 94 dev_addr->dev_type = dev->type;
96 case ARPHRD_INFINIBAND:
97 dev_addr->dev_type = RDMA_NODE_IB_CA;
98 break;
99 case ARPHRD_ETHER:
100 dev_addr->dev_type = RDMA_NODE_RNIC;
101 break;
102 default:
103 return -EADDRNOTAVAIL;
104 }
105
106 memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN); 95 memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
107 memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN); 96 memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
108 if (dst_dev_addr) 97 if (dst_dev_addr)
109 memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN); 98 memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
110 dev_addr->src_dev = dev; 99 dev_addr->bound_dev_if = dev->ifindex;
111 return 0; 100 return 0;
112} 101}
113EXPORT_SYMBOL(rdma_copy_addr); 102EXPORT_SYMBOL(rdma_copy_addr);
@@ -117,6 +106,15 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
117 struct net_device *dev; 106 struct net_device *dev;
118 int ret = -EADDRNOTAVAIL; 107 int ret = -EADDRNOTAVAIL;
119 108
109 if (dev_addr->bound_dev_if) {
110 dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
111 if (!dev)
112 return -ENODEV;
113 ret = rdma_copy_addr(dev_addr, dev, NULL);
114 dev_put(dev);
115 return ret;
116 }
117
120 switch (addr->sa_family) { 118 switch (addr->sa_family) {
121 case AF_INET: 119 case AF_INET:
122 dev = ip_dev_find(&init_net, 120 dev = ip_dev_find(&init_net,
@@ -131,6 +129,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
131 129
132#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 130#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
133 case AF_INET6: 131 case AF_INET6:
132 read_lock(&dev_base_lock);
134 for_each_netdev(&init_net, dev) { 133 for_each_netdev(&init_net, dev) {
135 if (ipv6_chk_addr(&init_net, 134 if (ipv6_chk_addr(&init_net,
136 &((struct sockaddr_in6 *) addr)->sin6_addr, 135 &((struct sockaddr_in6 *) addr)->sin6_addr,
@@ -139,6 +138,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
139 break; 138 break;
140 } 139 }
141 } 140 }
141 read_unlock(&dev_base_lock);
142 break; 142 break;
143#endif 143#endif
144 } 144 }
@@ -176,48 +176,9 @@ static void queue_req(struct addr_req *req)
176 mutex_unlock(&lock); 176 mutex_unlock(&lock);
177} 177}
178 178
179static void addr_send_arp(struct sockaddr *dst_in) 179static int addr4_resolve(struct sockaddr_in *src_in,
180{ 180 struct sockaddr_in *dst_in,
181 struct rtable *rt; 181 struct rdma_dev_addr *addr)
182 struct flowi fl;
183
184 memset(&fl, 0, sizeof fl);
185
186 switch (dst_in->sa_family) {
187 case AF_INET:
188 fl.nl_u.ip4_u.daddr =
189 ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
190
191 if (ip_route_output_key(&init_net, &rt, &fl))
192 return;
193
194 neigh_event_send(rt->u.dst.neighbour, NULL);
195 ip_rt_put(rt);
196 break;
197
198#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
199 case AF_INET6:
200 {
201 struct dst_entry *dst;
202
203 fl.nl_u.ip6_u.daddr =
204 ((struct sockaddr_in6 *) dst_in)->sin6_addr;
205
206 dst = ip6_route_output(&init_net, NULL, &fl);
207 if (!dst)
208 return;
209
210 neigh_event_send(dst->neighbour, NULL);
211 dst_release(dst);
212 break;
213 }
214#endif
215 }
216}
217
218static int addr4_resolve_remote(struct sockaddr_in *src_in,
219 struct sockaddr_in *dst_in,
220 struct rdma_dev_addr *addr)
221{ 182{
222 __be32 src_ip = src_in->sin_addr.s_addr; 183 __be32 src_ip = src_in->sin_addr.s_addr;
223 __be32 dst_ip = dst_in->sin_addr.s_addr; 184 __be32 dst_ip = dst_in->sin_addr.s_addr;
@@ -229,10 +190,22 @@ static int addr4_resolve_remote(struct sockaddr_in *src_in,
229 memset(&fl, 0, sizeof fl); 190 memset(&fl, 0, sizeof fl);
230 fl.nl_u.ip4_u.daddr = dst_ip; 191 fl.nl_u.ip4_u.daddr = dst_ip;
231 fl.nl_u.ip4_u.saddr = src_ip; 192 fl.nl_u.ip4_u.saddr = src_ip;
193 fl.oif = addr->bound_dev_if;
194
232 ret = ip_route_output_key(&init_net, &rt, &fl); 195 ret = ip_route_output_key(&init_net, &rt, &fl);
233 if (ret) 196 if (ret)
234 goto out; 197 goto out;
235 198
199 src_in->sin_family = AF_INET;
200 src_in->sin_addr.s_addr = rt->rt_src;
201
202 if (rt->idev->dev->flags & IFF_LOOPBACK) {
203 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
204 if (!ret)
205 memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
206 goto put;
207 }
208
236 /* If the device does ARP internally, return 'done' */ 209 /* If the device does ARP internally, return 'done' */
237 if (rt->idev->dev->flags & IFF_NOARP) { 210 if (rt->idev->dev->flags & IFF_NOARP) {
238 rdma_copy_addr(addr, rt->idev->dev, NULL); 211 rdma_copy_addr(addr, rt->idev->dev, NULL);
@@ -240,21 +213,14 @@ static int addr4_resolve_remote(struct sockaddr_in *src_in,
240 } 213 }
241 214
242 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev); 215 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
243 if (!neigh) { 216 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
217 neigh_event_send(rt->u.dst.neighbour, NULL);
244 ret = -ENODATA; 218 ret = -ENODATA;
219 if (neigh)
220 goto release;
245 goto put; 221 goto put;
246 } 222 }
247 223
248 if (!(neigh->nud_state & NUD_VALID)) {
249 ret = -ENODATA;
250 goto release;
251 }
252
253 if (!src_ip) {
254 src_in->sin_family = dst_in->sin_family;
255 src_in->sin_addr.s_addr = rt->rt_src;
256 }
257
258 ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); 224 ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
259release: 225release:
260 neigh_release(neigh); 226 neigh_release(neigh);
@@ -265,52 +231,77 @@ out:
265} 231}
266 232
267#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 233#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
268static int addr6_resolve_remote(struct sockaddr_in6 *src_in, 234static int addr6_resolve(struct sockaddr_in6 *src_in,
269 struct sockaddr_in6 *dst_in, 235 struct sockaddr_in6 *dst_in,
270 struct rdma_dev_addr *addr) 236 struct rdma_dev_addr *addr)
271{ 237{
272 struct flowi fl; 238 struct flowi fl;
273 struct neighbour *neigh; 239 struct neighbour *neigh;
274 struct dst_entry *dst; 240 struct dst_entry *dst;
275 int ret = -ENODATA; 241 int ret;
276 242
277 memset(&fl, 0, sizeof fl); 243 memset(&fl, 0, sizeof fl);
278 fl.nl_u.ip6_u.daddr = dst_in->sin6_addr; 244 ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
279 fl.nl_u.ip6_u.saddr = src_in->sin6_addr; 245 ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
246 fl.oif = addr->bound_dev_if;
280 247
281 dst = ip6_route_output(&init_net, NULL, &fl); 248 dst = ip6_route_output(&init_net, NULL, &fl);
282 if (!dst) 249 if ((ret = dst->error))
283 return ret; 250 goto put;
251
252 if (ipv6_addr_any(&fl.fl6_src)) {
253 ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
254 &fl.fl6_dst, 0, &fl.fl6_src);
255 if (ret)
256 goto put;
257
258 src_in->sin6_family = AF_INET6;
259 ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src);
260 }
261
262 if (dst->dev->flags & IFF_LOOPBACK) {
263 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
264 if (!ret)
265 memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
266 goto put;
267 }
284 268
269 /* If the device does ARP internally, return 'done' */
285 if (dst->dev->flags & IFF_NOARP) { 270 if (dst->dev->flags & IFF_NOARP) {
286 ret = rdma_copy_addr(addr, dst->dev, NULL); 271 ret = rdma_copy_addr(addr, dst->dev, NULL);
287 } else { 272 goto put;
288 neigh = dst->neighbour; 273 }
289 if (neigh && (neigh->nud_state & NUD_VALID)) 274
290 ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); 275 neigh = dst->neighbour;
276 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
277 neigh_event_send(dst->neighbour, NULL);
278 ret = -ENODATA;
279 goto put;
291 } 280 }
292 281
282 ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
283put:
293 dst_release(dst); 284 dst_release(dst);
294 return ret; 285 return ret;
295} 286}
296#else 287#else
297static int addr6_resolve_remote(struct sockaddr_in6 *src_in, 288static int addr6_resolve(struct sockaddr_in6 *src_in,
298 struct sockaddr_in6 *dst_in, 289 struct sockaddr_in6 *dst_in,
299 struct rdma_dev_addr *addr) 290 struct rdma_dev_addr *addr)
300{ 291{
301 return -EADDRNOTAVAIL; 292 return -EADDRNOTAVAIL;
302} 293}
303#endif 294#endif
304 295
305static int addr_resolve_remote(struct sockaddr *src_in, 296static int addr_resolve(struct sockaddr *src_in,
306 struct sockaddr *dst_in, 297 struct sockaddr *dst_in,
307 struct rdma_dev_addr *addr) 298 struct rdma_dev_addr *addr)
308{ 299{
309 if (src_in->sa_family == AF_INET) { 300 if (src_in->sa_family == AF_INET) {
310 return addr4_resolve_remote((struct sockaddr_in *) src_in, 301 return addr4_resolve((struct sockaddr_in *) src_in,
311 (struct sockaddr_in *) dst_in, addr); 302 (struct sockaddr_in *) dst_in, addr);
312 } else 303 } else
313 return addr6_resolve_remote((struct sockaddr_in6 *) src_in, 304 return addr6_resolve((struct sockaddr_in6 *) src_in,
314 (struct sockaddr_in6 *) dst_in, addr); 305 (struct sockaddr_in6 *) dst_in, addr);
315} 306}
316 307
@@ -327,8 +318,7 @@ static void process_req(struct work_struct *work)
327 if (req->status == -ENODATA) { 318 if (req->status == -ENODATA) {
328 src_in = (struct sockaddr *) &req->src_addr; 319 src_in = (struct sockaddr *) &req->src_addr;
329 dst_in = (struct sockaddr *) &req->dst_addr; 320 dst_in = (struct sockaddr *) &req->dst_addr;
330 req->status = addr_resolve_remote(src_in, dst_in, 321 req->status = addr_resolve(src_in, dst_in, req->addr);
331 req->addr);
332 if (req->status && time_after_eq(jiffies, req->timeout)) 322 if (req->status && time_after_eq(jiffies, req->timeout))
333 req->status = -ETIMEDOUT; 323 req->status = -ETIMEDOUT;
334 else if (req->status == -ENODATA) 324 else if (req->status == -ENODATA)
@@ -352,82 +342,6 @@ static void process_req(struct work_struct *work)
352 } 342 }
353} 343}
354 344
355static int addr_resolve_local(struct sockaddr *src_in,
356 struct sockaddr *dst_in,
357 struct rdma_dev_addr *addr)
358{
359 struct net_device *dev;
360 int ret;
361
362 switch (dst_in->sa_family) {
363 case AF_INET:
364 {
365 __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr;
366 __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
367
368 dev = ip_dev_find(&init_net, dst_ip);
369 if (!dev)
370 return -EADDRNOTAVAIL;
371
372 if (ipv4_is_zeronet(src_ip)) {
373 src_in->sa_family = dst_in->sa_family;
374 ((struct sockaddr_in *) src_in)->sin_addr.s_addr = dst_ip;
375 ret = rdma_copy_addr(addr, dev, dev->dev_addr);
376 } else if (ipv4_is_loopback(src_ip)) {
377 ret = rdma_translate_ip(dst_in, addr);
378 if (!ret)
379 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
380 } else {
381 ret = rdma_translate_ip(src_in, addr);
382 if (!ret)
383 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
384 }
385 dev_put(dev);
386 break;
387 }
388
389#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
390 case AF_INET6:
391 {
392 struct in6_addr *a;
393
394 for_each_netdev(&init_net, dev)
395 if (ipv6_chk_addr(&init_net,
396 &((struct sockaddr_in6 *) dst_in)->sin6_addr,
397 dev, 1))
398 break;
399
400 if (!dev)
401 return -EADDRNOTAVAIL;
402
403 a = &((struct sockaddr_in6 *) src_in)->sin6_addr;
404
405 if (ipv6_addr_any(a)) {
406 src_in->sa_family = dst_in->sa_family;
407 ((struct sockaddr_in6 *) src_in)->sin6_addr =
408 ((struct sockaddr_in6 *) dst_in)->sin6_addr;
409 ret = rdma_copy_addr(addr, dev, dev->dev_addr);
410 } else if (ipv6_addr_loopback(a)) {
411 ret = rdma_translate_ip(dst_in, addr);
412 if (!ret)
413 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
414 } else {
415 ret = rdma_translate_ip(src_in, addr);
416 if (!ret)
417 memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
418 }
419 break;
420 }
421#endif
422
423 default:
424 ret = -EADDRNOTAVAIL;
425 break;
426 }
427
428 return ret;
429}
430
431int rdma_resolve_ip(struct rdma_addr_client *client, 345int rdma_resolve_ip(struct rdma_addr_client *client,
432 struct sockaddr *src_addr, struct sockaddr *dst_addr, 346 struct sockaddr *src_addr, struct sockaddr *dst_addr,
433 struct rdma_dev_addr *addr, int timeout_ms, 347 struct rdma_dev_addr *addr, int timeout_ms,
@@ -443,22 +357,28 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
443 if (!req) 357 if (!req)
444 return -ENOMEM; 358 return -ENOMEM;
445 359
446 if (src_addr) 360 src_in = (struct sockaddr *) &req->src_addr;
447 memcpy(&req->src_addr, src_addr, ip_addr_size(src_addr)); 361 dst_in = (struct sockaddr *) &req->dst_addr;
448 memcpy(&req->dst_addr, dst_addr, ip_addr_size(dst_addr)); 362
363 if (src_addr) {
364 if (src_addr->sa_family != dst_addr->sa_family) {
365 ret = -EINVAL;
366 goto err;
367 }
368
369 memcpy(src_in, src_addr, ip_addr_size(src_addr));
370 } else {
371 src_in->sa_family = dst_addr->sa_family;
372 }
373
374 memcpy(dst_in, dst_addr, ip_addr_size(dst_addr));
449 req->addr = addr; 375 req->addr = addr;
450 req->callback = callback; 376 req->callback = callback;
451 req->context = context; 377 req->context = context;
452 req->client = client; 378 req->client = client;
453 atomic_inc(&client->refcount); 379 atomic_inc(&client->refcount);
454 380
455 src_in = (struct sockaddr *) &req->src_addr; 381 req->status = addr_resolve(src_in, dst_in, addr);
456 dst_in = (struct sockaddr *) &req->dst_addr;
457
458 req->status = addr_resolve_local(src_in, dst_in, addr);
459 if (req->status == -EADDRNOTAVAIL)
460 req->status = addr_resolve_remote(src_in, dst_in, addr);
461
462 switch (req->status) { 382 switch (req->status) {
463 case 0: 383 case 0:
464 req->timeout = jiffies; 384 req->timeout = jiffies;
@@ -467,15 +387,16 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
467 case -ENODATA: 387 case -ENODATA:
468 req->timeout = msecs_to_jiffies(timeout_ms) + jiffies; 388 req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
469 queue_req(req); 389 queue_req(req);
470 addr_send_arp(dst_in);
471 break; 390 break;
472 default: 391 default:
473 ret = req->status; 392 ret = req->status;
474 atomic_dec(&client->refcount); 393 atomic_dec(&client->refcount);
475 kfree(req); 394 goto err;
476 break;
477 } 395 }
478 return ret; 396 return ret;
397err:
398 kfree(req);
399 return ret;
479} 400}
480EXPORT_SYMBOL(rdma_resolve_ip); 401EXPORT_SYMBOL(rdma_resolve_ip);
481 402
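The addr.c rework folds addr_resolve_local() and addr_send_arp() into addr4_resolve()/addr6_resolve(): the route lookup is bound to the requested interface through fl.oif, the source address is filled in from the route, loopback destinations fall back to rdma_translate_ip(), and a not-yet-valid neighbour triggers neigh_event_send() and returns -ENODATA so the work queue retries. A sketch of the device-bound IPv4 route lookup, as a hypothetical wrapper rather than the patched function itself:

#include <linux/string.h>
#include <net/net_namespace.h>
#include <net/route.h>

static int route_via(__be32 daddr, __be32 saddr, int bound_dev_if,
		     struct rtable **rt)
{
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = daddr;
	fl.nl_u.ip4_u.saddr = saddr;
	fl.oif = bound_dev_if;		/* 0 means "any interface" */

	return ip_route_output_key(&init_net, rt, &fl);
}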
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 075317884b53..fbdd73106000 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -330,17 +330,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
330 union ib_gid gid; 330 union ib_gid gid;
331 int ret = -ENODEV; 331 int ret = -ENODEV;
332 332
333 switch (rdma_node_get_transport(dev_addr->dev_type)) { 333 rdma_addr_get_sgid(dev_addr, &gid);
334 case RDMA_TRANSPORT_IB:
335 ib_addr_get_sgid(dev_addr, &gid);
336 break;
337 case RDMA_TRANSPORT_IWARP:
338 iw_addr_get_sgid(dev_addr, &gid);
339 break;
340 default:
341 return -ENODEV;
342 }
343
344 list_for_each_entry(cma_dev, &dev_list, list) { 334 list_for_each_entry(cma_dev, &dev_list, list) {
345 ret = ib_find_cached_gid(cma_dev->device, &gid, 335 ret = ib_find_cached_gid(cma_dev->device, &gid,
346 &id_priv->id.port_num, NULL); 336 &id_priv->id.port_num, NULL);
@@ -1032,11 +1022,17 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1032 if (rt->num_paths == 2) 1022 if (rt->num_paths == 2)
1033 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 1023 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1034 1024
1035 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1025 if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
1036 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr, 1026 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
1037 &id->route.addr.dev_addr); 1027 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
1038 if (ret) 1028 ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
1039 goto destroy_id; 1029 } else {
1030 ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
1031 &rt->addr.dev_addr);
1032 if (ret)
1033 goto destroy_id;
1034 }
1035 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1040 1036
1041 id_priv = container_of(id, struct rdma_id_private, id); 1037 id_priv = container_of(id, struct rdma_id_private, id);
1042 id_priv->state = CMA_CONNECT; 1038 id_priv->state = CMA_CONNECT;
@@ -1071,10 +1067,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1071 cma_save_net_info(&id->route.addr, &listen_id->route.addr, 1067 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1072 ip_ver, port, src, dst); 1068 ip_ver, port, src, dst);
1073 1069
1074 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr, 1070 if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
1075 &id->route.addr.dev_addr); 1071 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1076 if (ret) 1072 &id->route.addr.dev_addr);
1077 goto err; 1073 if (ret)
1074 goto err;
1075 }
1078 1076
1079 id_priv = container_of(id, struct rdma_id_private, id); 1077 id_priv = container_of(id, struct rdma_id_private, id);
1080 id_priv->state = CMA_CONNECT; 1078 id_priv->state = CMA_CONNECT;
@@ -1474,15 +1472,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
1474 mutex_unlock(&lock); 1472 mutex_unlock(&lock);
1475} 1473}
1476 1474
1477static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
1478{
1479 struct sockaddr_storage addr_in;
1480
1481 memset(&addr_in, 0, sizeof addr_in);
1482 addr_in.ss_family = af;
1483 return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
1484}
1485
1486int rdma_listen(struct rdma_cm_id *id, int backlog) 1475int rdma_listen(struct rdma_cm_id *id, int backlog)
1487{ 1476{
1488 struct rdma_id_private *id_priv; 1477 struct rdma_id_private *id_priv;
@@ -1490,7 +1479,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
1490 1479
1491 id_priv = container_of(id, struct rdma_id_private, id); 1480 id_priv = container_of(id, struct rdma_id_private, id);
1492 if (id_priv->state == CMA_IDLE) { 1481 if (id_priv->state == CMA_IDLE) {
1493 ret = cma_bind_any(id, AF_INET); 1482 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
1483 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
1494 if (ret) 1484 if (ret)
1495 return ret; 1485 return ret;
1496 } 1486 }
@@ -1565,8 +1555,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1565 struct sockaddr_in6 *sin6; 1555 struct sockaddr_in6 *sin6;
1566 1556
1567 memset(&path_rec, 0, sizeof path_rec); 1557 memset(&path_rec, 0, sizeof path_rec);
1568 ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid); 1558 rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
1569 ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid); 1559 rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
1570 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr)); 1560 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
1571 path_rec.numb_path = 1; 1561 path_rec.numb_path = 1;
1572 path_rec.reversible = 1; 1562 path_rec.reversible = 1;
@@ -1781,7 +1771,11 @@ port_found:
1781 if (ret) 1771 if (ret)
1782 goto out; 1772 goto out;
1783 1773
1784 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 1774 id_priv->id.route.addr.dev_addr.dev_type =
1775 (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
1776 ARPHRD_INFINIBAND : ARPHRD_ETHER;
1777
1778 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1785 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 1779 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1786 id_priv->id.port_num = p; 1780 id_priv->id.port_num = p;
1787 cma_attach_to_dev(id_priv, cma_dev); 1781 cma_attach_to_dev(id_priv, cma_dev);
@@ -1839,7 +1833,7 @@ out:
1839static int cma_resolve_loopback(struct rdma_id_private *id_priv) 1833static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1840{ 1834{
1841 struct cma_work *work; 1835 struct cma_work *work;
1842 struct sockaddr_in *src_in, *dst_in; 1836 struct sockaddr *src, *dst;
1843 union ib_gid gid; 1837 union ib_gid gid;
1844 int ret; 1838 int ret;
1845 1839
@@ -1853,14 +1847,19 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1853 goto err; 1847 goto err;
1854 } 1848 }
1855 1849
1856 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 1850 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1857 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 1851 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1858 1852
1859 if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) { 1853 src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
1860 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr; 1854 if (cma_zero_addr(src)) {
1861 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr; 1855 dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
1862 src_in->sin_family = dst_in->sin_family; 1856 if ((src->sa_family = dst->sa_family) == AF_INET) {
1863 src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr; 1857 ((struct sockaddr_in *) src)->sin_addr.s_addr =
1858 ((struct sockaddr_in *) dst)->sin_addr.s_addr;
1859 } else {
1860 ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
1861 &((struct sockaddr_in6 *) dst)->sin6_addr);
1862 }
1864 } 1863 }
1865 1864
1866 work->id = id_priv; 1865 work->id = id_priv;
@@ -1878,10 +1877,14 @@ err:
1878static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 1877static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1879 struct sockaddr *dst_addr) 1878 struct sockaddr *dst_addr)
1880{ 1879{
1881 if (src_addr && src_addr->sa_family) 1880 if (!src_addr || !src_addr->sa_family) {
1882 return rdma_bind_addr(id, src_addr); 1881 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
1883 else 1882 if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
1884 return cma_bind_any(id, dst_addr->sa_family); 1883 ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
1884 ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
1885 }
1886 }
1887 return rdma_bind_addr(id, src_addr);
1885} 1888}
1886 1889
1887int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 1890int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
@@ -2077,6 +2080,25 @@ static int cma_get_port(struct rdma_id_private *id_priv)
2077 return ret; 2080 return ret;
2078} 2081}
2079 2082
2083static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2084 struct sockaddr *addr)
2085{
2086#if defined(CONFIG_IPv6) || defined(CONFIG_IPV6_MODULE)
2087 struct sockaddr_in6 *sin6;
2088
2089 if (addr->sa_family != AF_INET6)
2090 return 0;
2091
2092 sin6 = (struct sockaddr_in6 *) addr;
2093 if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
2094 !sin6->sin6_scope_id)
2095 return -EINVAL;
2096
2097 dev_addr->bound_dev_if = sin6->sin6_scope_id;
2098#endif
2099 return 0;
2100}
2101
2080int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 2102int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2081{ 2103{
2082 struct rdma_id_private *id_priv; 2104 struct rdma_id_private *id_priv;
@@ -2089,7 +2111,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2089 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) 2111 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
2090 return -EINVAL; 2112 return -EINVAL;
2091 2113
2092 if (!cma_any_addr(addr)) { 2114 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
2115 if (ret)
2116 goto err1;
2117
2118 if (cma_loopback_addr(addr)) {
2119 ret = cma_bind_loopback(id_priv);
2120 } else if (!cma_zero_addr(addr)) {
2093 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2121 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2094 if (ret) 2122 if (ret)
2095 goto err1; 2123 goto err1;
@@ -2108,7 +2136,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2108 2136
2109 return 0; 2137 return 0;
2110err2: 2138err2:
2111 if (!cma_any_addr(addr)) { 2139 if (id_priv->cma_dev) {
2112 mutex_lock(&lock); 2140 mutex_lock(&lock);
2113 cma_detach_from_dev(id_priv); 2141 cma_detach_from_dev(id_priv);
2114 mutex_unlock(&lock); 2142 mutex_unlock(&lock);
@@ -2687,10 +2715,15 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
2687 if (cma_any_addr(addr)) { 2715 if (cma_any_addr(addr)) {
2688 memset(mgid, 0, sizeof *mgid); 2716 memset(mgid, 0, sizeof *mgid);
2689 } else if ((addr->sa_family == AF_INET6) && 2717 } else if ((addr->sa_family == AF_INET6) &&
2690 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) == 2718 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
2691 0xFF10A01B)) { 2719 0xFF10A01B)) {
2692 /* IPv6 address is an SA assigned MGID. */ 2720 /* IPv6 address is an SA assigned MGID. */
2693 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 2721 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
2722 } else if ((addr->sa_family == AF_INET6)) {
2723 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
2724 if (id_priv->id.ps == RDMA_PS_UDP)
2725 mc_map[7] = 0x01; /* Use RDMA CM signature */
2726 *mgid = *(union ib_gid *) (mc_map + 4);
2694 } else { 2727 } else {
2695 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 2728 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
2696 if (id_priv->id.ps == RDMA_PS_UDP) 2729 if (id_priv->id.ps == RDMA_PS_UDP)
@@ -2716,7 +2749,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
2716 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 2749 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
2717 if (id_priv->id.ps == RDMA_PS_UDP) 2750 if (id_priv->id.ps == RDMA_PS_UDP)
2718 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 2751 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
2719 ib_addr_get_sgid(dev_addr, &rec.port_gid); 2752 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
2720 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2753 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2721 rec.join_state = 1; 2754 rec.join_state = 1;
2722 2755
@@ -2815,7 +2848,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
2815 2848
2816 dev_addr = &id_priv->id.route.addr.dev_addr; 2849 dev_addr = &id_priv->id.route.addr.dev_addr;
2817 2850
2818 if ((dev_addr->src_dev == ndev) && 2851 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
2819 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 2852 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
2820 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", 2853 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
2821 ndev->name, &id_priv->id); 2854 ndev->name, &id_priv->id);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 82543716d59e..7e1ffd8ccd5c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -604,6 +604,12 @@ retry:
604 return ret ? ret : id; 604 return ret ? ret : id;
605} 605}
606 606
607void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
608{
609 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
610}
611EXPORT_SYMBOL(ib_sa_unpack_path);
612
607static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, 613static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
608 int status, 614 int status,
609 struct ib_sa_mad *mad) 615 struct ib_sa_mad *mad)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index bb96d3c4b0f4..b2e16c332d5b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -43,6 +43,7 @@
43#include <rdma/rdma_user_cm.h> 43#include <rdma/rdma_user_cm.h>
44#include <rdma/ib_marshall.h> 44#include <rdma/ib_marshall.h>
45#include <rdma/rdma_cm.h> 45#include <rdma/rdma_cm.h>
46#include <rdma/rdma_cm_ib.h>
46 47
47MODULE_AUTHOR("Sean Hefty"); 48MODULE_AUTHOR("Sean Hefty");
48MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); 49MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
@@ -562,10 +563,10 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
562 switch (route->num_paths) { 563 switch (route->num_paths) {
563 case 0: 564 case 0:
564 dev_addr = &route->addr.dev_addr; 565 dev_addr = &route->addr.dev_addr;
565 ib_addr_get_dgid(dev_addr, 566 rdma_addr_get_dgid(dev_addr,
566 (union ib_gid *) &resp->ib_route[0].dgid); 567 (union ib_gid *) &resp->ib_route[0].dgid);
567 ib_addr_get_sgid(dev_addr, 568 rdma_addr_get_sgid(dev_addr,
568 (union ib_gid *) &resp->ib_route[0].sgid); 569 (union ib_gid *) &resp->ib_route[0].sgid);
569 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 570 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
570 break; 571 break;
571 case 2: 572 case 2:
@@ -812,6 +813,51 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
812 return ret; 813 return ret;
813} 814}
814 815
816static int ucma_set_ib_path(struct ucma_context *ctx,
817 struct ib_path_rec_data *path_data, size_t optlen)
818{
819 struct ib_sa_path_rec sa_path;
820 struct rdma_cm_event event;
821 int ret;
822
823 if (optlen % sizeof(*path_data))
824 return -EINVAL;
825
826 for (; optlen; optlen -= sizeof(*path_data), path_data++) {
827 if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
828 IB_PATH_BIDIRECTIONAL))
829 break;
830 }
831
832 if (!optlen)
833 return -EINVAL;
834
835 ib_sa_unpack_path(path_data->path_rec, &sa_path);
836 ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
837 if (ret)
838 return ret;
839
840 memset(&event, 0, sizeof event);
841 event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
842 return ucma_event_handler(ctx->cm_id, &event);
843}
844
845static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
846 void *optval, size_t optlen)
847{
848 int ret;
849
850 switch (optname) {
851 case RDMA_OPTION_IB_PATH:
852 ret = ucma_set_ib_path(ctx, optval, optlen);
853 break;
854 default:
855 ret = -ENOSYS;
856 }
857
858 return ret;
859}
860
815static int ucma_set_option_level(struct ucma_context *ctx, int level, 861static int ucma_set_option_level(struct ucma_context *ctx, int level,
816 int optname, void *optval, size_t optlen) 862 int optname, void *optval, size_t optlen)
817{ 863{
@@ -821,6 +867,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
821 case RDMA_OPTION_ID: 867 case RDMA_OPTION_ID:
822 ret = ucma_set_option_id(ctx, optname, optval, optlen); 868 ret = ucma_set_option_id(ctx, optname, optval, optlen);
823 break; 869 break;
870 case RDMA_OPTION_IB:
871 ret = ucma_set_option_ib(ctx, optname, optval, optlen);
872 break;
824 default: 873 default:
825 ret = -ENOSYS; 874 ret = -ENOSYS;
826 } 875 }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 56feab6c251e..112d3970222a 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -285,7 +285,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
285 285
286 ucontext = ibdev->alloc_ucontext(ibdev, &udata); 286 ucontext = ibdev->alloc_ucontext(ibdev, &udata);
287 if (IS_ERR(ucontext)) { 287 if (IS_ERR(ucontext)) {
288 ret = PTR_ERR(file->ucontext); 288 ret = PTR_ERR(ucontext);
289 goto err; 289 goto err;
290 } 290 }
291 291
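The one-line uverbs fix restores the usual IS_ERR()/PTR_ERR() pairing: the error code must be decoded from the pointer that was actually tested, not from an unrelated field. In miniature (struct foo and foo_alloc() are hypothetical):

#include <linux/err.h>

struct foo;
struct foo *foo_alloc(void);

static int foo_get(struct foo **out)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* decode the same pointer IS_ERR() tested */

	*out = f;
	return 0;
}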
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index a6d89440ad2c..ad518868df77 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -798,8 +798,10 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
798 u8 actual_sge_count; 798 u8 actual_sge_count;
799 u32 msg_size; 799 u32 msg_size;
800 800
801 if (qp->state > IB_QPS_RTS) 801 if (qp->state > IB_QPS_RTS) {
802 return -EINVAL; 802 err = -EINVAL;
803 goto out;
804 }
803 805
804 while (ib_wr) { 806 while (ib_wr) {
805 807
@@ -930,6 +932,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
930 ib_wr = ib_wr->next; 932 ib_wr = ib_wr->next;
931 } 933 }
932 934
935out:
933 if (err) 936 if (err)
934 *bad_wr = ib_wr; 937 *bad_wr = ib_wr;
935 return err; 938 return err;
@@ -944,8 +947,10 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
944 unsigned long lock_flags; 947 unsigned long lock_flags;
945 int err = 0; 948 int err = 0;
946 949
947 if (qp->state > IB_QPS_RTS) 950 if (qp->state > IB_QPS_RTS) {
948 return -EINVAL; 951 err = -EINVAL;
952 goto out;
953 }
949 954
950 /* 955 /*
951 * Try and post each work request 956 * Try and post each work request
@@ -998,6 +1003,7 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
998 ib_wr = ib_wr->next; 1003 ib_wr = ib_wr->next;
999 } 1004 }
1000 1005
1006out:
1001 if (err) 1007 if (err)
1002 *bad_wr = ib_wr; 1008 *bad_wr = ib_wr;
1003 return err; 1009 return err;
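The amso1100 change above, and the cxgb3 and ehca hunks that follow, converge on the same post_send/post_receive error convention: record the failing work request once, at a single exit label, so early-exit errors (bad QP state, empty or overlong list) also report a *bad_wr. The skeleton, with hypothetical helpers:

#include <rdma/ib_verbs.h>

static int qp_not_ready(struct ib_qp *ibqp);
static int post_one(struct ib_qp *ibqp, struct ib_send_wr *wr);

static int foo_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	int err = 0;

	if (qp_not_ready(ibqp)) {
		err = -EINVAL;
		goto out;
	}

	while (wr) {
		err = post_one(ibqp, wr);
		if (err)
			break;		/* wr still points at the failing request */
		wr = wr->next;
	}
out:
	if (err)
		*bad_wr = wr;
	return err;
}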
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 1cecf98829ac..3eb8cecf81d7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -365,18 +365,19 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
365 spin_lock_irqsave(&qhp->lock, flag); 365 spin_lock_irqsave(&qhp->lock, flag);
366 if (qhp->attr.state > IWCH_QP_STATE_RTS) { 366 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
367 spin_unlock_irqrestore(&qhp->lock, flag); 367 spin_unlock_irqrestore(&qhp->lock, flag);
368 return -EINVAL; 368 err = -EINVAL;
369 goto out;
369 } 370 }
370 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr, 371 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
371 qhp->wq.sq_size_log2); 372 qhp->wq.sq_size_log2);
372 if (num_wrs <= 0) { 373 if (num_wrs <= 0) {
373 spin_unlock_irqrestore(&qhp->lock, flag); 374 spin_unlock_irqrestore(&qhp->lock, flag);
374 return -ENOMEM; 375 err = -ENOMEM;
376 goto out;
375 } 377 }
376 while (wr) { 378 while (wr) {
377 if (num_wrs == 0) { 379 if (num_wrs == 0) {
378 err = -ENOMEM; 380 err = -ENOMEM;
379 *bad_wr = wr;
380 break; 381 break;
381 } 382 }
382 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); 383 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -428,10 +429,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
428 wr->opcode); 429 wr->opcode);
429 err = -EINVAL; 430 err = -EINVAL;
430 } 431 }
431 if (err) { 432 if (err)
432 *bad_wr = wr;
433 break; 433 break;
434 }
435 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr; 434 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
436 sqp->wr_id = wr->wr_id; 435 sqp->wr_id = wr->wr_id;
437 sqp->opcode = wr2opcode(t3_wr_opcode); 436 sqp->opcode = wr2opcode(t3_wr_opcode);
@@ -454,6 +453,10 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
454 } 453 }
455 spin_unlock_irqrestore(&qhp->lock, flag); 454 spin_unlock_irqrestore(&qhp->lock, flag);
456 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 455 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
456
457out:
458 if (err)
459 *bad_wr = wr;
457 return err; 460 return err;
458} 461}
459 462
@@ -471,18 +474,19 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
471 spin_lock_irqsave(&qhp->lock, flag); 474 spin_lock_irqsave(&qhp->lock, flag);
472 if (qhp->attr.state > IWCH_QP_STATE_RTS) { 475 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
473 spin_unlock_irqrestore(&qhp->lock, flag); 476 spin_unlock_irqrestore(&qhp->lock, flag);
474 return -EINVAL; 477 err = -EINVAL;
478 goto out;
475 } 479 }
476 num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr, 480 num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
477 qhp->wq.rq_size_log2) - 1; 481 qhp->wq.rq_size_log2) - 1;
478 if (!wr) { 482 if (!wr) {
479 spin_unlock_irqrestore(&qhp->lock, flag); 483 spin_unlock_irqrestore(&qhp->lock, flag);
480 return -EINVAL; 484 err = -ENOMEM;
485 goto out;
481 } 486 }
482 while (wr) { 487 while (wr) {
483 if (wr->num_sge > T3_MAX_SGE) { 488 if (wr->num_sge > T3_MAX_SGE) {
484 err = -EINVAL; 489 err = -EINVAL;
485 *bad_wr = wr;
486 break; 490 break;
487 } 491 }
488 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); 492 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -494,10 +498,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
494 err = build_zero_stag_recv(qhp, wqe, wr); 498 err = build_zero_stag_recv(qhp, wqe, wr);
495 else 499 else
496 err = -ENOMEM; 500 err = -ENOMEM;
497 if (err) { 501
498 *bad_wr = wr; 502 if (err)
499 break; 503 break;
500 } 504
501 build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG, 505 build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
502 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 506 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
503 0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP); 507 0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
@@ -511,6 +515,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
511 } 515 }
512 spin_unlock_irqrestore(&qhp->lock, flag); 516 spin_unlock_irqrestore(&qhp->lock, flag);
513 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 517 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
518
519out:
520 if (err)
521 *bad_wr = wr;
514 return err; 522 return err;
515} 523}
516 524
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index c825142a2fb7..0136abd50dd4 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -375,6 +375,7 @@ extern rwlock_t ehca_qp_idr_lock;
375extern rwlock_t ehca_cq_idr_lock; 375extern rwlock_t ehca_cq_idr_lock;
376extern struct idr ehca_qp_idr; 376extern struct idr ehca_qp_idr;
377extern struct idr ehca_cq_idr; 377extern struct idr ehca_cq_idr;
378extern spinlock_t shca_list_lock;
378 379
379extern int ehca_static_rate; 380extern int ehca_static_rate;
380extern int ehca_port_act_time; 381extern int ehca_port_act_time;
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 523e733c630e..3b87589b8ea0 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -169,12 +169,15 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
169 unsigned long flags; 169 unsigned long flags;
170 u64 h_ret; 170 u64 h_ret;
171 171
172 spin_lock_irqsave(&eq->spinlock, flags);
173 ibmebus_free_irq(eq->ist, (void *)shca); 172 ibmebus_free_irq(eq->ist, (void *)shca);
174 173
175 h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq); 174 spin_lock_irqsave(&shca_list_lock, flags);
175 eq->is_initialized = 0;
176 spin_unlock_irqrestore(&shca_list_lock, flags);
176 177
177 spin_unlock_irqrestore(&eq->spinlock, flags); 178 tasklet_kill(&eq->interrupt_task);
179
180 h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
178 181
179 if (h_ret != H_SUCCESS) { 182 if (h_ret != H_SUCCESS) {
180 ehca_err(&shca->ib_device, "Can't free EQ resources."); 183 ehca_err(&shca->ib_device, "Can't free EQ resources.");
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fb2d83c5bf01..129a6bebd6e3 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -123,7 +123,7 @@ DEFINE_IDR(ehca_qp_idr);
123DEFINE_IDR(ehca_cq_idr); 123DEFINE_IDR(ehca_cq_idr);
124 124
125static LIST_HEAD(shca_list); /* list of all registered ehcas */ 125static LIST_HEAD(shca_list); /* list of all registered ehcas */
126static DEFINE_SPINLOCK(shca_list_lock); 126DEFINE_SPINLOCK(shca_list_lock);
127 127
128static struct timer_list poll_eqs_timer; 128static struct timer_list poll_eqs_timer;
129 129
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 8fd88cd828fd..e3ec7fdd67bd 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -400,7 +400,6 @@ static inline void map_ib_wc_status(u32 cqe_status,
400 400
401static inline int post_one_send(struct ehca_qp *my_qp, 401static inline int post_one_send(struct ehca_qp *my_qp,
402 struct ib_send_wr *cur_send_wr, 402 struct ib_send_wr *cur_send_wr,
403 struct ib_send_wr **bad_send_wr,
404 int hidden) 403 int hidden)
405{ 404{
406 struct ehca_wqe *wqe_p; 405 struct ehca_wqe *wqe_p;
@@ -412,8 +411,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
412 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue); 411 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
413 if (unlikely(!wqe_p)) { 412 if (unlikely(!wqe_p)) {
414 /* too many posted work requests: queue overflow */ 413 /* too many posted work requests: queue overflow */
415 if (bad_send_wr)
416 *bad_send_wr = cur_send_wr;
417 ehca_err(my_qp->ib_qp.device, "Too many posted WQEs " 414 ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
418 "qp_num=%x", my_qp->ib_qp.qp_num); 415 "qp_num=%x", my_qp->ib_qp.qp_num);
419 return -ENOMEM; 416 return -ENOMEM;
@@ -433,8 +430,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
433 */ 430 */
434 if (unlikely(ret)) { 431 if (unlikely(ret)) {
435 my_qp->ipz_squeue.current_q_offset = start_offset; 432 my_qp->ipz_squeue.current_q_offset = start_offset;
436 if (bad_send_wr)
437 *bad_send_wr = cur_send_wr;
438 ehca_err(my_qp->ib_qp.device, "Could not write WQE " 433 ehca_err(my_qp->ib_qp.device, "Could not write WQE "
439 "qp_num=%x", my_qp->ib_qp.qp_num); 434 "qp_num=%x", my_qp->ib_qp.qp_num);
440 return -EINVAL; 435 return -EINVAL;
@@ -448,7 +443,6 @@ int ehca_post_send(struct ib_qp *qp,
448 struct ib_send_wr **bad_send_wr) 443 struct ib_send_wr **bad_send_wr)
449{ 444{
450 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); 445 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
451 struct ib_send_wr *cur_send_wr;
452 int wqe_cnt = 0; 446 int wqe_cnt = 0;
453 int ret = 0; 447 int ret = 0;
454 unsigned long flags; 448 unsigned long flags;
@@ -457,7 +451,8 @@ int ehca_post_send(struct ib_qp *qp,
457 if (unlikely(my_qp->state < IB_QPS_RTS)) { 451 if (unlikely(my_qp->state < IB_QPS_RTS)) {
458 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x", 452 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
459 my_qp->state, qp->qp_num); 453 my_qp->state, qp->qp_num);
460 return -EINVAL; 454 ret = -EINVAL;
455 goto out;
461 } 456 }
462 457
463 /* LOCK the QUEUE */ 458 /* LOCK the QUEUE */
@@ -476,24 +471,21 @@ int ehca_post_send(struct ib_qp *qp,
476 struct ib_send_wr circ_wr; 471 struct ib_send_wr circ_wr;
477 memset(&circ_wr, 0, sizeof(circ_wr)); 472 memset(&circ_wr, 0, sizeof(circ_wr));
478 circ_wr.opcode = IB_WR_RDMA_READ; 473 circ_wr.opcode = IB_WR_RDMA_READ;
479 post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */ 474 post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
480 wqe_cnt++; 475 wqe_cnt++;
481 ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num); 476 ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
482 my_qp->message_count = my_qp->packet_count = 0; 477 my_qp->message_count = my_qp->packet_count = 0;
483 } 478 }
484 479
485 /* loop processes list of send reqs */ 480 /* loop processes list of send reqs */
486 for (cur_send_wr = send_wr; cur_send_wr != NULL; 481 while (send_wr) {
487 cur_send_wr = cur_send_wr->next) { 482 ret = post_one_send(my_qp, send_wr, 0);
488 ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
489 if (unlikely(ret)) { 483 if (unlikely(ret)) {
490 /* if one or more WQEs were successful, don't fail */
491 if (wqe_cnt)
492 ret = 0;
493 goto post_send_exit0; 484 goto post_send_exit0;
494 } 485 }
495 wqe_cnt++; 486 wqe_cnt++;
496 } /* eof for cur_send_wr */ 487 send_wr = send_wr->next;
488 }
497 489
498post_send_exit0: 490post_send_exit0:
499 iosync(); /* serialize GAL register access */ 491 iosync(); /* serialize GAL register access */
@@ -503,6 +495,10 @@ post_send_exit0:
503 my_qp, qp->qp_num, wqe_cnt, ret); 495 my_qp, qp->qp_num, wqe_cnt, ret);
504 my_qp->message_count += wqe_cnt; 496 my_qp->message_count += wqe_cnt;
505 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 497 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
498
499out:
500 if (ret)
501 *bad_send_wr = send_wr;
506 return ret; 502 return ret;
507} 503}
508 504
@@ -511,7 +507,6 @@ static int internal_post_recv(struct ehca_qp *my_qp,
511 struct ib_recv_wr *recv_wr, 507 struct ib_recv_wr *recv_wr,
512 struct ib_recv_wr **bad_recv_wr) 508 struct ib_recv_wr **bad_recv_wr)
513{ 509{
514 struct ib_recv_wr *cur_recv_wr;
515 struct ehca_wqe *wqe_p; 510 struct ehca_wqe *wqe_p;
516 int wqe_cnt = 0; 511 int wqe_cnt = 0;
517 int ret = 0; 512 int ret = 0;
@@ -522,27 +517,23 @@ static int internal_post_recv(struct ehca_qp *my_qp,
522 if (unlikely(!HAS_RQ(my_qp))) { 517 if (unlikely(!HAS_RQ(my_qp))) {
523 ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d", 518 ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
524 my_qp, my_qp->real_qp_num, my_qp->ext_type); 519 my_qp, my_qp->real_qp_num, my_qp->ext_type);
525 return -ENODEV; 520 ret = -ENODEV;
521 goto out;
526 } 522 }
527 523
528 /* LOCK the QUEUE */ 524 /* LOCK the QUEUE */
529 spin_lock_irqsave(&my_qp->spinlock_r, flags); 525 spin_lock_irqsave(&my_qp->spinlock_r, flags);
530 526
531 /* loop processes list of send reqs */ 527 /* loop processes list of recv reqs */
532 for (cur_recv_wr = recv_wr; cur_recv_wr != NULL; 528 while (recv_wr) {
533 cur_recv_wr = cur_recv_wr->next) {
534 u64 start_offset = my_qp->ipz_rqueue.current_q_offset; 529 u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
535 /* get pointer next to free WQE */ 530 /* get pointer next to free WQE */
536 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue); 531 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
537 if (unlikely(!wqe_p)) { 532 if (unlikely(!wqe_p)) {
538 /* too many posted work requests: queue overflow */ 533 /* too many posted work requests: queue overflow */
539 if (bad_recv_wr) 534 ret = -ENOMEM;
540 *bad_recv_wr = cur_recv_wr; 535 ehca_err(dev, "Too many posted WQEs "
541 if (wqe_cnt == 0) { 536 "qp_num=%x", my_qp->real_qp_num);
542 ret = -ENOMEM;
543 ehca_err(dev, "Too many posted WQEs "
544 "qp_num=%x", my_qp->real_qp_num);
545 }
546 goto post_recv_exit0; 537 goto post_recv_exit0;
547 } 538 }
548 /* 539 /*
@@ -552,7 +543,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
552 rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size; 543 rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
553 544
554 /* write a RECV WQE into the QUEUE */ 545 /* write a RECV WQE into the QUEUE */
555 ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr, 546 ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
556 rq_map_idx); 547 rq_map_idx);
557 /* 548 /*
558 * if something failed, 549 * if something failed,
@@ -560,22 +551,20 @@ static int internal_post_recv(struct ehca_qp *my_qp,
560 */ 551 */
561 if (unlikely(ret)) { 552 if (unlikely(ret)) {
562 my_qp->ipz_rqueue.current_q_offset = start_offset; 553 my_qp->ipz_rqueue.current_q_offset = start_offset;
563 *bad_recv_wr = cur_recv_wr; 554 ret = -EINVAL;
564 if (wqe_cnt == 0) { 555 ehca_err(dev, "Could not write WQE "
565 ret = -EINVAL; 556 "qp_num=%x", my_qp->real_qp_num);
566 ehca_err(dev, "Could not write WQE "
567 "qp_num=%x", my_qp->real_qp_num);
568 }
569 goto post_recv_exit0; 557 goto post_recv_exit0;
570 } 558 }
571 559
572 qmap_entry = &my_qp->rq_map.map[rq_map_idx]; 560 qmap_entry = &my_qp->rq_map.map[rq_map_idx];
573 qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id); 561 qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
574 qmap_entry->reported = 0; 562 qmap_entry->reported = 0;
575 qmap_entry->cqe_req = 1; 563 qmap_entry->cqe_req = 1;
576 564
577 wqe_cnt++; 565 wqe_cnt++;
578 } /* eof for cur_recv_wr */ 566 recv_wr = recv_wr->next;
567 } /* eof for recv_wr */
579 568
580post_recv_exit0: 569post_recv_exit0:
581 iosync(); /* serialize GAL register access */ 570 iosync(); /* serialize GAL register access */
@@ -584,6 +573,11 @@ post_recv_exit0:
584 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i", 573 ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
585 my_qp, my_qp->real_qp_num, wqe_cnt, ret); 574 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
586 spin_unlock_irqrestore(&my_qp->spinlock_r, flags); 575 spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
576
577out:
578 if (ret)
579 *bad_recv_wr = recv_wr;
580
587 return ret; 581 return ret;
588} 582}
589 583
@@ -597,6 +591,7 @@ int ehca_post_recv(struct ib_qp *qp,
597 if (unlikely(my_qp->state == IB_QPS_RESET)) { 591 if (unlikely(my_qp->state == IB_QPS_RESET)) {
598 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x", 592 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
599 my_qp->state, qp->qp_num); 593 my_qp->state, qp->qp_num);
594 *bad_recv_wr = recv_wr;
600 return -EINVAL; 595 return -EINVAL;
601 } 596 }
602 597
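
The ehca hunks above funnel every failure to one exit label that writes the first unposted work request back through *bad_send_wr/*bad_recv_wr, walking the list via the parameter itself instead of a separate cursor. A minimal userspace sketch of that pattern, assuming made-up struct wr and post_one() stand-ins rather than the driver's types:

#include <stdio.h>

struct wr { int id; struct wr *next; };

/* pretend posting fails for negative ids */
static int post_one(const struct wr *w) { return w->id < 0 ? -1 : 0; }

static int post_list(struct wr *wr, struct wr **bad_wr)
{
	int ret = 0;

	while (wr) {
		ret = post_one(wr);
		if (ret)
			goto out;		/* wr is the failing entry */
		wr = wr->next;
	}
out:
	if (ret)
		*bad_wr = wr;			/* reported once, at the exit */
	return ret;
}

int main(void)
{
	struct wr c = { -3, 0 }, b = { 2, &c }, a = { 1, &b };
	struct wr *bad = 0;

	printf("ret=%d bad_id=%d\n", post_list(&a, &bad), bad ? bad->id : 0);
	return 0;
}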
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 013d1380e77c..d2787fe80304 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,6 +39,7 @@
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <linux/bitmap.h>
42 43
43#include "ipath_kernel.h" 44#include "ipath_kernel.h"
44#include "ipath_verbs.h" 45#include "ipath_verbs.h"
@@ -1697,7 +1698,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1697 unsigned len, int avail) 1698 unsigned len, int avail)
1698{ 1699{
1699 unsigned long flags; 1700 unsigned long flags;
1700 unsigned end, cnt = 0, next; 1701 unsigned end, cnt = 0;
1701 1702
1702 /* There are two bits per send buffer (busy and generation) */ 1703 /* There are two bits per send buffer (busy and generation) */
1703 start *= 2; 1704 start *= 2;
@@ -1748,12 +1749,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1748 1749
1749 if (dd->ipath_pioupd_thresh) { 1750 if (dd->ipath_pioupd_thresh) {
1750 end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k); 1751 end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1751 next = find_first_bit(dd->ipath_pioavailkernel, end); 1752 cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
1752 while (next < end) {
1753 cnt++;
1754 next = find_next_bit(dd->ipath_pioavailkernel, end,
1755 next + 1);
1756 }
1757 } 1753 }
1758 spin_unlock_irqrestore(&ipath_pioavail_lock, flags); 1754 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1759 1755
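
The ipath hunk swaps an explicit find_first_bit()/find_next_bit() walk for a single bitmap_weight() call. A small userspace sketch of the same semantics; bitmap_weight_sketch() only mirrors the kernel helper's meaning, it is not the kernel implementation:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* count the set bits in the first nbits of the bitmap */
static unsigned int bitmap_weight_sketch(const unsigned long *map,
					 unsigned int nbits)
{
	unsigned int i, cnt = 0;

	for (i = 0; i < nbits; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			cnt++;
	return cnt;
}

int main(void)
{
	unsigned long map[2] = { 0xf0f0UL, 0x3UL };

	/* 0xf0f0 contributes 8 set bits, 0x3 contributes 2: prints 10 */
	printf("%u\n", bitmap_weight_sketch(map, 2 * BITS_PER_LONG));
	return 0;
}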
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3cb3f47a10b8..e596537ff353 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -103,7 +103,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
103 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; 103 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
104 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 104 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
105 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 105 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
106 if (dev->dev->caps.max_gso_sz) 106 if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
107 props->device_cap_flags |= IB_DEVICE_UD_TSO; 107 props->device_cap_flags |= IB_DEVICE_UD_TSO;
108 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) 108 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
109 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; 109 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 256a00c6aeea..989555cee883 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -54,7 +54,8 @@ enum {
54 /* 54 /*
55 * Largest possible UD header: send with GRH and immediate data. 55 * Largest possible UD header: send with GRH and immediate data.
56 */ 56 */
57 MLX4_IB_UD_HEADER_SIZE = 72 57 MLX4_IB_UD_HEADER_SIZE = 72,
58 MLX4_IB_LSO_HEADER_SPARE = 128,
58}; 59};
59 60
60struct mlx4_ib_sqp { 61struct mlx4_ib_sqp {
@@ -67,7 +68,8 @@ struct mlx4_ib_sqp {
67}; 68};
68 69
69enum { 70enum {
70 MLX4_IB_MIN_SQ_STRIDE = 6 71 MLX4_IB_MIN_SQ_STRIDE = 6,
72 MLX4_IB_CACHE_LINE_SIZE = 64,
71}; 73};
72 74
73static const __be32 mlx4_ib_opcode[] = { 75static const __be32 mlx4_ib_opcode[] = {
@@ -261,7 +263,7 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
261 case IB_QPT_UD: 263 case IB_QPT_UD:
262 return sizeof (struct mlx4_wqe_ctrl_seg) + 264 return sizeof (struct mlx4_wqe_ctrl_seg) +
263 sizeof (struct mlx4_wqe_datagram_seg) + 265 sizeof (struct mlx4_wqe_datagram_seg) +
264 ((flags & MLX4_IB_QP_LSO) ? 64 : 0); 266 ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
265 case IB_QPT_UC: 267 case IB_QPT_UC:
266 return sizeof (struct mlx4_wqe_ctrl_seg) + 268 return sizeof (struct mlx4_wqe_ctrl_seg) +
267 sizeof (struct mlx4_wqe_raddr_seg); 269 sizeof (struct mlx4_wqe_raddr_seg);
@@ -897,7 +899,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
897 899
898 context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | 900 context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
899 (to_mlx4_st(ibqp->qp_type) << 16)); 901 (to_mlx4_st(ibqp->qp_type) << 16));
900 context->flags |= cpu_to_be32(1 << 8); /* DE? */
901 902
902 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) 903 if (!(attr_mask & IB_QP_PATH_MIG_STATE))
903 context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); 904 context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
@@ -1467,16 +1468,12 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1467 1468
1468static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, 1469static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1469 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, 1470 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
1470 __be32 *lso_hdr_sz) 1471 __be32 *lso_hdr_sz, __be32 *blh)
1471{ 1472{
1472 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 1473 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
1473 1474
1474 /* 1475 if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
1475 * This is a temporary limitation and will be removed in 1476 *blh = cpu_to_be32(1 << 6);
1476 * a forthcoming FW release:
1477 */
1478 if (unlikely(halign > 64))
1479 return -EINVAL;
1480 1477
1481 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && 1478 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
1482 wr->num_sge > qp->sq.max_gs - (halign >> 4))) 1479 wr->num_sge > qp->sq.max_gs - (halign >> 4)))
@@ -1522,6 +1519,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1522 __be32 dummy; 1519 __be32 dummy;
1523 __be32 *lso_wqe; 1520 __be32 *lso_wqe;
1524 __be32 uninitialized_var(lso_hdr_sz); 1521 __be32 uninitialized_var(lso_hdr_sz);
1522 __be32 blh;
1525 int i; 1523 int i;
1526 1524
1527 spin_lock_irqsave(&qp->sq.lock, flags); 1525 spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1530,6 +1528,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1530 1528
1531 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1529 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1532 lso_wqe = &dummy; 1530 lso_wqe = &dummy;
1531 blh = 0;
1533 1532
1534 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1533 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1535 err = -ENOMEM; 1534 err = -ENOMEM;
@@ -1616,7 +1615,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1616 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 1615 size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
1617 1616
1618 if (wr->opcode == IB_WR_LSO) { 1617 if (wr->opcode == IB_WR_LSO) {
1619 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz); 1618 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
1620 if (unlikely(err)) { 1619 if (unlikely(err)) {
1621 *bad_wr = wr; 1620 *bad_wr = wr;
1622 goto out; 1621 goto out;
@@ -1687,7 +1686,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1687 } 1686 }
1688 1687
1689 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | 1688 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
1690 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); 1689 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
1691 1690
1692 stamp = ind + qp->sq_spare_wqes; 1691 stamp = ind + qp->sq_spare_wqes;
1693 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); 1692 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
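
The mlx4 hunks replace the hard -EINVAL for LSO headers larger than one 64-byte cache line with a BLH flag bit that is later OR-ed into ctrl->owner_opcode. A rough userspace sketch of that size check, with illustrative constants rather than the driver's real segment layout:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define CACHE_LINE_SIZE		64u
#define BLH_BIT			(1u << 6)

/* round the LSO segment plus header up to 16 bytes; if it spills past
 * one cache line, request the "big LSO header" handling via a flag
 * instead of rejecting the work request */
static uint32_t lso_flags(unsigned lso_seg_bytes, unsigned hdr_len)
{
	unsigned halign = ALIGN_UP(lso_seg_bytes + hdr_len, 16);

	return (halign > CACHE_LINE_SIZE) ? BLH_BIT : 0;
}

int main(void)
{
	printf("small hdr -> 0x%x, large hdr -> 0x%x\n",
	       lso_flags(16, 40), lso_flags(16, 80));
	return 0;
}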
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
index d449eb6ec78e..846dc97cf260 100644
--- a/drivers/infiniband/hw/nes/Kconfig
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -4,14 +4,13 @@ config INFINIBAND_NES
4 select LIBCRC32C 4 select LIBCRC32C
5 select INET_LRO 5 select INET_LRO
6 ---help--- 6 ---help---
7 This is a low-level driver for NetEffect RDMA enabled 7 This is the RDMA Network Interface Card (RNIC) driver for
8 Network Interface Cards (RNIC). 8 NetEffect Ethernet Cluster Server Adapters.
9 9
10config INFINIBAND_NES_DEBUG 10config INFINIBAND_NES_DEBUG
11 bool "Verbose debugging output" 11 bool "Verbose debugging output"
12 depends on INFINIBAND_NES 12 depends on INFINIBAND_NES
13 default n 13 default n
14 ---help--- 14 ---help---
15 This option causes the NetEffect RNIC driver to produce debug 15 This option enables debug messages from the NetEffect RNIC
16 messages. Select this if you are developing the driver 16 driver. Select this if you are diagnosing a problem.
17 or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index cbde0cfe27e0..b9d09bafd6c1 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -521,7 +521,8 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
521 spin_lock_init(&nesdev->indexed_regs_lock); 521 spin_lock_init(&nesdev->indexed_regs_lock);
522 522
523 /* Remap the PCI registers in adapter BAR0 to kernel VA space */ 523 /* Remap the PCI registers in adapter BAR0 to kernel VA space */
524 mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), sizeof(mmio_regs)); 524 mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0),
525 pci_resource_len(pcidev, BAR_0));
525 if (mmio_regs == NULL) { 526 if (mmio_regs == NULL) {
526 printk(KERN_ERR PFX "Unable to remap BAR0\n"); 527 printk(KERN_ERR PFX "Unable to remap BAR0\n");
527 ret = -EIO; 528 ret = -EIO;
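
The nes_probe() fix above matters because sizeof(mmio_regs) is the size of a pointer variable, not of BAR0, so the old ioremap_nocache() call mapped only a handful of bytes. A tiny demonstration of the sizeof pitfall; the BAR length shown is made up:

#include <stdio.h>

int main(void)
{
	void *mmio_regs = 0;
	unsigned long bar_len = 0x20000;	/* hypothetical BAR0 length */

	/* the pointer is 4 or 8 bytes; the region to map is far larger */
	printf("sizeof(mmio_regs) = %zu, BAR length = %#lx\n",
	       sizeof(mmio_regs), bar_len);
	return 0;
}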
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bcc6abc4faff..98840564bb2f 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 73473db19863..39468c277036 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -52,6 +52,7 @@
52#include <linux/random.h> 52#include <linux/random.h>
53#include <linux/list.h> 53#include <linux/list.h>
54#include <linux/threads.h> 54#include <linux/threads.h>
55#include <linux/highmem.h>
55#include <net/arp.h> 56#include <net/arp.h>
56#include <net/neighbour.h> 57#include <net/neighbour.h>
57#include <net/route.h> 58#include <net/route.h>
@@ -251,6 +252,33 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
251 252
252 mpa_frame = (struct ietf_mpa_frame *)buffer; 253 mpa_frame = (struct ietf_mpa_frame *)buffer;
253 cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len); 254 cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
255 /* make sure mpa private data len is less than 512 bytes */
256 if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) {
257 nes_debug(NES_DBG_CM, "The received Length of Private"
258 " Data field exceeds 512 octets\n");
259 return -EINVAL;
260 }
261 /*
262 * make sure MPA receiver interoperate with the
263 * received MPA version and MPA key information
264 *
265 */
266 if (mpa_frame->rev != mpa_version) {
267 nes_debug(NES_DBG_CM, "The received mpa version"
268 " can not be interoperated\n");
269 return -EINVAL;
270 }
271 if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
272 if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
273 nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
274 return -EINVAL;
275 }
276 } else {
277 if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
278 nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
279 return -EINVAL;
280 }
281 }
254 282
255 if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) { 283 if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
256 nes_debug(NES_DBG_CM, "The received ietf buffer was not right" 284 nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
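
The parse_mpa() additions bound the private-data length, insist on a matching MPA revision, and check the key expected for the node's role in the exchange. A userspace sketch of that validation, using simplified stand-in structures (the real frame layout and key defines live in nes_cm.h):

#include <string.h>
#include <stdint.h>

#define IETF_MPA_KEY_SIZE	16
#define IETF_MAX_PRIV_DATA_LEN	512
#define IETF_MPA_VERSION	1

struct mpa_hdr {
	char	 key[IETF_MPA_KEY_SIZE];
	uint8_t	 rev;
	uint16_t priv_data_len;
};

static int validate_mpa(const struct mpa_hdr *f, int we_sent_the_request)
{
	const char *want = we_sent_the_request ? "MPA ID Rep Frame"
					       : "MPA ID Req Frame";

	if (f->priv_data_len > IETF_MAX_PRIV_DATA_LEN)
		return -1;	/* private data longer than 512 octets */
	if (f->rev != IETF_MPA_VERSION)
		return -1;	/* peer speaks an unsupported MPA revision */
	if (memcmp(f->key, want, IETF_MPA_KEY_SIZE))
		return -1;	/* wrong key for our role in the exchange */
	return 0;
}

int main(void)
{
	struct mpa_hdr req = { "MPA ID Req Frame", IETF_MPA_VERSION, 64 };

	return validate_mpa(&req, 0);	/* 0: we are the responder */
}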
@@ -486,6 +514,8 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
486 send_reset(cm_node, NULL); 514 send_reset(cm_node, NULL);
487 break; 515 break;
488 default: 516 default:
517 add_ref_cm_node(cm_node);
518 send_reset(cm_node, NULL);
489 create_event(cm_node, NES_CM_EVENT_ABORTED); 519 create_event(cm_node, NES_CM_EVENT_ABORTED);
490 } 520 }
491} 521}
@@ -949,6 +979,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
949 reset_entry); 979 reset_entry);
950 { 980 {
951 struct nes_cm_node *loopback = cm_node->loopbackpartner; 981 struct nes_cm_node *loopback = cm_node->loopbackpartner;
982 enum nes_cm_node_state old_state;
952 if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) { 983 if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
953 rem_ref_cm_node(cm_node->cm_core, cm_node); 984 rem_ref_cm_node(cm_node->cm_core, cm_node);
954 } else { 985 } else {
@@ -960,11 +991,12 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
960 NES_CM_STATE_CLOSED; 991 NES_CM_STATE_CLOSED;
961 WARN_ON(1); 992 WARN_ON(1);
962 } else { 993 } else {
963 cm_node->state = 994 old_state = cm_node->state;
964 NES_CM_STATE_CLOSED; 995 cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
965 rem_ref_cm_node( 996 if (old_state != NES_CM_STATE_MPAREQ_RCVD)
966 cm_node->cm_core, 997 rem_ref_cm_node(
967 cm_node); 998 cm_node->cm_core,
999 cm_node);
968 } 1000 }
969 } else { 1001 } else {
970 struct nes_cm_event event; 1002 struct nes_cm_event event;
@@ -980,20 +1012,9 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
980 loopback->loc_port; 1012 loopback->loc_port;
981 event.cm_info.cm_id = loopback->cm_id; 1013 event.cm_info.cm_id = loopback->cm_id;
982 cm_event_connect_error(&event); 1014 cm_event_connect_error(&event);
1015 cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
983 loopback->state = NES_CM_STATE_CLOSED; 1016 loopback->state = NES_CM_STATE_CLOSED;
984 1017
985 event.cm_node = cm_node;
986 event.cm_info.rem_addr =
987 cm_node->rem_addr;
988 event.cm_info.loc_addr =
989 cm_node->loc_addr;
990 event.cm_info.rem_port =
991 cm_node->rem_port;
992 event.cm_info.loc_port =
993 cm_node->loc_port;
994 event.cm_info.cm_id = cm_node->cm_id;
995 cm_event_reset(&event);
996
997 rem_ref_cm_node(cm_node->cm_core, 1018 rem_ref_cm_node(cm_node->cm_core,
998 cm_node); 1019 cm_node);
999 1020
@@ -1077,12 +1098,13 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
1077/** 1098/**
1078 * nes_addr_resolve_neigh 1099 * nes_addr_resolve_neigh
1079 */ 1100 */
1080static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip) 1101static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
1081{ 1102{
1082 struct rtable *rt; 1103 struct rtable *rt;
1083 struct flowi fl; 1104 struct flowi fl;
1084 struct neighbour *neigh; 1105 struct neighbour *neigh;
1085 int rc = -1; 1106 int rc = arpindex;
1107 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
1086 1108
1087 memset(&fl, 0, sizeof fl); 1109 memset(&fl, 0, sizeof fl);
1088 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1110 fl.nl_u.ip4_u.daddr = htonl(dst_ip);
@@ -1098,6 +1120,21 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
1098 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" 1120 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
1099 " is %pM, Gateway is 0x%08X \n", dst_ip, 1121 " is %pM, Gateway is 0x%08X \n", dst_ip,
1100 neigh->ha, ntohl(rt->rt_gateway)); 1122 neigh->ha, ntohl(rt->rt_gateway));
1123
1124 if (arpindex >= 0) {
1125 if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
1126 neigh->ha, ETH_ALEN)){
1127 /* Mac address same as in nes_arp_table */
1128 neigh_release(neigh);
1129 ip_rt_put(rt);
1130 return rc;
1131 }
1132
1133 nes_manage_arp_cache(nesvnic->netdev,
1134 nesadapter->arp_table[arpindex].mac_addr,
1135 dst_ip, NES_ARP_DELETE);
1136 }
1137
1101 nes_manage_arp_cache(nesvnic->netdev, neigh->ha, 1138 nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
1102 dst_ip, NES_ARP_ADD); 1139 dst_ip, NES_ARP_ADD);
1103 rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL, 1140 rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
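
The nes_addr_resolve_neigh() change above compares the freshly resolved neighbour MAC against the cached ARP entry and only deletes and re-adds the entry when they differ. A small sketch of that idea; arp_update() and the MAC values are illustrative, not driver code:

#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

static unsigned char cache[ETH_ALEN] = { 0, 1, 2, 3, 4, 5 };

static void arp_update(const unsigned char *mac)
{
	memcpy(cache, mac, ETH_ALEN);
	puts("ARP cache refreshed");
}

static void resolve(const unsigned char *neigh_mac)
{
	if (!memcmp(cache, neigh_mac, ETH_ALEN)) {
		puts("cached MAC still valid, keep it");
		return;
	}
	arp_update(neigh_mac);	/* old entry was stale */
}

int main(void)
{
	unsigned char same[ETH_ALEN]  = { 0, 1, 2, 3, 4, 5 };
	unsigned char moved[ETH_ALEN] = { 0, 1, 2, 3, 4, 9 };

	resolve(same);
	resolve(moved);
	return 0;
}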
@@ -1113,7 +1150,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
1113 return rc; 1150 return rc;
1114} 1151}
1115 1152
1116
1117/** 1153/**
1118 * make_cm_node - create a new instance of a cm node 1154 * make_cm_node - create a new instance of a cm node
1119 */ 1155 */
@@ -1123,6 +1159,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1123{ 1159{
1124 struct nes_cm_node *cm_node; 1160 struct nes_cm_node *cm_node;
1125 struct timespec ts; 1161 struct timespec ts;
1162 int oldarpindex = 0;
1126 int arpindex = 0; 1163 int arpindex = 0;
1127 struct nes_device *nesdev; 1164 struct nes_device *nesdev;
1128 struct nes_adapter *nesadapter; 1165 struct nes_adapter *nesadapter;
@@ -1176,17 +1213,18 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1176 nesadapter = nesdev->nesadapter; 1213 nesadapter = nesdev->nesadapter;
1177 1214
1178 cm_node->loopbackpartner = NULL; 1215 cm_node->loopbackpartner = NULL;
1216
1179 /* get the mac addr for the remote node */ 1217 /* get the mac addr for the remote node */
1180 if (ipv4_is_loopback(htonl(cm_node->rem_addr))) 1218 if (ipv4_is_loopback(htonl(cm_node->rem_addr)))
1181 arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE); 1219 arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
1182 else 1220 else {
1183 arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); 1221 oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
1222 arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
1223
1224 }
1184 if (arpindex < 0) { 1225 if (arpindex < 0) {
1185 arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr); 1226 kfree(cm_node);
1186 if (arpindex < 0) { 1227 return NULL;
1187 kfree(cm_node);
1188 return NULL;
1189 }
1190 } 1228 }
1191 1229
1192 /* copy the mac addr to node context */ 1230 /* copy the mac addr to node context */
@@ -1333,13 +1371,20 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node)
1333 case NES_CM_STATE_SYN_RCVD: 1371 case NES_CM_STATE_SYN_RCVD:
1334 case NES_CM_STATE_SYN_SENT: 1372 case NES_CM_STATE_SYN_SENT:
1335 case NES_CM_STATE_ESTABLISHED: 1373 case NES_CM_STATE_ESTABLISHED:
1336 case NES_CM_STATE_MPAREQ_SENT:
1337 case NES_CM_STATE_MPAREJ_RCVD: 1374 case NES_CM_STATE_MPAREJ_RCVD:
1338 cm_node->tcp_cntxt.rcv_nxt++; 1375 cm_node->tcp_cntxt.rcv_nxt++;
1339 cleanup_retrans_entry(cm_node); 1376 cleanup_retrans_entry(cm_node);
1340 cm_node->state = NES_CM_STATE_LAST_ACK; 1377 cm_node->state = NES_CM_STATE_LAST_ACK;
1341 send_fin(cm_node, NULL); 1378 send_fin(cm_node, NULL);
1342 break; 1379 break;
1380 case NES_CM_STATE_MPAREQ_SENT:
1381 create_event(cm_node, NES_CM_EVENT_ABORTED);
1382 cm_node->tcp_cntxt.rcv_nxt++;
1383 cleanup_retrans_entry(cm_node);
1384 cm_node->state = NES_CM_STATE_CLOSED;
1385 add_ref_cm_node(cm_node);
1386 send_reset(cm_node, NULL);
1387 break;
1343 case NES_CM_STATE_FIN_WAIT1: 1388 case NES_CM_STATE_FIN_WAIT1:
1344 cm_node->tcp_cntxt.rcv_nxt++; 1389 cm_node->tcp_cntxt.rcv_nxt++;
1345 cleanup_retrans_entry(cm_node); 1390 cleanup_retrans_entry(cm_node);
@@ -1590,6 +1635,7 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1590 break; 1635 break;
1591 case NES_CM_STATE_CLOSED: 1636 case NES_CM_STATE_CLOSED:
1592 cleanup_retrans_entry(cm_node); 1637 cleanup_retrans_entry(cm_node);
1638 add_ref_cm_node(cm_node);
1593 send_reset(cm_node, skb); 1639 send_reset(cm_node, skb);
1594 break; 1640 break;
1595 case NES_CM_STATE_TSA: 1641 case NES_CM_STATE_TSA:
@@ -1641,9 +1687,15 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1641 passive_open_err(cm_node, skb, 1); 1687 passive_open_err(cm_node, skb, 1);
1642 break; 1688 break;
1643 case NES_CM_STATE_LISTENING: 1689 case NES_CM_STATE_LISTENING:
1690 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1691 cleanup_retrans_entry(cm_node);
1692 cm_node->state = NES_CM_STATE_CLOSED;
1693 send_reset(cm_node, skb);
1694 break;
1644 case NES_CM_STATE_CLOSED: 1695 case NES_CM_STATE_CLOSED:
1645 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); 1696 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1646 cleanup_retrans_entry(cm_node); 1697 cleanup_retrans_entry(cm_node);
1698 add_ref_cm_node(cm_node);
1647 send_reset(cm_node, skb); 1699 send_reset(cm_node, skb);
1648 break; 1700 break;
1649 case NES_CM_STATE_ESTABLISHED: 1701 case NES_CM_STATE_ESTABLISHED:
@@ -1712,8 +1764,13 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1712 dev_kfree_skb_any(skb); 1764 dev_kfree_skb_any(skb);
1713 break; 1765 break;
1714 case NES_CM_STATE_LISTENING: 1766 case NES_CM_STATE_LISTENING:
1767 cleanup_retrans_entry(cm_node);
1768 cm_node->state = NES_CM_STATE_CLOSED;
1769 send_reset(cm_node, skb);
1770 break;
1715 case NES_CM_STATE_CLOSED: 1771 case NES_CM_STATE_CLOSED:
1716 cleanup_retrans_entry(cm_node); 1772 cleanup_retrans_entry(cm_node);
1773 add_ref_cm_node(cm_node);
1717 send_reset(cm_node, skb); 1774 send_reset(cm_node, skb);
1718 break; 1775 break;
1719 case NES_CM_STATE_LAST_ACK: 1776 case NES_CM_STATE_LAST_ACK:
@@ -1974,7 +2031,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1974 if (!cm_node) 2031 if (!cm_node)
1975 return NULL; 2032 return NULL;
1976 mpa_frame = &cm_node->mpa_frame; 2033 mpa_frame = &cm_node->mpa_frame;
1977 strcpy(mpa_frame->key, IEFT_MPA_KEY_REQ); 2034 memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
1978 mpa_frame->flags = IETF_MPA_FLAGS_CRC; 2035 mpa_frame->flags = IETF_MPA_FLAGS_CRC;
1979 mpa_frame->rev = IETF_MPA_VERSION; 2036 mpa_frame->rev = IETF_MPA_VERSION;
1980 mpa_frame->priv_data_len = htons(private_data_len); 2037 mpa_frame->priv_data_len = htons(private_data_len);
@@ -2102,30 +2159,39 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
2102 cm_node->state = NES_CM_STATE_CLOSED; 2159 cm_node->state = NES_CM_STATE_CLOSED;
2103 rem_ref_cm_node(cm_core, cm_node); 2160 rem_ref_cm_node(cm_core, cm_node);
2104 } else { 2161 } else {
2105 ret = send_mpa_reject(cm_node); 2162 if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
2106 if (ret) { 2163 rem_ref_cm_node(cm_core, cm_node);
2107 cm_node->state = NES_CM_STATE_CLOSED; 2164 } else {
2108 err = send_reset(cm_node, NULL); 2165 ret = send_mpa_reject(cm_node);
2109 if (err) 2166 if (ret) {
2110 WARN_ON(1); 2167 cm_node->state = NES_CM_STATE_CLOSED;
2111 } else 2168 err = send_reset(cm_node, NULL);
2112 cm_id->add_ref(cm_id); 2169 if (err)
2170 WARN_ON(1);
2171 } else
2172 cm_id->add_ref(cm_id);
2173 }
2113 } 2174 }
2114 } else { 2175 } else {
2115 cm_node->cm_id = NULL; 2176 cm_node->cm_id = NULL;
2116 event.cm_node = loopback; 2177 if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
2117 event.cm_info.rem_addr = loopback->rem_addr; 2178 rem_ref_cm_node(cm_core, cm_node);
2118 event.cm_info.loc_addr = loopback->loc_addr; 2179 rem_ref_cm_node(cm_core, loopback);
2119 event.cm_info.rem_port = loopback->rem_port; 2180 } else {
2120 event.cm_info.loc_port = loopback->loc_port; 2181 event.cm_node = loopback;
2121 event.cm_info.cm_id = loopback->cm_id; 2182 event.cm_info.rem_addr = loopback->rem_addr;
2122 cm_event_mpa_reject(&event); 2183 event.cm_info.loc_addr = loopback->loc_addr;
2123 rem_ref_cm_node(cm_core, cm_node); 2184 event.cm_info.rem_port = loopback->rem_port;
2124 loopback->state = NES_CM_STATE_CLOSING; 2185 event.cm_info.loc_port = loopback->loc_port;
2186 event.cm_info.cm_id = loopback->cm_id;
2187 cm_event_mpa_reject(&event);
2188 rem_ref_cm_node(cm_core, cm_node);
2189 loopback->state = NES_CM_STATE_CLOSING;
2125 2190
2126 cm_id = loopback->cm_id; 2191 cm_id = loopback->cm_id;
2127 rem_ref_cm_node(cm_core, loopback); 2192 rem_ref_cm_node(cm_core, loopback);
2128 cm_id->rem_ref(cm_id); 2193 cm_id->rem_ref(cm_id);
2194 }
2129 } 2195 }
2130 2196
2131 return ret; 2197 return ret;
@@ -2164,11 +2230,15 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
2164 case NES_CM_STATE_CLOSING: 2230 case NES_CM_STATE_CLOSING:
2165 ret = -1; 2231 ret = -1;
2166 break; 2232 break;
2167 case NES_CM_STATE_MPAREJ_RCVD:
2168 case NES_CM_STATE_LISTENING: 2233 case NES_CM_STATE_LISTENING:
2234 cleanup_retrans_entry(cm_node);
2235 send_reset(cm_node, NULL);
2236 break;
2237 case NES_CM_STATE_MPAREJ_RCVD:
2169 case NES_CM_STATE_UNKNOWN: 2238 case NES_CM_STATE_UNKNOWN:
2170 case NES_CM_STATE_INITED: 2239 case NES_CM_STATE_INITED:
2171 case NES_CM_STATE_CLOSED: 2240 case NES_CM_STATE_CLOSED:
2241 case NES_CM_STATE_LISTENER_DESTROYED:
2172 ret = rem_ref_cm_node(cm_core, cm_node); 2242 ret = rem_ref_cm_node(cm_core, cm_node);
2173 break; 2243 break;
2174 case NES_CM_STATE_TSA: 2244 case NES_CM_STATE_TSA:
@@ -2687,8 +2757,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2687 struct nes_pd *nespd; 2757 struct nes_pd *nespd;
2688 u64 tagged_offset; 2758 u64 tagged_offset;
2689 2759
2690
2691
2692 ibqp = nes_get_qp(cm_id->device, conn_param->qpn); 2760 ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
2693 if (!ibqp) 2761 if (!ibqp)
2694 return -EINVAL; 2762 return -EINVAL;
@@ -2704,6 +2772,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2704 "%s\n", cm_node, nesvnic, nesvnic->netdev, 2772 "%s\n", cm_node, nesvnic, nesvnic->netdev,
2705 nesvnic->netdev->name); 2773 nesvnic->netdev->name);
2706 2774
2775 if (NES_CM_STATE_LISTENER_DESTROYED == cm_node->state) {
2776 if (cm_node->loopbackpartner)
2777 rem_ref_cm_node(cm_node->cm_core, cm_node->loopbackpartner);
2778 rem_ref_cm_node(cm_node->cm_core, cm_node);
2779 return -EINVAL;
2780 }
2781
2707 /* associate the node with the QP */ 2782 /* associate the node with the QP */
2708 nesqp->cm_node = (void *)cm_node; 2783 nesqp->cm_node = (void *)cm_node;
2709 cm_node->nesqp = nesqp; 2784 cm_node->nesqp = nesqp;
@@ -2786,6 +2861,10 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2786 cpu_to_le32(conn_param->private_data_len + 2861 cpu_to_le32(conn_param->private_data_len +
2787 sizeof(struct ietf_mpa_frame)); 2862 sizeof(struct ietf_mpa_frame));
2788 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; 2863 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
2864 if (nesqp->sq_kmapped) {
2865 nesqp->sq_kmapped = 0;
2866 kunmap(nesqp->page);
2867 }
2789 2868
2790 nesqp->nesqp_context->ird_ord_sizes |= 2869 nesqp->nesqp_context->ird_ord_sizes |=
2791 cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | 2870 cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -2929,7 +3008,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2929 if (cm_node->mpa_frame_size > MAX_CM_BUFFER) 3008 if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
2930 return -EINVAL; 3009 return -EINVAL;
2931 3010
2932 strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP); 3011 memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
2933 if (loopback) { 3012 if (loopback) {
2934 memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len); 3013 memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
2935 loopback->mpa_frame.priv_data_len = pdata_len; 3014 loopback->mpa_frame.priv_data_len = pdata_len;
@@ -2974,6 +3053,9 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2974 if (!nesdev) 3053 if (!nesdev)
2975 return -EINVAL; 3054 return -EINVAL;
2976 3055
3056 if (!(cm_id->local_addr.sin_port) || !(cm_id->remote_addr.sin_port))
3057 return -EINVAL;
3058
2977 nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = " 3059 nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
2978 "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id, 3060 "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
2979 ntohl(nesvnic->local_ipaddr), 3061 ntohl(nesvnic->local_ipaddr),
@@ -3251,6 +3333,11 @@ static void cm_event_connected(struct nes_cm_event *event)
3251 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; 3333 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
3252 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; 3334 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
3253 3335
3336 if (nesqp->sq_kmapped) {
3337 nesqp->sq_kmapped = 0;
3338 kunmap(nesqp->page);
3339 }
3340
3254 /* use the reserved spot on the WQ for the extra first WQE */ 3341 /* use the reserved spot on the WQ for the extra first WQE */
3255 nesqp->nesqp_context->ird_ord_sizes &= 3342 nesqp->nesqp_context->ird_ord_sizes &=
3256 cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | 3343 cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -3346,7 +3433,7 @@ static void cm_event_connect_error(struct nes_cm_event *event)
3346 nesqp->cm_id = NULL; 3433 nesqp->cm_id = NULL;
3347 cm_id->provider_data = NULL; 3434 cm_id->provider_data = NULL;
3348 cm_event.event = IW_CM_EVENT_CONNECT_REPLY; 3435 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
3349 cm_event.status = IW_CM_EVENT_STATUS_REJECTED; 3436 cm_event.status = -ECONNRESET;
3350 cm_event.provider_data = cm_id->provider_data; 3437 cm_event.provider_data = cm_id->provider_data;
3351 cm_event.local_addr = cm_id->local_addr; 3438 cm_event.local_addr = cm_id->local_addr;
3352 cm_event.remote_addr = cm_id->remote_addr; 3439 cm_event.remote_addr = cm_id->remote_addr;
@@ -3390,6 +3477,8 @@ static void cm_event_reset(struct nes_cm_event *event)
3390 3477
3391 nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id); 3478 nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
3392 nesqp = cm_id->provider_data; 3479 nesqp = cm_id->provider_data;
3480 if (!nesqp)
3481 return;
3393 3482
3394 nesqp->cm_id = NULL; 3483 nesqp->cm_id = NULL;
3395 /* cm_id->provider_data = NULL; */ 3484 /* cm_id->provider_data = NULL; */
@@ -3401,8 +3490,8 @@ static void cm_event_reset(struct nes_cm_event *event)
3401 cm_event.private_data = NULL; 3490 cm_event.private_data = NULL;
3402 cm_event.private_data_len = 0; 3491 cm_event.private_data_len = 0;
3403 3492
3404 ret = cm_id->event_handler(cm_id, &cm_event);
3405 cm_id->add_ref(cm_id); 3493 cm_id->add_ref(cm_id);
3494 ret = cm_id->event_handler(cm_id, &cm_event);
3406 atomic_inc(&cm_closes); 3495 atomic_inc(&cm_closes);
3407 cm_event.event = IW_CM_EVENT_CLOSE; 3496 cm_event.event = IW_CM_EVENT_CLOSE;
3408 cm_event.status = IW_CM_EVENT_STATUS_OK; 3497 cm_event.status = IW_CM_EVENT_STATUS_OK;
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 90e8e4d8a5ce..d9825fda70a1 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -47,6 +47,8 @@
47#define IEFT_MPA_KEY_REP "MPA ID Rep Frame" 47#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
48#define IETF_MPA_KEY_SIZE 16 48#define IETF_MPA_KEY_SIZE 16
49#define IETF_MPA_VERSION 1 49#define IETF_MPA_VERSION 1
50#define IETF_MAX_PRIV_DATA_LEN 512
51#define IETF_MPA_FRAME_SIZE 20
50 52
51enum ietf_mpa_flags { 53enum ietf_mpa_flags {
52 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ 54 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
@@ -169,7 +171,7 @@ struct nes_timer_entry {
169 171
170#define NES_CM_DEF_SEQ2 0x18ed5740 172#define NES_CM_DEF_SEQ2 0x18ed5740
171#define NES_CM_DEF_LOCAL_ID2 0xb807 173#define NES_CM_DEF_LOCAL_ID2 0xb807
172#define MAX_CM_BUFFER 512 174#define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN)
173 175
174 176
175typedef u32 nes_addr_t; 177typedef u32 nes_addr_t;
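
The new MAX_CM_BUFFER definition ties the buffer size to the MPA frame layout (20-byte frame header plus up to 512 octets of private data) instead of a bare 512. A one-line check of the arithmetic:

#include <assert.h>

#define IETF_MPA_FRAME_SIZE	20
#define IETF_MAX_PRIV_DATA_LEN	512
#define MAX_CM_BUFFER		(IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN)

int main(void)
{
	assert(MAX_CM_BUFFER == 532);	/* header plus maximum private data */
	return 0;
}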
@@ -198,6 +200,7 @@ enum nes_cm_node_state {
198 NES_CM_STATE_TIME_WAIT, 200 NES_CM_STATE_TIME_WAIT,
199 NES_CM_STATE_LAST_ACK, 201 NES_CM_STATE_LAST_ACK,
200 NES_CM_STATE_CLOSING, 202 NES_CM_STATE_CLOSING,
203 NES_CM_STATE_LISTENER_DESTROYED,
201 NES_CM_STATE_CLOSED 204 NES_CM_STATE_CLOSED
202}; 205};
203 206
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
index 0fb8d81d9a62..b4393a16099d 100644
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 3512d6de3019..b1c2cbb88f09 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -424,8 +424,9 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
424 424
425 nesadapter->base_pd = 1; 425 nesadapter->base_pd = 1;
426 426
427 nesadapter->device_cap_flags = 427 nesadapter->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
428 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; 428 IB_DEVICE_MEM_WINDOW |
429 IB_DEVICE_MEM_MGT_EXTENSIONS;
429 430
430 nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter) 431 nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
431 [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]); 432 [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@@ -436,11 +437,12 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
436 nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]); 437 nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
437 438
438 439
439 /* mark the usual suspect QPs and CQs as in use */ 440 /* mark the usual suspect QPs, MR and CQs as in use */
440 for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) { 441 for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
441 set_bit(u32temp, nesadapter->allocated_qps); 442 set_bit(u32temp, nesadapter->allocated_qps);
442 set_bit(u32temp, nesadapter->allocated_cqs); 443 set_bit(u32temp, nesadapter->allocated_cqs);
443 } 444 }
445 set_bit(0, nesadapter->allocated_mrs);
444 446
445 for (u32temp = 0; u32temp < 20; u32temp++) 447 for (u32temp = 0; u32temp < 20; u32temp++)
446 set_bit(u32temp, nesadapter->allocated_pds); 448 set_bit(u32temp, nesadapter->allocated_pds);
@@ -481,7 +483,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
481 nesadapter->max_irrq_wr = (u32temp >> 16) & 3; 483 nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
482 484
483 nesadapter->max_sge = 4; 485 nesadapter->max_sge = 4;
484 nesadapter->max_cqe = 32767; 486 nesadapter->max_cqe = 32766;
485 487
486 if (nes_read_eeprom_values(nesdev, nesadapter)) { 488 if (nes_read_eeprom_values(nesdev, nesadapter)) {
487 printk(KERN_ERR PFX "Unable to read EEPROM data.\n"); 489 printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
@@ -1355,6 +1357,8 @@ int nes_init_phy(struct nes_device *nesdev)
1355 } 1357 }
1356 if ((phy_type == NES_PHY_TYPE_ARGUS) || 1358 if ((phy_type == NES_PHY_TYPE_ARGUS) ||
1357 (phy_type == NES_PHY_TYPE_SFP_D)) { 1359 (phy_type == NES_PHY_TYPE_SFP_D)) {
1360 u32 first_time = 1;
1361
1358 /* Check firmware heartbeat */ 1362 /* Check firmware heartbeat */
1359 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1363 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1360 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1364 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
@@ -1362,8 +1366,13 @@ int nes_init_phy(struct nes_device *nesdev)
1362 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1366 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1363 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1367 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1364 1368
1365 if (temp_phy_data != temp_phy_data2) 1369 if (temp_phy_data != temp_phy_data2) {
1366 return 0; 1370 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
1371 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1372 if ((temp_phy_data & 0xff) > 0x20)
1373 return 0;
1374 printk(PFX "Reinitializing PHY\n");
1375 }
1367 1376
1368 /* no heartbeat, configure the PHY */ 1377 /* no heartbeat, configure the PHY */
1369 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000); 1378 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
@@ -1399,7 +1408,7 @@ int nes_init_phy(struct nes_device *nesdev)
1399 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1408 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1400 do { 1409 do {
1401 if (counter++ > 150) { 1410 if (counter++ > 150) {
1402 nes_debug(NES_DBG_PHY, "No PHY heartbeat\n"); 1411 printk(PFX "No PHY heartbeat\n");
1403 break; 1412 break;
1404 } 1413 }
1405 mdelay(1); 1414 mdelay(1);
@@ -1413,11 +1422,20 @@ int nes_init_phy(struct nes_device *nesdev)
1413 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); 1422 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
1414 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1423 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1415 if (counter++ > 300) { 1424 if (counter++ > 300) {
1416 nes_debug(NES_DBG_PHY, "PHY did not track\n"); 1425 if (((temp_phy_data & 0xff) == 0x0) && first_time) {
1417 break; 1426 first_time = 0;
1427 counter = 0;
1428 /* reset AMCC PHY and try again */
1429 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
1430 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
1431 continue;
1432 } else {
1433 printk(PFX "PHY did not track\n");
1434 break;
1435 }
1418 } 1436 }
1419 mdelay(10); 1437 mdelay(10);
1420 } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70)); 1438 } while ((temp_phy_data & 0xff) < 0x30);
1421 1439
1422 /* setup signal integrity */ 1440 /* setup signal integrity */
1423 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000); 1441 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
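
The reworked PHY bring-up loop above polls a status byte and, on the first timeout with a zero status, resets the AMCC PHY once and restarts the count before giving up. A loose userspace sketch of that retry shape; read_status() and reset_phy() are stand-ins, not the driver's MDIO accessors:

#include <stdio.h>

static int attempts;

static unsigned read_status(void)  { return attempts++ > 5 ? 0x50 : 0x00; }
static void reset_phy(void)        { printf("resetting PHY\n"); }

int main(void)
{
	unsigned status;
	int counter = 0, first_time = 1;

	do {
		status = read_status();
		if (counter++ > 3) {
			if ((status & 0xff) == 0 && first_time) {
				first_time = 0;
				counter = 0;
				reset_phy();	/* one retry after a reset */
				continue;
			}
			printf("PHY did not track\n");
			break;
		}
	} while ((status & 0xff) < 0x30);
	return 0;
}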
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index f28a41ba9fa1..084be0ee689b 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3* 3*
4* This software is available to you under a choice of one of two 4* This software is available to you under a choice of one of two
5* licenses. You may choose to be licensed under the terms of the GNU 5* licenses. You may choose to be licensed under the terms of the GNU
@@ -546,11 +546,23 @@ enum nes_iwarp_sq_fmr_wqe_word_idx {
546 NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14, 546 NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
547}; 547};
548 548
549enum nes_iwarp_sq_fmr_opcodes {
550 NES_IWARP_SQ_FMR_WQE_ZERO_BASED = (1<<6),
551 NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K = (0<<7),
552 NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M = (1<<7),
553 NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ = (1<<16),
554 NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE = (1<<17),
555 NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ = (1<<18),
556 NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE = (1<<19),
557 NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND = (1<<20),
558};
559
560#define NES_IWARP_SQ_FMR_WQE_MR_LENGTH_HIGH_MASK 0xFF;
561
549enum nes_iwarp_sq_locinv_wqe_word_idx { 562enum nes_iwarp_sq_locinv_wqe_word_idx {
550 NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6, 563 NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
551}; 564};
552 565
553
554enum nes_iwarp_rq_wqe_word_idx { 566enum nes_iwarp_rq_wqe_word_idx {
555 NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1, 567 NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
556 NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2, 568 NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
@@ -1153,6 +1165,19 @@ struct nes_pbl {
1153 /* TODO: need to add list for two level tables */ 1165 /* TODO: need to add list for two level tables */
1154}; 1166};
1155 1167
1168#define NES_4K_PBL_CHUNK_SIZE 4096
1169
1170struct nes_fast_mr_wqe_pbl {
1171 u64 *kva;
1172 dma_addr_t paddr;
1173};
1174
1175struct nes_ib_fast_reg_page_list {
1176 struct ib_fast_reg_page_list ibfrpl;
1177 struct nes_fast_mr_wqe_pbl nes_wqe_pbl;
1178 u64 pbl;
1179};
1180
1156struct nes_listener { 1181struct nes_listener {
1157 struct work_struct work; 1182 struct work_struct work;
1158 struct workqueue_struct *wq; 1183 struct workqueue_struct *wq;
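
The new nes_ib_fast_reg_page_list embeds the generic ib_fast_reg_page_list so the driver can recover its private data from the pointer handed back by the core layer. A userspace sketch of that embedding pattern with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

struct ib_fast_reg_page_list { int max_page_list_len; };

struct nes_fast_mr_wqe_pbl { unsigned long long *kva; unsigned long paddr; };

struct nes_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list ibfrpl;	/* what the core layer sees */
	struct nes_fast_mr_wqe_pbl   nes_wqe_pbl;
	unsigned long long           pbl;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct nes_ib_fast_reg_page_list pl = { { 16 }, { 0, 0 }, 0 };
	struct ib_fast_reg_page_list *ibfrpl = &pl.ibfrpl;

	/* the driver gets its private wrapper back from the generic pointer */
	struct nes_ib_fast_reg_page_list *back =
		container_of(ibfrpl, struct nes_ib_fast_reg_page_list, ibfrpl);

	printf("%d\n", back->ibfrpl.max_page_list_len);
	return 0;
}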
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index de18fdfdadf2..ab1102780186 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index cc90c14b49eb..71e133ab209b 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Topspin Communications. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
@@ -86,6 +86,7 @@ enum iwnes_memreg_type {
86 IWNES_MEMREG_TYPE_CQ = 0x0002, 86 IWNES_MEMREG_TYPE_CQ = 0x0002,
87 IWNES_MEMREG_TYPE_MW = 0x0003, 87 IWNES_MEMREG_TYPE_MW = 0x0003,
88 IWNES_MEMREG_TYPE_FMR = 0x0004, 88 IWNES_MEMREG_TYPE_FMR = 0x0004,
89 IWNES_MEMREG_TYPE_FMEM = 0x0005,
89}; 90};
90 91
91struct nes_mem_reg_req { 92struct nes_mem_reg_req {
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 9687c397ce1a..729d525c5b70 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index a680c42d6e8c..64d3136e3747 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -275,342 +275,236 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
275} 275}
276 276
277 277
278/** 278/*
279 * nes_alloc_fmr 279 * nes_alloc_fast_mr
280 */ 280 */
281static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd, 281static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
282 int ibmr_access_flags, 282 u32 stag, u32 page_count)
283 struct ib_fmr_attr *ibfmr_attr)
284{ 283{
285 unsigned long flags;
286 struct nes_pd *nespd = to_nespd(ibpd);
287 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
288 struct nes_device *nesdev = nesvnic->nesdev;
289 struct nes_adapter *nesadapter = nesdev->nesadapter;
290 struct nes_fmr *nesfmr;
291 struct nes_cqp_request *cqp_request;
292 struct nes_hw_cqp_wqe *cqp_wqe; 284 struct nes_hw_cqp_wqe *cqp_wqe;
285 struct nes_cqp_request *cqp_request;
286 unsigned long flags;
293 int ret; 287 int ret;
294 u32 stag; 288 struct nes_adapter *nesadapter = nesdev->nesadapter;
295 u32 stag_index = 0;
296 u32 next_stag_index = 0;
297 u32 driver_key = 0;
298 u32 opcode = 0; 289 u32 opcode = 0;
299 u8 stag_key = 0; 290 u16 major_code;
300 int i=0; 291 u64 region_length = page_count * PAGE_SIZE;
301 struct nes_vpbl vpbl;
302
303 get_random_bytes(&next_stag_index, sizeof(next_stag_index));
304 stag_key = (u8)next_stag_index;
305
306 driver_key = 0;
307
308 next_stag_index >>= 8;
309 next_stag_index %= nesadapter->max_mr;
310
311 ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
312 nesadapter->max_mr, &stag_index, &next_stag_index);
313 if (ret) {
314 goto failed_resource_alloc;
315 }
316
317 nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
318 if (!nesfmr) {
319 ret = -ENOMEM;
320 goto failed_fmr_alloc;
321 }
322
323 nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
324 if (ibfmr_attr->max_pages == 1) {
325 /* use zero length PBL */
326 nesfmr->nesmr.pbl_4k = 0;
327 nesfmr->nesmr.pbls_used = 0;
328 } else if (ibfmr_attr->max_pages <= 32) {
329 /* use PBL 256 */
330 nesfmr->nesmr.pbl_4k = 0;
331 nesfmr->nesmr.pbls_used = 1;
332 } else if (ibfmr_attr->max_pages <= 512) {
333 /* use 4K PBLs */
334 nesfmr->nesmr.pbl_4k = 1;
335 nesfmr->nesmr.pbls_used = 1;
336 } else {
337 /* use two level 4K PBLs */
338 /* add support for two level 256B PBLs */
339 nesfmr->nesmr.pbl_4k = 1;
340 nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
341 ((ibfmr_attr->max_pages & 511) ? 1 : 0);
342 }
343 /* Register the region with the adapter */
344 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
345
346 /* track PBL resources */
347 if (nesfmr->nesmr.pbls_used != 0) {
348 if (nesfmr->nesmr.pbl_4k) {
349 if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
350 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
351 ret = -ENOMEM;
352 goto failed_vpbl_avail;
353 } else {
354 nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
355 }
356 } else {
357 if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
358 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
359 ret = -ENOMEM;
360 goto failed_vpbl_avail;
361 } else {
362 nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
363 }
364 }
365 }
366
367 /* one level pbl */
368 if (nesfmr->nesmr.pbls_used == 0) {
369 nesfmr->root_vpbl.pbl_vbase = NULL;
370 nes_debug(NES_DBG_MR, "zero level pbl \n");
371 } else if (nesfmr->nesmr.pbls_used == 1) {
372 /* can change it to kmalloc & dma_map_single */
373 nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
374 &nesfmr->root_vpbl.pbl_pbase);
375 if (!nesfmr->root_vpbl.pbl_vbase) {
376 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
377 ret = -ENOMEM;
378 goto failed_vpbl_alloc;
379 }
380 nesfmr->leaf_pbl_cnt = 0;
381 nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
382 nesfmr->root_vpbl.pbl_vbase);
383 }
384 /* two level pbl */
385 else {
386 nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
387 &nesfmr->root_vpbl.pbl_pbase);
388 if (!nesfmr->root_vpbl.pbl_vbase) {
389 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
390 ret = -ENOMEM;
391 goto failed_vpbl_alloc;
392 }
393
394 nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
395 nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
396 if (!nesfmr->root_vpbl.leaf_vpbl) {
397 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
398 ret = -ENOMEM;
399 goto failed_leaf_vpbl_alloc;
400 }
401
402 nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
403 " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
404 nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
405
406 for (i=0; i<nesfmr->leaf_pbl_cnt; i++)
407 nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL;
408
409 for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
410 vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
411 &vpbl.pbl_pbase);
412
413 if (!vpbl.pbl_vbase) {
414 ret = -ENOMEM;
415 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
416 goto failed_leaf_vpbl_pages_alloc;
417 }
418
419 nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
420 nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
421 nesfmr->root_vpbl.leaf_vpbl[i] = vpbl;
422
423 nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n",
424 nesfmr->root_vpbl.pbl_vbase[i].pa_low,
425 nesfmr->root_vpbl.pbl_vbase[i].pa_high,
426 &nesfmr->root_vpbl.leaf_vpbl[i]);
427 }
428 }
429 nesfmr->ib_qp = NULL;
430 nesfmr->access_rights =0;
431 292
432 stag = stag_index << 8;
433 stag |= driver_key;
434 stag += (u32)stag_key;
435 293
436 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
437 cqp_request = nes_get_cqp_request(nesdev); 294 cqp_request = nes_get_cqp_request(nesdev);
438 if (cqp_request == NULL) { 295 if (cqp_request == NULL) {
439 nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n"); 296 nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
440 ret = -ENOMEM; 297 return -ENOMEM;
441 goto failed_leaf_vpbl_pages_alloc;
442 } 298 }
299 nes_debug(NES_DBG_MR, "alloc_fast_reg_mr: page_count = %d, "
300 "region_length = %llu\n",
301 page_count, region_length);
443 cqp_request->waiting = 1; 302 cqp_request->waiting = 1;
444 cqp_wqe = &cqp_request->cqp_wqe; 303 cqp_wqe = &cqp_request->cqp_wqe;
445 304
446 nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n", 305 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
447 stag, stag_index); 306 if (nesadapter->free_4kpbl > 0) {
448 307 nesadapter->free_4kpbl--;
449 opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR; 308 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
450 309 } else {
451 if (nesfmr->nesmr.pbl_4k == 1) 310 /* No 4kpbl's available: */
452 opcode |= NES_CQP_STAG_PBL_BLK_SIZE; 311 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
453 312 nes_debug(NES_DBG_MR, "Out of Pbls\n");
454 if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) { 313 nes_free_cqp_request(nesdev, cqp_request);
455 opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE | 314 return -ENOMEM;
456 NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN;
457 nesfmr->access_rights |=
458 NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE |
459 NES_CQP_STAG_REM_ACC_EN;
460 } 315 }
461 316
462 if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) { 317 opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_MR |
463 opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ | 318 NES_CQP_STAG_PBL_BLK_SIZE | NES_CQP_STAG_VA_TO |
464 NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN; 319 NES_CQP_STAG_REM_ACC_EN;
465 nesfmr->access_rights |= 320 /*
466 NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ | 321 * The current OFED API does not support the zero based TO option.
467 NES_CQP_STAG_REM_ACC_EN; 322 * If added then need to changed the NES_CQP_STAG_VA* option. Also,
468 } 323 * the API does not support that ability to have the MR set for local
324 * access only when created and not allow the SQ op to override. Given
325 * this the remote enable must be set here.
326 */
469 327
470 nes_fill_init_cqp_wqe(cqp_wqe, nesdev); 328 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
471 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); 329 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
472 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff)); 330 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, 1);
473 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
474 331
475 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 332 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
476 cpu_to_le32((nesfmr->nesmr.pbls_used>1) ? 333 cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
477 (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used); 334 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
335 cpu_to_le32(nespd->pd_id & 0x00007fff);
336
337 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
338 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, 0);
339 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, 0);
340 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, 0);
341 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (page_count * 8));
342 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
343 barrier();
478 344
479 atomic_set(&cqp_request->refcount, 2); 345 atomic_set(&cqp_request->refcount, 2);
480 nes_post_cqp_request(nesdev, cqp_request); 346 nes_post_cqp_request(nesdev, cqp_request);
481 347
482 /* Wait for CQP */ 348 /* Wait for CQP */
483 ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0), 349 ret = wait_event_timeout(cqp_request->waitq,
484 NES_EVENT_TIMEOUT); 350 (0 != cqp_request->request_done),
485 nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u," 351 NES_EVENT_TIMEOUT);
486 " CQP Major:Minor codes = 0x%04X:0x%04X.\n", 352
487 stag, ret, cqp_request->major_code, cqp_request->minor_code); 353 nes_debug(NES_DBG_MR, "Allocate STag 0x%08X completed, "
488 354 "wait_event_timeout ret = %u, CQP Major:Minor codes = "
489 if ((!ret) || (cqp_request->major_code)) { 355 "0x%04X:0x%04X.\n", stag, ret, cqp_request->major_code,
490 nes_put_cqp_request(nesdev, cqp_request); 356 cqp_request->minor_code);
491 ret = (!ret) ? -ETIME : -EIO; 357 major_code = cqp_request->major_code;
492 goto failed_leaf_vpbl_pages_alloc;
493 }
494 nes_put_cqp_request(nesdev, cqp_request); 358 nes_put_cqp_request(nesdev, cqp_request);
495 nesfmr->nesmr.ibfmr.lkey = stag;
496 nesfmr->nesmr.ibfmr.rkey = stag;
497 nesfmr->attr = *ibfmr_attr;
498
499 return &nesfmr->nesmr.ibfmr;
500
501 failed_leaf_vpbl_pages_alloc:
502 /* unroll all allocated pages */
503 for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
504 if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) {
505 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
506 nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
507 }
508 }
509 if (nesfmr->root_vpbl.leaf_vpbl)
510 kfree(nesfmr->root_vpbl.leaf_vpbl);
511 359
512 failed_leaf_vpbl_alloc: 360 if (!ret || major_code) {
513 if (nesfmr->leaf_pbl_cnt == 0) {
514 if (nesfmr->root_vpbl.pbl_vbase)
515 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
516 nesfmr->root_vpbl.pbl_pbase);
517 } else
518 pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
519 nesfmr->root_vpbl.pbl_pbase);
520
521 failed_vpbl_alloc:
522 if (nesfmr->nesmr.pbls_used != 0) {
523 spin_lock_irqsave(&nesadapter->pbl_lock, flags); 361 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
524 if (nesfmr->nesmr.pbl_4k) 362 nesadapter->free_4kpbl++;
525 nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
526 else
527 nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
528 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 363 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
529 } 364 }
530 365
531failed_vpbl_avail: 366 if (!ret)
532 kfree(nesfmr); 367 return -ETIME;
533 368 else if (major_code)
534 failed_fmr_alloc: 369 return -EIO;
535 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); 370 return 0;
536
537 failed_resource_alloc:
538 return ERR_PTR(ret);
539} 371}
540 372
541 373/*
542/** 374 * nes_alloc_fast_reg_mr
543 * nes_dealloc_fmr
544 */ 375 */
545static int nes_dealloc_fmr(struct ib_fmr *ibfmr) 376struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
546{ 377{
547 unsigned long flags; 378 struct nes_pd *nespd = to_nespd(ibpd);
548 struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr); 379 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
549 struct nes_fmr *nesfmr = to_nesfmr(nesmr);
550 struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
551 struct nes_device *nesdev = nesvnic->nesdev; 380 struct nes_device *nesdev = nesvnic->nesdev;
552 struct nes_adapter *nesadapter = nesdev->nesadapter; 381 struct nes_adapter *nesadapter = nesdev->nesadapter;
553 int i = 0;
554 int rc;
555 382
556 /* free the resources */ 383 u32 next_stag_index;
557 if (nesfmr->leaf_pbl_cnt == 0) { 384 u8 stag_key = 0;
558 /* single PBL case */ 385 u32 driver_key = 0;
559 if (nesfmr->root_vpbl.pbl_vbase) 386 int err = 0;
560 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase, 387 u32 stag_index = 0;
561 nesfmr->root_vpbl.pbl_pbase); 388 struct nes_mr *nesmr;
562 } else { 389 u32 stag;
563 for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) { 390 int ret;
564 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase, 391 struct ib_mr *ibmr;
565 nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase); 392/*
566 } 393 * Note: Set to always use a fixed length single page entry PBL. This is to allow
567 kfree(nesfmr->root_vpbl.leaf_vpbl); 394 * for the fast_reg_mr operation to always know the size of the PBL.
568 pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase, 395 */
569 nesfmr->root_vpbl.pbl_pbase); 396 if (max_page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
570 } 397 return ERR_PTR(-E2BIG);
571 nesmr->ibmw.device = ibfmr->device;
572 nesmr->ibmw.pd = ibfmr->pd;
573 nesmr->ibmw.rkey = ibfmr->rkey;
574 nesmr->ibmw.uobject = NULL;
575 398
576 rc = nes_dealloc_mw(&nesmr->ibmw); 399 get_random_bytes(&next_stag_index, sizeof(next_stag_index));
400 stag_key = (u8)next_stag_index;
401 next_stag_index >>= 8;
402 next_stag_index %= nesadapter->max_mr;
577 403
578 if ((rc == 0) && (nesfmr->nesmr.pbls_used != 0)) { 404 err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
579 spin_lock_irqsave(&nesadapter->pbl_lock, flags); 405 nesadapter->max_mr, &stag_index,
580 if (nesfmr->nesmr.pbl_4k) { 406 &next_stag_index);
581 nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used; 407 if (err)
582 WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl); 408 return ERR_PTR(err);
583 } else { 409
584 nesadapter->free_256pbl += nesfmr->nesmr.pbls_used; 410 nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
585 WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl); 411 if (!nesmr) {
586 } 412 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
587 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 413 return ERR_PTR(-ENOMEM);
588 } 414 }
589 415
590 return rc; 416 stag = stag_index << 8;
591} 417 stag |= driver_key;
418 stag += (u32)stag_key;
592 419
420 nes_debug(NES_DBG_MR, "Allocating STag 0x%08X index = 0x%08X\n",
421 stag, stag_index);
593 422
594/** 423 ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_page_list_len);
595 * nes_map_phys_fmr 424
425 if (ret == 0) {
426 nesmr->ibmr.rkey = stag;
427 nesmr->ibmr.lkey = stag;
428 nesmr->mode = IWNES_MEMREG_TYPE_FMEM;
429 ibmr = &nesmr->ibmr;
430 } else {
431 kfree(nesmr);
432 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
433 ibmr = ERR_PTR(-ENOMEM);
434 }
435 return ibmr;
436}
437
438/*
439 * nes_alloc_fast_reg_page_list
596 */ 440 */
597static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, 441static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
598 int list_len, u64 iova) 442 struct ib_device *ibdev,
443 int page_list_len)
599{ 444{
600 return 0; 445 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
601} 446 struct nes_device *nesdev = nesvnic->nesdev;
447 struct ib_fast_reg_page_list *pifrpl;
448 struct nes_ib_fast_reg_page_list *pnesfrpl;
602 449
450 if (page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
451 return ERR_PTR(-E2BIG);
452 /*
453 * Allocate the ib_fast_reg_page_list structure, the
454 * nes_fast_bpl structure, and the PBL table.
455 */
456 pnesfrpl = kmalloc(sizeof(struct nes_ib_fast_reg_page_list) +
457 page_list_len * sizeof(u64), GFP_KERNEL);
458
459 if (!pnesfrpl)
460 return ERR_PTR(-ENOMEM);
603 461
604/** 462 pifrpl = &pnesfrpl->ibfrpl;
605 * nes_unmap_frm 463 pifrpl->page_list = &pnesfrpl->pbl;
464 pifrpl->max_page_list_len = page_list_len;
465 /*
466 * Allocate the WQE PBL
467 */
468 pnesfrpl->nes_wqe_pbl.kva = pci_alloc_consistent(nesdev->pcidev,
469 page_list_len * sizeof(u64),
470 &pnesfrpl->nes_wqe_pbl.paddr);
471
472 if (!pnesfrpl->nes_wqe_pbl.kva) {
473 kfree(pnesfrpl);
474 return ERR_PTR(-ENOMEM);
475 }
476 nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
477 "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
478 "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl,
479 pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
480 (void *)pnesfrpl->nes_wqe_pbl.paddr);
481
482 return pifrpl;
483}
484
485/*
486 * nes_free_fast_reg_page_list
606 */ 487 */
607static int nes_unmap_fmr(struct list_head *ibfmr_list) 488static void nes_free_fast_reg_page_list(struct ib_fast_reg_page_list *pifrpl)
608{ 489{
609 return 0; 490 struct nes_vnic *nesvnic = to_nesvnic(pifrpl->device);
491 struct nes_device *nesdev = nesvnic->nesdev;
492 struct nes_ib_fast_reg_page_list *pnesfrpl;
493
494 pnesfrpl = container_of(pifrpl, struct nes_ib_fast_reg_page_list, ibfrpl);
495 /*
496 * Free the WQE PBL.
497 */
498 pci_free_consistent(nesdev->pcidev,
499 pifrpl->max_page_list_len * sizeof(u64),
500 pnesfrpl->nes_wqe_pbl.kva,
501 pnesfrpl->nes_wqe_pbl.paddr);
502 /*
503 * Free the PBL structure
504 */
505 kfree(pnesfrpl);
610} 506}
611 507
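For context, the three new verbs above back the generic fast-registration API of this kernel generation. A minimal sketch of how an upper-layer protocol would typically consume them follows; it is not part of this patch, pd, qp, npages, iova and page_dma_addr are assumed to already exist, and error handling is omitted:

	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, *bad_wr;
	int i;

	/* One MR and one page list, each sized for up to 16 4K pages. */
	mr = ib_alloc_fast_reg_mr(pd, 16);
	frpl = ib_alloc_fast_reg_page_list(pd->device, 16);

	/* Fill in the DMA addresses of the pages to be registered. */
	for (i = 0; i < npages; i++)
		frpl->page_list[i] = page_dma_addr[i];

	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.send_flags = IB_SEND_SIGNALED;
	fr_wr.wr.fast_reg.iova_start = iova;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = npages;
	fr_wr.wr.fast_reg.page_shift = 12;		/* 4K pages */
	fr_wr.wr.fast_reg.length = npages * 4096;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
					 IB_ACCESS_REMOTE_READ |
					 IB_ACCESS_REMOTE_WRITE;

	/* Dispatched through ib_post_send(), which lands in nes_post_send(). */
	ib_post_send(qp, &fr_wr, &bad_wr);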
612
613
614/** 508/**
615 * nes_query_device 509 * nes_query_device
616 */ 510 */
@@ -633,23 +527,23 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
633 props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2; 527 props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
634 props->max_sge = nesdev->nesadapter->max_sge; 528 props->max_sge = nesdev->nesadapter->max_sge;
635 props->max_cq = nesibdev->max_cq; 529 props->max_cq = nesibdev->max_cq;
636 props->max_cqe = nesdev->nesadapter->max_cqe - 1; 530 props->max_cqe = nesdev->nesadapter->max_cqe;
637 props->max_mr = nesibdev->max_mr; 531 props->max_mr = nesibdev->max_mr;
638 props->max_mw = nesibdev->max_mr; 532 props->max_mw = nesibdev->max_mr;
639 props->max_pd = nesibdev->max_pd; 533 props->max_pd = nesibdev->max_pd;
640 props->max_sge_rd = 1; 534 props->max_sge_rd = 1;
641 switch (nesdev->nesadapter->max_irrq_wr) { 535 switch (nesdev->nesadapter->max_irrq_wr) {
642 case 0: 536 case 0:
643 props->max_qp_rd_atom = 1; 537 props->max_qp_rd_atom = 2;
644 break; 538 break;
645 case 1: 539 case 1:
646 props->max_qp_rd_atom = 4; 540 props->max_qp_rd_atom = 8;
647 break; 541 break;
648 case 2: 542 case 2:
649 props->max_qp_rd_atom = 16; 543 props->max_qp_rd_atom = 32;
650 break; 544 break;
651 case 3: 545 case 3:
652 props->max_qp_rd_atom = 32; 546 props->max_qp_rd_atom = 64;
653 break; 547 break;
654 default: 548 default:
655 props->max_qp_rd_atom = 0; 549 props->max_qp_rd_atom = 0;
@@ -1121,6 +1015,7 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
1121 kunmap(nesqp->page); 1015 kunmap(nesqp->page);
1122 return -ENOMEM; 1016 return -ENOMEM;
1123 } 1017 }
1018 nesqp->sq_kmapped = 1;
1124 nesqp->hwqp.q2_vbase = mem; 1019 nesqp->hwqp.q2_vbase = mem;
1125 mem += 256; 1020 mem += 256;
1126 memset(nesqp->hwqp.q2_vbase, 0, 256); 1021 memset(nesqp->hwqp.q2_vbase, 0, 256);
@@ -1198,7 +1093,10 @@ static inline void nes_free_qp_mem(struct nes_device *nesdev,
1198 pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); 1093 pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
1199 pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase ); 1094 pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase );
1200 nesqp->pbl_vbase = NULL; 1095 nesqp->pbl_vbase = NULL;
1201 kunmap(nesqp->page); 1096 if (nesqp->sq_kmapped) {
1097 nesqp->sq_kmapped = 0;
1098 kunmap(nesqp->page);
1099 }
1202 } 1100 }
1203} 1101}
1204 1102
@@ -1504,8 +1402,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1504 nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n", 1402 nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n",
1505 nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp)); 1403 nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));
1506 spin_lock_init(&nesqp->lock); 1404 spin_lock_init(&nesqp->lock);
1507 init_waitqueue_head(&nesqp->state_waitq);
1508 init_waitqueue_head(&nesqp->kick_waitq);
1509 nes_add_ref(&nesqp->ibqp); 1405 nes_add_ref(&nesqp->ibqp);
1510 break; 1406 break;
1511 default: 1407 default:
@@ -1513,6 +1409,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1513 return ERR_PTR(-EINVAL); 1409 return ERR_PTR(-EINVAL);
1514 } 1410 }
1515 1411
1412 nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
1413
1516 /* update the QP table */ 1414 /* update the QP table */
1517 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp; 1415 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
1518 nes_debug(NES_DBG_QP, "netdev refcnt=%u\n", 1416 nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
@@ -1607,8 +1505,10 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1607 nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index; 1505 nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
1608 } 1506 }
1609 } 1507 }
1610 if (nesqp->pbl_pbase) 1508 if (nesqp->pbl_pbase && nesqp->sq_kmapped) {
1509 nesqp->sq_kmapped = 0;
1611 kunmap(nesqp->page); 1510 kunmap(nesqp->page);
1511 }
1612 } else { 1512 } else {
1613 /* Clean any pending completions from the cq(s) */ 1513 /* Clean any pending completions from the cq(s) */
1614 if (nesqp->nesscq) 1514 if (nesqp->nesscq)
@@ -1649,6 +1549,9 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
1649 unsigned long flags; 1549 unsigned long flags;
1650 int ret; 1550 int ret;
1651 1551
1552 if (entries > nesadapter->max_cqe)
1553 return ERR_PTR(-EINVAL);
1554
1652 err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs, 1555 err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
1653 nesadapter->max_cq, &cq_num, &nesadapter->next_cq); 1556 nesadapter->max_cq, &cq_num, &nesadapter->next_cq);
1654 if (err) { 1557 if (err) {
@@ -2606,9 +2509,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2606 stag = stag_index << 8; 2509 stag = stag_index << 8;
2607 stag |= driver_key; 2510 stag |= driver_key;
2608 stag += (u32)stag_key; 2511 stag += (u32)stag_key;
2609 if (stag == 0) {
2610 stag = 1;
2611 }
2612 2512
2613 iova_start = virt; 2513 iova_start = virt;
2614 /* Make the leaf PBL the root if only one PBL */ 2514 /* Make the leaf PBL the root if only one PBL */
@@ -3109,7 +3009,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3109 " already done based on hw state.\n", 3009 " already done based on hw state.\n",
3110 nesqp->hwqp.qp_id); 3010 nesqp->hwqp.qp_id);
3111 issue_modify_qp = 0; 3011 issue_modify_qp = 0;
3112 nesqp->in_disconnect = 0;
3113 } 3012 }
3114 switch (nesqp->hw_iwarp_state) { 3013 switch (nesqp->hw_iwarp_state) {
3115 case NES_AEQE_IWARP_STATE_CLOSING: 3014 case NES_AEQE_IWARP_STATE_CLOSING:
@@ -3122,7 +3021,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3122 break; 3021 break;
3123 default: 3022 default:
3124 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; 3023 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3125 nesqp->in_disconnect = 1;
3126 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; 3024 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3127 break; 3025 break;
3128 } 3026 }
@@ -3139,7 +3037,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3139 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; 3037 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
3140 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; 3038 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
3141 issue_modify_qp = 1; 3039 issue_modify_qp = 1;
3142 nesqp->in_disconnect = 1;
3143 break; 3040 break;
3144 case IB_QPS_ERR: 3041 case IB_QPS_ERR:
3145 case IB_QPS_RESET: 3042 case IB_QPS_RESET:
@@ -3162,7 +3059,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3162 if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) && 3059 if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
3163 (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) { 3060 (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
3164 next_iwarp_state |= NES_CQP_QP_RESET; 3061 next_iwarp_state |= NES_CQP_QP_RESET;
3165 nesqp->in_disconnect = 1;
3166 } else { 3062 } else {
3167 nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n", 3063 nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n",
3168 nesqp->hwqp.qp_id, nesqp->hw_tcp_state); 3064 nesqp->hwqp.qp_id, nesqp->hw_tcp_state);
@@ -3373,21 +3269,17 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3373 struct nes_device *nesdev = nesvnic->nesdev; 3269 struct nes_device *nesdev = nesvnic->nesdev;
3374 struct nes_qp *nesqp = to_nesqp(ibqp); 3270 struct nes_qp *nesqp = to_nesqp(ibqp);
3375 struct nes_hw_qp_wqe *wqe; 3271 struct nes_hw_qp_wqe *wqe;
3376 int err; 3272 int err = 0;
3377 u32 qsize = nesqp->hwqp.sq_size; 3273 u32 qsize = nesqp->hwqp.sq_size;
3378 u32 head; 3274 u32 head;
3379 u32 wqe_misc; 3275 u32 wqe_misc = 0;
3380 u32 wqe_count; 3276 u32 wqe_count = 0;
3381 u32 counter; 3277 u32 counter;
3382 u32 total_payload_length;
3383
3384 err = 0;
3385 wqe_misc = 0;
3386 wqe_count = 0;
3387 total_payload_length = 0;
3388 3278
3389 if (nesqp->ibqp_state > IB_QPS_RTS) 3279 if (nesqp->ibqp_state > IB_QPS_RTS) {
3390 return -EINVAL; 3280 err = -EINVAL;
3281 goto out;
3282 }
3391 3283
3392 spin_lock_irqsave(&nesqp->lock, flags); 3284 spin_lock_irqsave(&nesqp->lock, flags);
3393 3285
@@ -3413,94 +3305,208 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3413 u64temp = (u64)(ib_wr->wr_id); 3305 u64temp = (u64)(ib_wr->wr_id);
3414 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, 3306 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
3415 u64temp); 3307 u64temp);
3416 switch (ib_wr->opcode) { 3308 switch (ib_wr->opcode) {
3417 case IB_WR_SEND: 3309 case IB_WR_SEND:
3418 if (ib_wr->send_flags & IB_SEND_SOLICITED) { 3310 case IB_WR_SEND_WITH_INV:
3419 wqe_misc = NES_IWARP_SQ_OP_SENDSE; 3311 if (IB_WR_SEND == ib_wr->opcode) {
3420 } else { 3312 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3421 wqe_misc = NES_IWARP_SQ_OP_SEND; 3313 wqe_misc = NES_IWARP_SQ_OP_SENDSE;
3422 } 3314 else
3423 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { 3315 wqe_misc = NES_IWARP_SQ_OP_SEND;
3424 err = -EINVAL; 3316 } else {
3425 break; 3317 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3426 } 3318 wqe_misc = NES_IWARP_SQ_OP_SENDSEINV;
3427 if (ib_wr->send_flags & IB_SEND_FENCE) { 3319 else
3428 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE; 3320 wqe_misc = NES_IWARP_SQ_OP_SENDINV;
3429 }
3430 if ((ib_wr->send_flags & IB_SEND_INLINE) &&
3431 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
3432 (ib_wr->sg_list[0].length <= 64)) {
3433 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
3434 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
3435 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
3436 ib_wr->sg_list[0].length);
3437 wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
3438 } else {
3439 fill_wqe_sg_send(wqe, ib_wr, 1);
3440 }
3441 3321
3442 break; 3322 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
3443 case IB_WR_RDMA_WRITE: 3323 ib_wr->ex.invalidate_rkey);
3444 wqe_misc = NES_IWARP_SQ_OP_RDMAW; 3324 }
3445 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3446 nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
3447 ib_wr->num_sge,
3448 nesdev->nesadapter->max_sge);
3449 err = -EINVAL;
3450 break;
3451 }
3452 if (ib_wr->send_flags & IB_SEND_FENCE) {
3453 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
3454 }
3455 3325
3456 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, 3326 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3457 ib_wr->wr.rdma.rkey); 3327 err = -EINVAL;
3458 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, 3328 break;
3459 ib_wr->wr.rdma.remote_addr);
3460
3461 if ((ib_wr->send_flags & IB_SEND_INLINE) &&
3462 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
3463 (ib_wr->sg_list[0].length <= 64)) {
3464 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
3465 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
3466 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
3467 ib_wr->sg_list[0].length);
3468 wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
3469 } else {
3470 fill_wqe_sg_send(wqe, ib_wr, 1);
3471 }
3472 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
3473 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
3474 break;
3475 case IB_WR_RDMA_READ:
3476 /* iWARP only supports 1 sge for RDMA reads */
3477 if (ib_wr->num_sge > 1) {
3478 nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
3479 ib_wr->num_sge);
3480 err = -EINVAL;
3481 break;
3482 }
3483 wqe_misc = NES_IWARP_SQ_OP_RDMAR;
3484 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
3485 ib_wr->wr.rdma.remote_addr);
3486 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
3487 ib_wr->wr.rdma.rkey);
3488 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
3489 ib_wr->sg_list->length);
3490 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
3491 ib_wr->sg_list->addr);
3492 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
3493 ib_wr->sg_list->lkey);
3494 break;
3495 default:
3496 /* error */
3497 err = -EINVAL;
3498 break;
3499 } 3329 }
3500 3330
3501 if (ib_wr->send_flags & IB_SEND_SIGNALED) { 3331 if (ib_wr->send_flags & IB_SEND_FENCE)
3502 wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL; 3332 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
3333
3334 if ((ib_wr->send_flags & IB_SEND_INLINE) &&
3335 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
3336 (ib_wr->sg_list[0].length <= 64)) {
3337 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
3338 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
3339 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
3340 ib_wr->sg_list[0].length);
3341 wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
3342 } else {
3343 fill_wqe_sg_send(wqe, ib_wr, 1);
3344 }
3345
3346 break;
3347 case IB_WR_RDMA_WRITE:
3348 wqe_misc = NES_IWARP_SQ_OP_RDMAW;
3349 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3350 nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
3351 ib_wr->num_sge, nesdev->nesadapter->max_sge);
3352 err = -EINVAL;
3353 break;
3354 }
3355
3356 if (ib_wr->send_flags & IB_SEND_FENCE)
3357 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
3358
3359 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
3360 ib_wr->wr.rdma.rkey);
3361 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
3362 ib_wr->wr.rdma.remote_addr);
3363
3364 if ((ib_wr->send_flags & IB_SEND_INLINE) &&
3365 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
3366 (ib_wr->sg_list[0].length <= 64)) {
3367 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
3368 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
3369 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
3370 ib_wr->sg_list[0].length);
3371 wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
3372 } else {
3373 fill_wqe_sg_send(wqe, ib_wr, 1);
3374 }
3375
3376 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
3377 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
3378 break;
3379 case IB_WR_RDMA_READ:
3380 case IB_WR_RDMA_READ_WITH_INV:
3381 /* iWARP only supports 1 sge for RDMA reads */
3382 if (ib_wr->num_sge > 1) {
3383 nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
3384 ib_wr->num_sge);
3385 err = -EINVAL;
3386 break;
3387 }
3388 if (ib_wr->opcode == IB_WR_RDMA_READ) {
3389 wqe_misc = NES_IWARP_SQ_OP_RDMAR;
3390 } else {
3391 wqe_misc = NES_IWARP_SQ_OP_RDMAR_LOCINV;
3392 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
3393 ib_wr->ex.invalidate_rkey);
3394 }
3395
3396 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
3397 ib_wr->wr.rdma.remote_addr);
3398 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
3399 ib_wr->wr.rdma.rkey);
3400 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
3401 ib_wr->sg_list->length);
3402 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
3403 ib_wr->sg_list->addr);
3404 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
3405 ib_wr->sg_list->lkey);
3406 break;
3407 case IB_WR_LOCAL_INV:
3408 wqe_misc = NES_IWARP_SQ_OP_LOCINV;
3409 set_wqe_32bit_value(wqe->wqe_words,
3410 NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX,
3411 ib_wr->ex.invalidate_rkey);
3412 break;
3413 case IB_WR_FAST_REG_MR:
3414 {
3415 int i;
3416 int flags = ib_wr->wr.fast_reg.access_flags;
3417 struct nes_ib_fast_reg_page_list *pnesfrpl =
3418 container_of(ib_wr->wr.fast_reg.page_list,
3419 struct nes_ib_fast_reg_page_list,
3420 ibfrpl);
3421 u64 *src_page_list = pnesfrpl->ibfrpl.page_list;
3422 u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva;
3423
3424 if (ib_wr->wr.fast_reg.page_list_len >
3425 (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) {
3426 nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n");
3427 err = -EINVAL;
3428 break;
3429 }
3430 wqe_misc = NES_IWARP_SQ_OP_FAST_REG;
3431 set_wqe_64bit_value(wqe->wqe_words,
3432 NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX,
3433 ib_wr->wr.fast_reg.iova_start);
3434 set_wqe_32bit_value(wqe->wqe_words,
3435 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
3436 ib_wr->wr.fast_reg.length);
3437 set_wqe_32bit_value(wqe->wqe_words,
3438 NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
3439 ib_wr->wr.fast_reg.rkey);
3440 /* Set page size: */
3441 if (ib_wr->wr.fast_reg.page_shift == 12) {
3442 wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K;
3443 } else if (ib_wr->wr.fast_reg.page_shift == 21) {
3444 wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M;
3445 } else {
3446 nes_debug(NES_DBG_IW_TX, "Invalid page shift,"
3447 " ib_wr=%u, max=1\n", ib_wr->num_sge);
3448 err = -EINVAL;
3449 break;
3450 }
3451 /* Set access_flags */
3452 wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ;
3453 if (flags & IB_ACCESS_LOCAL_WRITE)
3454 wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE;
3455
3456 if (flags & IB_ACCESS_REMOTE_WRITE)
3457 wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE;
3458
3459 if (flags & IB_ACCESS_REMOTE_READ)
3460 wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ;
3461
3462 if (flags & IB_ACCESS_MW_BIND)
3463 wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND;
3464
3465 /* Fill in PBL info: */
3466 if (ib_wr->wr.fast_reg.page_list_len >
3467 pnesfrpl->ibfrpl.max_page_list_len) {
3468 nes_debug(NES_DBG_IW_TX, "Invalid page list length,"
3469 " ib_wr=%p, value=%u, max=%u\n",
3470 ib_wr, ib_wr->wr.fast_reg.page_list_len,
3471 pnesfrpl->ibfrpl.max_page_list_len);
3472 err = -EINVAL;
3473 break;
3474 }
3475
3476 set_wqe_64bit_value(wqe->wqe_words,
3477 NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX,
3478 pnesfrpl->nes_wqe_pbl.paddr);
3479
3480 set_wqe_32bit_value(wqe->wqe_words,
3481 NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX,
3482 ib_wr->wr.fast_reg.page_list_len * 8);
3483
3484 for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
3485 dst_page_list[i] = cpu_to_le64(src_page_list[i]);
3486
3487 nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, "
3488 "length: %d, rkey: %0x, pgl_paddr: %p, "
3489 "page_list_len: %u, wqe_misc: %x\n",
3490 (void *)ib_wr->wr.fast_reg.iova_start,
3491 ib_wr->wr.fast_reg.length,
3492 ib_wr->wr.fast_reg.rkey,
3493 (void *)pnesfrpl->nes_wqe_pbl.paddr,
3494 ib_wr->wr.fast_reg.page_list_len,
3495 wqe_misc);
3496 break;
3497 }
3498 default:
3499 /* error */
3500 err = -EINVAL;
3501 break;
3503 } 3502 }
3503
3504 if (err)
3505 break;
3506
3507 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || nesqp->sig_all)
3508 wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
3509
3504 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc); 3510 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
3505 3511
3506 ib_wr = ib_wr->next; 3512 ib_wr = ib_wr->next;
@@ -3522,6 +3528,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3522 3528
3523 spin_unlock_irqrestore(&nesqp->lock, flags); 3529 spin_unlock_irqrestore(&nesqp->lock, flags);
3524 3530
3531out:
3525 if (err) 3532 if (err)
3526 *bad_wr = ib_wr; 3533 *bad_wr = ib_wr;
3527 return err; 3534 return err;
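The reworked nes_post_send() above adds handling for IB_WR_SEND_WITH_INV, IB_WR_LOCAL_INV and IB_WR_FAST_REG_MR. As an illustrative aside (not part of the patch; qp and mr are assumed to exist), a consumer would post the new local-invalidate opcode roughly like this:

	struct ib_send_wr inv_wr, *bad_wr;

	memset(&inv_wr, 0, sizeof(inv_wr));
	inv_wr.opcode = IB_WR_LOCAL_INV;
	inv_wr.send_flags = IB_SEND_SIGNALED;	/* or rely on a sig_all QP */
	inv_wr.ex.invalidate_rkey = mr->rkey;	/* STag to invalidate */

	ib_post_send(qp, &inv_wr, &bad_wr);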
@@ -3548,8 +3555,10 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3548 u32 counter; 3555 u32 counter;
3549 u32 total_payload_length; 3556 u32 total_payload_length;
3550 3557
3551 if (nesqp->ibqp_state > IB_QPS_RTS) 3558 if (nesqp->ibqp_state > IB_QPS_RTS) {
3552 return -EINVAL; 3559 err = -EINVAL;
3560 goto out;
3561 }
3553 3562
3554 spin_lock_irqsave(&nesqp->lock, flags); 3563 spin_lock_irqsave(&nesqp->lock, flags);
3555 3564
@@ -3612,6 +3621,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3612 3621
3613 spin_unlock_irqrestore(&nesqp->lock, flags); 3622 spin_unlock_irqrestore(&nesqp->lock, flags);
3614 3623
3624out:
3615 if (err) 3625 if (err)
3616 *bad_wr = ib_wr; 3626 *bad_wr = ib_wr;
3617 return err; 3627 return err;
@@ -3720,6 +3730,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3720 nes_debug(NES_DBG_CQ, "Operation = Send.\n"); 3730 nes_debug(NES_DBG_CQ, "Operation = Send.\n");
3721 entry->opcode = IB_WC_SEND; 3731 entry->opcode = IB_WC_SEND;
3722 break; 3732 break;
3733 case NES_IWARP_SQ_OP_LOCINV:
3734 entry->opcode = IB_WR_LOCAL_INV;
3735 break;
3736 case NES_IWARP_SQ_OP_FAST_REG:
3737 entry->opcode = IB_WC_FAST_REG_MR;
3738 break;
3723 } 3739 }
3724 3740
3725 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1); 3741 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
@@ -3890,10 +3906,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
3890 nesibdev->ibdev.dealloc_mw = nes_dealloc_mw; 3906 nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
3891 nesibdev->ibdev.bind_mw = nes_bind_mw; 3907 nesibdev->ibdev.bind_mw = nes_bind_mw;
3892 3908
3893 nesibdev->ibdev.alloc_fmr = nes_alloc_fmr; 3909 nesibdev->ibdev.alloc_fast_reg_mr = nes_alloc_fast_reg_mr;
3894 nesibdev->ibdev.unmap_fmr = nes_unmap_fmr; 3910 nesibdev->ibdev.alloc_fast_reg_page_list = nes_alloc_fast_reg_page_list;
3895 nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr; 3911 nesibdev->ibdev.free_fast_reg_page_list = nes_free_fast_reg_page_list;
3896 nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr;
3897 3912
3898 nesibdev->ibdev.attach_mcast = nes_multicast_attach; 3913 nesibdev->ibdev.attach_mcast = nes_multicast_attach;
3899 nesibdev->ibdev.detach_mcast = nes_multicast_detach; 3914 nesibdev->ibdev.detach_mcast = nes_multicast_detach;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 89822d75f82e..2df9993e0cac 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -135,19 +135,15 @@ struct nes_qp {
135 struct ib_qp ibqp; 135 struct ib_qp ibqp;
136 void *allocated_buffer; 136 void *allocated_buffer;
137 struct iw_cm_id *cm_id; 137 struct iw_cm_id *cm_id;
138 struct workqueue_struct *wq;
139 struct nes_cq *nesscq; 138 struct nes_cq *nesscq;
140 struct nes_cq *nesrcq; 139 struct nes_cq *nesrcq;
141 struct nes_pd *nespd; 140 struct nes_pd *nespd;
142 void *cm_node; /* handle of the node this QP is associated with */ 141 void *cm_node; /* handle of the node this QP is associated with */
143 struct ietf_mpa_frame *ietf_frame; 142 struct ietf_mpa_frame *ietf_frame;
144 dma_addr_t ietf_frame_pbase; 143 dma_addr_t ietf_frame_pbase;
145 wait_queue_head_t state_waitq;
146 struct ib_mr *lsmm_mr; 144 struct ib_mr *lsmm_mr;
147 unsigned long socket;
148 struct nes_hw_qp hwqp; 145 struct nes_hw_qp hwqp;
149 struct work_struct work; 146 struct work_struct work;
150 struct work_struct ae_work;
151 enum ib_qp_state ibqp_state; 147 enum ib_qp_state ibqp_state;
152 u32 iwarp_state; 148 u32 iwarp_state;
153 u32 hte_index; 149 u32 hte_index;
@@ -165,19 +161,20 @@ struct nes_qp {
165 struct page *page; 161 struct page *page;
166 struct timer_list terminate_timer; 162 struct timer_list terminate_timer;
167 enum ib_event_type terminate_eventtype; 163 enum ib_event_type terminate_eventtype;
168 wait_queue_head_t kick_waitq; 164 u16 active_conn:1;
169 u16 in_disconnect; 165 u16 skip_lsmm:1;
166 u16 user_mode:1;
167 u16 hte_added:1;
168 u16 flush_issued:1;
169 u16 destroyed:1;
170 u16 sig_all:1;
171 u16 rsvd:9;
170 u16 private_data_len; 172 u16 private_data_len;
171 u16 term_sq_flush_code; 173 u16 term_sq_flush_code;
172 u16 term_rq_flush_code; 174 u16 term_rq_flush_code;
173 u8 active_conn;
174 u8 skip_lsmm;
175 u8 user_mode;
176 u8 hte_added;
177 u8 hw_iwarp_state; 175 u8 hw_iwarp_state;
178 u8 flush_issued;
179 u8 hw_tcp_state; 176 u8 hw_tcp_state;
180 u8 term_flags; 177 u8 term_flags;
181 u8 destroyed; 178 u8 sq_kmapped;
182}; 179};
183#endif /* NES_VERBS_H */ 180#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2bf5116deec4..df3eb8c9fd96 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -884,6 +884,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
884 884
885 neigh->neighbour = neighbour; 885 neigh->neighbour = neighbour;
886 neigh->dev = dev; 886 neigh->dev = dev;
887 memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
887 *to_ipoib_neigh(neighbour) = neigh; 888 *to_ipoib_neigh(neighbour) = neigh;
888 skb_queue_head_init(&neigh->queue); 889 skb_queue_head_init(&neigh->queue);
889 ipoib_cm_set(neigh, NULL); 890 ipoib_cm_set(neigh, NULL);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index b9453d068e9d..274c883ef3ea 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -209,6 +209,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
209 mem_copy->copy_buf = NULL; 209 mem_copy->copy_buf = NULL;
210} 210}
211 211
212#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
213
212/** 214/**
213 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses 215 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
214 * and returns the length of resulting physical address array (may be less than 216 * and returns the length of resulting physical address array (may be less than
@@ -221,62 +223,52 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
221 * where --few fragments of the same page-- are present in the SG as 223 * where --few fragments of the same page-- are present in the SG as
222 * consecutive elements. Also, it handles one entry SG. 224 * consecutive elements. Also, it handles one entry SG.
223 */ 225 */
226
224static int iser_sg_to_page_vec(struct iser_data_buf *data, 227static int iser_sg_to_page_vec(struct iser_data_buf *data,
225 struct iser_page_vec *page_vec, 228 struct iser_page_vec *page_vec,
226 struct ib_device *ibdev) 229 struct ib_device *ibdev)
227{ 230{
228 struct scatterlist *sgl = (struct scatterlist *)data->buf; 231 struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
229 struct scatterlist *sg; 232 u64 start_addr, end_addr, page, chunk_start = 0;
230 u64 first_addr, last_addr, page;
231 int end_aligned;
232 unsigned int cur_page = 0;
233 unsigned long total_sz = 0; 233 unsigned long total_sz = 0;
234 int i; 234 unsigned int dma_len;
235 int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
235 236
236 /* compute the offset of first element */ 237 /* compute the offset of first element */
237 page_vec->offset = (u64) sgl[0].offset & ~MASK_4K; 238 page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
238 239
240 new_chunk = 1;
241 cur_page = 0;
239 for_each_sg(sgl, sg, data->dma_nents, i) { 242 for_each_sg(sgl, sg, data->dma_nents, i) {
240 unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 243 start_addr = ib_sg_dma_address(ibdev, sg);
241 244 if (new_chunk)
245 chunk_start = start_addr;
246 dma_len = ib_sg_dma_len(ibdev, sg);
247 end_addr = start_addr + dma_len;
242 total_sz += dma_len; 248 total_sz += dma_len;
243 249
244 first_addr = ib_sg_dma_address(ibdev, sg); 250 /* collect page fragments until aligned or end of SG list */
245 last_addr = first_addr + dma_len; 251 if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
246 252 new_chunk = 0;
247 end_aligned = !(last_addr & ~MASK_4K); 253 continue;
248
249 /* continue to collect page fragments till aligned or SG ends */
250 while (!end_aligned && (i + 1 < data->dma_nents)) {
251 sg = sg_next(sg);
252 i++;
253 dma_len = ib_sg_dma_len(ibdev, sg);
254 total_sz += dma_len;
255 last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
256 end_aligned = !(last_addr & ~MASK_4K);
257 } 254 }
258 255 new_chunk = 1;
259 /* handle the 1st page in the 1st DMA element */ 256
260 if (cur_page == 0) { 257 /* address of the first page in the contiguous chunk;
261 page = first_addr & MASK_4K; 258 masking relevant for the very first SG entry,
262 page_vec->pages[cur_page] = page; 259 which might be unaligned */
263 cur_page++; 260 page = chunk_start & MASK_4K;
261 do {
262 page_vec->pages[cur_page++] = page;
264 page += SIZE_4K; 263 page += SIZE_4K;
265 } else 264 } while (page < end_addr);
266 page = first_addr;
267
268 for (; page < last_addr; page += SIZE_4K) {
269 page_vec->pages[cur_page] = page;
270 cur_page++;
271 }
272
273 } 265 }
266
274 page_vec->data_size = total_sz; 267 page_vec->data_size = total_sz;
275 iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page); 268 iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
276 return cur_page; 269 return cur_page;
277} 270}
278 271
279#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
280 272
281/** 273/**
282 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned 274 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -284,42 +276,40 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
284 * the number of entries which are aligned correctly. Supports the case where 276 * the number of entries which are aligned correctly. Supports the case where
285 * consecutive SG elements are actually fragments of the same physical page. 277 * consecutive SG elements are actually fragments of the same physical page.
286 */ 278 */
287static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, 279static int iser_data_buf_aligned_len(struct iser_data_buf *data,
288 struct ib_device *ibdev) 280 struct ib_device *ibdev)
289{ 281{
290 struct scatterlist *sgl, *sg; 282 struct scatterlist *sgl, *sg, *next_sg = NULL;
291 u64 end_addr, next_addr; 283 u64 start_addr, end_addr;
292 int i, cnt; 284 int i, ret_len, start_check = 0;
293 unsigned int ret_len = 0; 285
286 if (data->dma_nents == 1)
287 return 1;
294 288
295 sgl = (struct scatterlist *)data->buf; 289 sgl = (struct scatterlist *)data->buf;
290 start_addr = ib_sg_dma_address(ibdev, sgl);
296 291
297 cnt = 0;
298 for_each_sg(sgl, sg, data->dma_nents, i) { 292 for_each_sg(sgl, sg, data->dma_nents, i) {
299 /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX " 293 if (start_check && !IS_4K_ALIGNED(start_addr))
300 "offset: %ld sz: %ld\n", i, 294 break;
301 (unsigned long)sg_phys(sg), 295
302 (unsigned long)sg->offset, 296 next_sg = sg_next(sg);
303 (unsigned long)sg->length); */ 297 if (!next_sg)
304 end_addr = ib_sg_dma_address(ibdev, sg) + 298 break;
305 ib_sg_dma_len(ibdev, sg); 299
306 /* iser_dbg("Checking sg iobuf end address " 300 end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
307 "0x%08lX\n", end_addr); */ 301 start_addr = ib_sg_dma_address(ibdev, next_sg);
308 if (i + 1 < data->dma_nents) { 302
309 next_addr = ib_sg_dma_address(ibdev, sg_next(sg)); 303 if (end_addr == start_addr) {
310 /* are i, i+1 fragments of the same page? */ 304 start_check = 0;
311 if (end_addr == next_addr) { 305 continue;
312 cnt++; 306 } else
313 continue; 307 start_check = 1;
314 } else if (!IS_4K_ALIGNED(end_addr)) { 308
315 ret_len = cnt + 1; 309 if (!IS_4K_ALIGNED(end_addr))
316 break; 310 break;
317 }
318 }
319 cnt++;
320 } 311 }
321 if (i == data->dma_nents) 312 ret_len = (next_sg) ? i : i+1;
322 ret_len = cnt; /* loop ended */
323 iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n", 313 iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
324 ret_len, data->dma_nents, data); 314 ret_len, data->dma_nents, data);
325 return ret_len; 315 return ret_len;
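For reference, the rewritten helpers above hinge on the IS_4K_ALIGNED() test visible earlier in this file: an SG element only closes a contiguous chunk when its end address falls on a 4K boundary (or it is the last element). A tiny worked example, using made-up addresses and the MASK_4K/IS_4K_ALIGNED definitions shown above:

	u64 start = 0x10003000ULL;		/* arbitrary DMA address */
	u64 end_a = start + 0x1000;		/* 0x10004000 */
	u64 end_b = start + 0x0800;		/* 0x10003800 */

	bool closes_a = IS_4K_ALIGNED(end_a);	/* true: chunk may end here */
	bool closes_b = IS_4K_ALIGNED(end_b);	/* false: keep collecting
						   fragments unless this is
						   the last SG entry */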
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 5c16001959cc..ab060710688f 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -296,9 +296,15 @@ static void input_handle_event(struct input_dev *dev,
296 * @value: value of the event 296 * @value: value of the event
297 * 297 *
298 * This function should be used by drivers implementing various input 298 * This function should be used by drivers implementing various input
299 * devices. See also input_inject_event(). 299 * devices to report input events. See also input_inject_event().
300 *
301 * NOTE: input_event() may be safely used right after input device was
302 * allocated with input_allocate_device(), even before it is registered
303 * with input_register_device(), but the event will not reach any of the
304 * input handlers. Such early invocation of input_event() may be used
305 * to 'seed' initial state of a switch or initial position of absolute
306 * axis, etc.
300 */ 307 */
301
302void input_event(struct input_dev *dev, 308void input_event(struct input_dev *dev,
303 unsigned int type, unsigned int code, int value) 309 unsigned int type, unsigned int code, int value)
304{ 310{
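The new kernel-doc paragraph above describes the "seeding" pattern. A hypothetical sketch of a driver using it for a lid switch (the device name and lid_gpio are made up; error handling is trimmed):

	struct input_dev *idev;

	idev = input_allocate_device();
	if (!idev)
		return -ENOMEM;

	idev->name = "example-lid-switch";
	input_set_capability(idev, EV_SW, SW_LID);

	/* Seed the initial state before registration; no handler sees this. */
	input_event(idev, EV_SW, SW_LID, gpio_get_value(lid_gpio));
	input_sync(idev);

	return input_register_device(idev);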
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index d48c808d5928..1edb596d927b 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -319,7 +319,7 @@ static int adp5588_resume(struct device *dev)
319 return 0; 319 return 0;
320} 320}
321 321
322static struct dev_pm_ops adp5588_dev_pm_ops = { 322static const struct dev_pm_ops adp5588_dev_pm_ops = {
323 .suspend = adp5588_suspend, 323 .suspend = adp5588_suspend,
324 .resume = adp5588_resume, 324 .resume = adp5588_resume,
325}; 325};
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 181d30e3018e..e45740429f7e 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -22,11 +22,11 @@
22 22
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/input.h>
26#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/input/matrix_keypad.h>
27 28
28#include <mach/hardware.h> 29#include <mach/hardware.h>
29#include <mach/gpio.h>
30#include <mach/ep93xx_keypad.h> 30#include <mach/ep93xx_keypad.h>
31 31
32/* 32/*
@@ -60,38 +60,37 @@
60#define KEY_REG_KEY1_MASK (0x0000003f) 60#define KEY_REG_KEY1_MASK (0x0000003f)
61#define KEY_REG_KEY1_SHIFT (0) 61#define KEY_REG_KEY1_SHIFT (0)
62 62
63#define keypad_readl(off) __raw_readl(keypad->mmio_base + (off)) 63#define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS)
64#define keypad_writel(v, off) __raw_writel((v), keypad->mmio_base + (off))
65
66#define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS)
67 64
68struct ep93xx_keypad { 65struct ep93xx_keypad {
69 struct ep93xx_keypad_platform_data *pdata; 66 struct ep93xx_keypad_platform_data *pdata;
70
71 struct clk *clk;
72 struct input_dev *input_dev; 67 struct input_dev *input_dev;
68 struct clk *clk;
69
73 void __iomem *mmio_base; 70 void __iomem *mmio_base;
74 71
75 int irq; 72 unsigned int matrix_keycodes[EP93XX_MATRIX_SIZE];
76 int enabled;
77 73
78 int key1; 74 int key1;
79 int key2; 75 int key2;
80 76
81 unsigned int matrix_keycodes[MAX_MATRIX_KEY_NUM]; 77 int irq;
78
79 bool enabled;
82}; 80};
83 81
84static void ep93xx_keypad_build_keycode(struct ep93xx_keypad *keypad) 82static void ep93xx_keypad_build_keycode(struct ep93xx_keypad *keypad)
85{ 83{
86 struct ep93xx_keypad_platform_data *pdata = keypad->pdata; 84 struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
87 struct input_dev *input_dev = keypad->input_dev; 85 struct input_dev *input_dev = keypad->input_dev;
86 unsigned int *key;
88 int i; 87 int i;
89 88
90 for (i = 0; i < pdata->matrix_key_map_size; i++) { 89 key = &pdata->matrix_key_map[0];
91 unsigned int key = pdata->matrix_key_map[i]; 90 for (i = 0; i < pdata->matrix_key_map_size; i++, key++) {
92 int row = (key >> 28) & 0xf; 91 int row = KEY_ROW(*key);
93 int col = (key >> 24) & 0xf; 92 int col = KEY_COL(*key);
94 int code = key & 0xffffff; 93 int code = KEY_VAL(*key);
95 94
96 keypad->matrix_keycodes[(row << 3) + col] = code; 95 keypad->matrix_keycodes[(row << 3) + col] = code;
97 __set_bit(code, input_dev->keybit); 96 __set_bit(code, input_dev->keybit);
@@ -102,9 +101,11 @@ static irqreturn_t ep93xx_keypad_irq_handler(int irq, void *dev_id)
102{ 101{
103 struct ep93xx_keypad *keypad = dev_id; 102 struct ep93xx_keypad *keypad = dev_id;
104 struct input_dev *input_dev = keypad->input_dev; 103 struct input_dev *input_dev = keypad->input_dev;
105 unsigned int status = keypad_readl(KEY_REG); 104 unsigned int status;
106 int keycode, key1, key2; 105 int keycode, key1, key2;
107 106
107 status = __raw_readl(keypad->mmio_base + KEY_REG);
108
108 keycode = (status & KEY_REG_KEY1_MASK) >> KEY_REG_KEY1_SHIFT; 109 keycode = (status & KEY_REG_KEY1_MASK) >> KEY_REG_KEY1_SHIFT;
109 key1 = keypad->matrix_keycodes[keycode]; 110 key1 = keypad->matrix_keycodes[keycode];
110 111
@@ -152,7 +153,10 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
152 struct ep93xx_keypad_platform_data *pdata = keypad->pdata; 153 struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
153 unsigned int val = 0; 154 unsigned int val = 0;
154 155
155 clk_set_rate(keypad->clk, pdata->flags & EP93XX_KEYPAD_KDIV); 156 if (pdata->flags & EP93XX_KEYPAD_KDIV)
157 clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV4);
158 else
159 clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV16);
156 160
157 if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY) 161 if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
158 val |= KEY_INIT_DIS3KY; 162 val |= KEY_INIT_DIS3KY;
@@ -167,7 +171,7 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
167 171
168 val |= ((pdata->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK); 172 val |= ((pdata->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK);
169 173
170 keypad_writel(val, KEY_INIT); 174 __raw_writel(val, keypad->mmio_base + KEY_INIT);
171} 175}
172 176
173static int ep93xx_keypad_open(struct input_dev *pdev) 177static int ep93xx_keypad_open(struct input_dev *pdev)
@@ -177,7 +181,7 @@ static int ep93xx_keypad_open(struct input_dev *pdev)
177 if (!keypad->enabled) { 181 if (!keypad->enabled) {
178 ep93xx_keypad_config(keypad); 182 ep93xx_keypad_config(keypad);
179 clk_enable(keypad->clk); 183 clk_enable(keypad->clk);
180 keypad->enabled = 1; 184 keypad->enabled = true;
181 } 185 }
182 186
183 return 0; 187 return 0;
@@ -189,7 +193,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
189 193
190 if (keypad->enabled) { 194 if (keypad->enabled) {
191 clk_disable(keypad->clk); 195 clk_disable(keypad->clk);
192 keypad->enabled = 0; 196 keypad->enabled = false;
193 } 197 }
194} 198}
195 199
@@ -211,7 +215,7 @@ static int ep93xx_keypad_suspend(struct platform_device *pdev,
211 215
212 if (keypad->enabled) { 216 if (keypad->enabled) {
213 clk_disable(keypad->clk); 217 clk_disable(keypad->clk);
214 keypad->enabled = 0; 218 keypad->enabled = false;
215 } 219 }
216 220
217 mutex_unlock(&input_dev->mutex); 221 mutex_unlock(&input_dev->mutex);
@@ -236,7 +240,7 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
236 if (!keypad->enabled) { 240 if (!keypad->enabled) {
237 ep93xx_keypad_config(keypad); 241 ep93xx_keypad_config(keypad);
238 clk_enable(keypad->clk); 242 clk_enable(keypad->clk);
239 keypad->enabled = 1; 243 keypad->enabled = true;
240 } 244 }
241 } 245 }
242 246
@@ -252,88 +256,56 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
252static int __devinit ep93xx_keypad_probe(struct platform_device *pdev) 256static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
253{ 257{
254 struct ep93xx_keypad *keypad; 258 struct ep93xx_keypad *keypad;
255 struct ep93xx_keypad_platform_data *pdata = pdev->dev.platform_data;
256 struct input_dev *input_dev; 259 struct input_dev *input_dev;
257 struct resource *res; 260 struct resource *res;
258 int irq, err, i, gpio; 261 int err;
259
260 if (!pdata ||
261 !pdata->matrix_key_rows ||
262 pdata->matrix_key_rows > MAX_MATRIX_KEY_ROWS ||
263 !pdata->matrix_key_cols ||
264 pdata->matrix_key_cols > MAX_MATRIX_KEY_COLS) {
265 dev_err(&pdev->dev, "invalid or missing platform data\n");
266 return -EINVAL;
267 }
268 262
269 keypad = kzalloc(sizeof(struct ep93xx_keypad), GFP_KERNEL); 263 keypad = kzalloc(sizeof(struct ep93xx_keypad), GFP_KERNEL);
270 if (!keypad) { 264 if (!keypad)
271 dev_err(&pdev->dev, "failed to allocate driver data\n");
272 return -ENOMEM; 265 return -ENOMEM;
273 }
274 266
275 keypad->pdata = pdata; 267 keypad->pdata = pdev->dev.platform_data;
268 if (!keypad->pdata) {
269 err = -EINVAL;
270 goto failed_free;
271 }
276 272
277 irq = platform_get_irq(pdev, 0); 273 keypad->irq = platform_get_irq(pdev, 0);
278 if (irq < 0) { 274 if (!keypad->irq) {
279 dev_err(&pdev->dev, "failed to get keypad irq\n");
280 err = -ENXIO; 275 err = -ENXIO;
281 goto failed_free; 276 goto failed_free;
282 } 277 }
283 278
284 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 279 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
285 if (!res) { 280 if (!res) {
286 dev_err(&pdev->dev, "failed to get I/O memory\n");
287 err = -ENXIO; 281 err = -ENXIO;
288 goto failed_free; 282 goto failed_free;
289 } 283 }
290 284
291 res = request_mem_region(res->start, resource_size(res), pdev->name); 285 res = request_mem_region(res->start, resource_size(res), pdev->name);
292 if (!res) { 286 if (!res) {
293 dev_err(&pdev->dev, "failed to request I/O memory\n");
294 err = -EBUSY; 287 err = -EBUSY;
295 goto failed_free; 288 goto failed_free;
296 } 289 }
297 290
298 keypad->mmio_base = ioremap(res->start, resource_size(res)); 291 keypad->mmio_base = ioremap(res->start, resource_size(res));
299 if (keypad->mmio_base == NULL) { 292 if (keypad->mmio_base == NULL) {
300 dev_err(&pdev->dev, "failed to remap I/O memory\n");
301 err = -ENXIO; 293 err = -ENXIO;
302 goto failed_free_mem; 294 goto failed_free_mem;
303 } 295 }
304 296
305 /* Request the needed GPIO's */ 297 err = ep93xx_keypad_acquire_gpio(pdev);
306 gpio = EP93XX_GPIO_LINE_ROW0; 298 if (err)
307 for (i = 0; i < keypad->pdata->matrix_key_rows; i++, gpio++) { 299 goto failed_free_io;
308 err = gpio_request(gpio, pdev->name);
309 if (err) {
310 dev_err(&pdev->dev, "failed to request gpio-%d\n",
311 gpio);
312 goto failed_free_rows;
313 }
314 }
315
316 gpio = EP93XX_GPIO_LINE_COL0;
317 for (i = 0; i < keypad->pdata->matrix_key_cols; i++, gpio++) {
318 err = gpio_request(gpio, pdev->name);
319 if (err) {
320 dev_err(&pdev->dev, "failed to request gpio-%d\n",
321 gpio);
322 goto failed_free_cols;
323 }
324 }
325 300
326 keypad->clk = clk_get(&pdev->dev, "key_clk"); 301 keypad->clk = clk_get(&pdev->dev, NULL);
327 if (IS_ERR(keypad->clk)) { 302 if (IS_ERR(keypad->clk)) {
328 dev_err(&pdev->dev, "failed to get keypad clock\n");
329 err = PTR_ERR(keypad->clk); 303 err = PTR_ERR(keypad->clk);
330 goto failed_free_io; 304 goto failed_free_gpio;
331 } 305 }
332 306
333 /* Create and register the input driver */
334 input_dev = input_allocate_device(); 307 input_dev = input_allocate_device();
335 if (!input_dev) { 308 if (!input_dev) {
336 dev_err(&pdev->dev, "failed to allocate input device\n");
337 err = -ENOMEM; 309 err = -ENOMEM;
338 goto failed_put_clk; 310 goto failed_put_clk;
339 } 311 }
@@ -358,44 +330,29 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
358 ep93xx_keypad_build_keycode(keypad); 330 ep93xx_keypad_build_keycode(keypad);
359 platform_set_drvdata(pdev, keypad); 331 platform_set_drvdata(pdev, keypad);
360 332
361 err = request_irq(irq, ep93xx_keypad_irq_handler, IRQF_DISABLED, 333 err = request_irq(keypad->irq, ep93xx_keypad_irq_handler,
362 pdev->name, keypad); 334 IRQF_DISABLED, pdev->name, keypad);
363 if (err) { 335 if (err)
364 dev_err(&pdev->dev, "failed to request IRQ\n");
365 goto failed_free_dev; 336 goto failed_free_dev;
366 }
367
368 keypad->irq = irq;
369 337
370 /* Register the input device */
371 err = input_register_device(input_dev); 338 err = input_register_device(input_dev);
372 if (err) { 339 if (err)
373 dev_err(&pdev->dev, "failed to register input device\n");
374 goto failed_free_irq; 340 goto failed_free_irq;
375 }
376 341
377 device_init_wakeup(&pdev->dev, 1); 342 device_init_wakeup(&pdev->dev, 1);
378 343
379 return 0; 344 return 0;
380 345
381failed_free_irq: 346failed_free_irq:
382 free_irq(irq, pdev); 347 free_irq(keypad->irq, pdev);
383 platform_set_drvdata(pdev, NULL); 348 platform_set_drvdata(pdev, NULL);
384failed_free_dev: 349failed_free_dev:
385 input_free_device(input_dev); 350 input_free_device(input_dev);
386failed_put_clk: 351failed_put_clk:
387 clk_put(keypad->clk); 352 clk_put(keypad->clk);
353failed_free_gpio:
354 ep93xx_keypad_release_gpio(pdev);
388failed_free_io: 355failed_free_io:
389 i = keypad->pdata->matrix_key_cols - 1;
390 gpio = EP93XX_GPIO_LINE_COL0 + i;
391failed_free_cols:
392 for ( ; i >= 0; i--, gpio--)
393 gpio_free(gpio);
394 i = keypad->pdata->matrix_key_rows - 1;
395 gpio = EP93XX_GPIO_LINE_ROW0 + i;
396failed_free_rows:
397 for ( ; i >= 0; i--, gpio--)
398 gpio_free(gpio);
399 iounmap(keypad->mmio_base); 356 iounmap(keypad->mmio_base);
400failed_free_mem: 357failed_free_mem:
401 release_mem_region(res->start, resource_size(res)); 358 release_mem_region(res->start, resource_size(res));
@@ -408,7 +365,6 @@ static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
408{ 365{
409 struct ep93xx_keypad *keypad = platform_get_drvdata(pdev); 366 struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
410 struct resource *res; 367 struct resource *res;
411 int i, gpio;
412 368
413 free_irq(keypad->irq, pdev); 369 free_irq(keypad->irq, pdev);
414 370
@@ -420,15 +376,7 @@ static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
420 376
421 input_unregister_device(keypad->input_dev); 377 input_unregister_device(keypad->input_dev);
422 378
423 i = keypad->pdata->matrix_key_cols - 1; 379 ep93xx_keypad_release_gpio(pdev);
424 gpio = EP93XX_GPIO_LINE_COL0 + i;
425 for ( ; i >= 0; i--, gpio--)
426 gpio_free(gpio);
427
428 i = keypad->pdata->matrix_key_rows - 1;
429 gpio = EP93XX_GPIO_LINE_ROW0 + i;
430 for ( ; i >= 0; i--, gpio--)
431 gpio_free(gpio);
432 380
433 iounmap(keypad->mmio_base); 381 iounmap(keypad->mmio_base);
434 382
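The conversion above moves the ep93xx key map to the generic KEY() encoding from <linux/input/matrix_keypad.h>. A hypothetical board-file fragment (key choices and names are made up) showing the platform data such a driver now expects:

	static unsigned int example_keypad_map[] = {
		KEY(0, 0, KEY_UP),
		KEY(0, 1, KEY_DOWN),
		KEY(1, 0, KEY_ENTER),
		KEY(1, 1, KEY_ESC),
	};

	static struct ep93xx_keypad_platform_data example_keypad_data = {
		.matrix_key_map		= example_keypad_map,
		.matrix_key_map_size	= ARRAY_SIZE(example_keypad_map),
		.flags			= EP93XX_KEYPAD_DISABLE_3_KEY,
	};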
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 076111fc72d2..8e9380bfed40 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -295,7 +295,7 @@ static int sh_keysc_resume(struct device *dev)
295 return 0; 295 return 0;
296} 296}
297 297
298static struct dev_pm_ops sh_keysc_dev_pm_ops = { 298static const struct dev_pm_ops sh_keysc_dev_pm_ops = {
299 .suspend = sh_keysc_suspend, 299 .suspend = sh_keysc_suspend,
300 .resume = sh_keysc_resume, 300 .resume = sh_keysc_resume,
301}; 301};
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 690f3fafa03b..61d10177fa83 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -247,7 +247,7 @@ static int bfin_rotary_resume(struct device *dev)
247 return 0; 247 return 0;
248} 248}
249 249
250static struct dev_pm_ops bfin_rotary_pm_ops = { 250static const struct dev_pm_ops bfin_rotary_pm_ops = {
251 .suspend = bfin_rotary_suspend, 251 .suspend = bfin_rotary_suspend,
252 .resume = bfin_rotary_resume, 252 .resume = bfin_rotary_resume,
253}; 253};
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 21cb755a54fb..ea4e1fd12651 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -127,7 +127,7 @@ static void pcspkr_shutdown(struct platform_device *dev)
127 pcspkr_event(NULL, EV_SND, SND_BELL, 0); 127 pcspkr_event(NULL, EV_SND, SND_BELL, 0);
128} 128}
129 129
130static struct dev_pm_ops pcspkr_pm_ops = { 130static const struct dev_pm_ops pcspkr_pm_ops = {
131 .suspend = pcspkr_suspend, 131 .suspend = pcspkr_suspend,
132}; 132};
133 133
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a3f492a50850..f93c2c0daf1f 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -5,6 +5,7 @@
5 * Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com> 5 * Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com>
6 * Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru> 6 * Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru>
7 * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz> 7 * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
8 * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net>
8 * 9 *
9 * ALPS detection, tap switching and status querying info is taken from 10 * ALPS detection, tap switching and status querying info is taken from
10 * tpconfig utility (by C. Scott Ananian and Bruce Kall). 11 * tpconfig utility (by C. Scott Ananian and Bruce Kall).
@@ -28,7 +29,6 @@
28#define dbg(format, arg...) do {} while (0) 29#define dbg(format, arg...) do {} while (0)
29#endif 30#endif
30 31
31
32#define ALPS_OLDPROTO 0x01 /* old style input */ 32#define ALPS_OLDPROTO 0x01 /* old style input */
33#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ 33#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
34#define ALPS_PASS 0x04 /* device has a pass-through port */ 34#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -37,7 +37,8 @@
37#define ALPS_FW_BK_1 0x10 /* front & back buttons present */ 37#define ALPS_FW_BK_1 0x10 /* front & back buttons present */
38#define ALPS_FW_BK_2 0x20 /* front & back buttons present */ 38#define ALPS_FW_BK_2 0x20 /* front & back buttons present */
39#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 39#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
40 40#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
41 6-byte ALPS packet */
41 42
42static const struct alps_model_info alps_model_data[] = { 43static const struct alps_model_info alps_model_data[] = {
43 { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */ 44 { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
@@ -58,7 +59,9 @@ static const struct alps_model_info alps_model_data[] = {
58 { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */ 59 { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
59 { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, 60 { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
60 { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */ 61 { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
61 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude E6500 */ 62 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
63 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
64 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
62 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 65 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
63}; 66};
64 67
@@ -69,20 +72,88 @@ static const struct alps_model_info alps_model_data[] = {
69 */ 72 */
70 73
71/* 74/*
72 * ALPS abolute Mode - new format 75 * PS/2 packet format
76 *
77 * byte 0: 0 0 YSGN XSGN 1 M R L
78 * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
79 * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
80 *
81 * Note that the device never signals overflow condition.
82 *
83 * ALPS absolute Mode - new format
73 * 84 *
74 * byte 0: 1 ? ? ? 1 ? ? ? 85 * byte 0: 1 ? ? ? 1 ? ? ?
75 * byte 1: 0 x6 x5 x4 x3 x2 x1 x0 86 * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
76 * byte 2: 0 x10 x9 x8 x7 ? fin ges 87 * byte 2: 0 x10 x9 x8 x7 ? fin ges
77 * byte 3: 0 y9 y8 y7 1 M R L 88 * byte 3: 0 y9 y8 y7 1 M R L
78 * byte 4: 0 y6 y5 y4 y3 y2 y1 y0 89 * byte 4: 0 y6 y5 y4 y3 y2 y1 y0
79 * byte 5: 0 z6 z5 z4 z3 z2 z1 z0 90 * byte 5: 0 z6 z5 z4 z3 z2 z1 z0
80 * 91 *
92 * Dualpoint device -- interleaved packet format
93 *
94 * byte 0: 1 1 0 0 1 1 1 1
95 * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
96 * byte 2: 0 x10 x9 x8 x7 0 fin ges
97 * byte 3: 0 0 YSGN XSGN 1 1 1 1
98 * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
99 * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
100 * byte 6: 0 y9 y8 y7 1 m r l
101 * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
102 * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
103 *
104 * CAPITALS = stick, lowercase = touchpad
105 *
81 * ?'s can have different meanings on different models, 106 * ?'s can have different meanings on different models,
82 * such as wheel rotation, extra buttons, stick buttons 107 * such as wheel rotation, extra buttons, stick buttons
83 * on a dualpoint, etc. 108 * on a dualpoint, etc.
84 */ 109 */
85 110
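For reference, a minimal sketch of how the bare 3-byte PS/2 packet documented above decodes. The helper below is illustrative only and not part of the patch; it mirrors what alps_report_bare_ps2_packet() does further down:

struct bare_ps2_report {
	int left, right, middle;
	int dx, dy;
};

static void decode_bare_ps2(const unsigned char p[3],
			    struct bare_ps2_report *out)
{
	/* byte 0: 0 0 YSGN XSGN 1 M R L */
	out->left   = p[0] & 0x01;
	out->right  = p[0] & 0x02;
	out->middle = p[0] & 0x04;

	/* XSGN/YSGN extend the 8-bit deltas to 9-bit signed values */
	out->dx = p[1] ? p[1] - ((p[0] << 4) & 0x100) : 0;
	/* Y is inverted: positive PS/2 Y means "up" */
	out->dy = p[2] ? ((p[0] << 3) & 0x100) - p[2] : 0;
}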
111static bool alps_is_valid_first_byte(const struct alps_model_info *model,
112 unsigned char data)
113{
114 return (data & model->mask0) == model->byte0;
115}
116
117static void alps_report_buttons(struct psmouse *psmouse,
118 struct input_dev *dev1, struct input_dev *dev2,
119 int left, int right, int middle)
120{
121 struct alps_data *priv = psmouse->private;
122 const struct alps_model_info *model = priv->i;
123
124 if (model->flags & ALPS_PS2_INTERLEAVED) {
125 struct input_dev *dev;
126
127 /*
128 * If the shared button has already been reported on the
129 * other device (dev2) then this event should also be
130 * sent through that device.
131 */
132 dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
133 input_report_key(dev, BTN_LEFT, left);
134
135 dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
136 input_report_key(dev, BTN_RIGHT, right);
137
138 dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
139 input_report_key(dev, BTN_MIDDLE, middle);
140
141 /*
142 * Sync the _other_ device now; we'll sync the first
143 * device later, once we report the rest of the events.
144 */
145 input_sync(dev2);
146 } else {
147 /*
148 * For devices with non-interleaved packets we know what
149 * device buttons belong to so we can simply report them.
150 */
151 input_report_key(dev1, BTN_LEFT, left);
152 input_report_key(dev1, BTN_RIGHT, right);
153 input_report_key(dev1, BTN_MIDDLE, middle);
154 }
155}
156
86static void alps_process_packet(struct psmouse *psmouse) 157static void alps_process_packet(struct psmouse *psmouse)
87{ 158{
88 struct alps_data *priv = psmouse->private; 159 struct alps_data *priv = psmouse->private;
@@ -93,18 +164,6 @@ static void alps_process_packet(struct psmouse *psmouse)
93 int x, y, z, ges, fin, left, right, middle; 164 int x, y, z, ges, fin, left, right, middle;
94 int back = 0, forward = 0; 165 int back = 0, forward = 0;
95 166
96 if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */
97 input_report_key(dev2, BTN_LEFT, packet[0] & 1);
98 input_report_key(dev2, BTN_RIGHT, packet[0] & 2);
99 input_report_key(dev2, BTN_MIDDLE, packet[0] & 4);
100 input_report_rel(dev2, REL_X,
101 packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
102 input_report_rel(dev2, REL_Y,
103 packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
104 input_sync(dev2);
105 return;
106 }
107
108 if (model->flags & ALPS_OLDPROTO) { 167 if (model->flags & ALPS_OLDPROTO) {
109 left = packet[2] & 0x10; 168 left = packet[2] & 0x10;
110 right = packet[2] & 0x08; 169 right = packet[2] & 0x08;
@@ -140,18 +199,13 @@ static void alps_process_packet(struct psmouse *psmouse)
140 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); 199 input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
141 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); 200 input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
142 201
143 input_report_key(dev2, BTN_LEFT, left); 202 alps_report_buttons(psmouse, dev2, dev, left, right, middle);
144 input_report_key(dev2, BTN_RIGHT, right);
145 input_report_key(dev2, BTN_MIDDLE, middle);
146 203
147 input_sync(dev);
148 input_sync(dev2); 204 input_sync(dev2);
149 return; 205 return;
150 } 206 }
151 207
152 input_report_key(dev, BTN_LEFT, left); 208 alps_report_buttons(psmouse, dev, dev2, left, right, middle);
153 input_report_key(dev, BTN_RIGHT, right);
154 input_report_key(dev, BTN_MIDDLE, middle);
155 209
156 /* Convert hardware tap to a reasonable Z value */ 210 /* Convert hardware tap to a reasonable Z value */
157 if (ges && !fin) 211 if (ges && !fin)
@@ -202,25 +256,168 @@ static void alps_process_packet(struct psmouse *psmouse)
202 input_sync(dev); 256 input_sync(dev);
203} 257}
204 258
259static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
260 unsigned char packet[],
261 bool report_buttons)
262{
263 struct alps_data *priv = psmouse->private;
264 struct input_dev *dev2 = priv->dev2;
265
266 if (report_buttons)
267 alps_report_buttons(psmouse, dev2, psmouse->dev,
268 packet[0] & 1, packet[0] & 2, packet[0] & 4);
269
270 input_report_rel(dev2, REL_X,
271 packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
272 input_report_rel(dev2, REL_Y,
273 packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
274
275 input_sync(dev2);
276}
277
278static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
279{
280 struct alps_data *priv = psmouse->private;
281
282 if (psmouse->pktcnt < 6)
283 return PSMOUSE_GOOD_DATA;
284
285 if (psmouse->pktcnt == 6) {
286 /*
287 * Start a timer to flush the packet if it ends up being the
288 * last 6-byte packet in the stream. The timer needs to fire
289 * before the psmouse core times out itself. 20 ms should be
290 * enough to decide whether we are getting more data or not.
291 */
292 mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
293 return PSMOUSE_GOOD_DATA;
294 }
295
296 del_timer(&priv->timer);
297
298 if (psmouse->packet[6] & 0x80) {
299
300 /*
301 * The highest bit is set - that means we either had a
302 * complete ALPS packet and this is the start of the
303 * next packet, or we got garbage.
304 */
305
306 if (((psmouse->packet[3] |
307 psmouse->packet[4] |
308 psmouse->packet[5]) & 0x80) ||
309 (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
310 dbg("refusing packet %x %x %x %x "
311 "(suspected interleaved ps/2)\n",
312 psmouse->packet[3], psmouse->packet[4],
313 psmouse->packet[5], psmouse->packet[6]);
314 return PSMOUSE_BAD_DATA;
315 }
316
317 alps_process_packet(psmouse);
318
319 /* Continue with the next packet */
320 psmouse->packet[0] = psmouse->packet[6];
321 psmouse->pktcnt = 1;
322
323 } else {
324
325 /*
326 * The high bit is 0 - that means that we indeed got a
327 * PS/2 packet in the middle of an ALPS packet.
328 *
329 * There is also the possibility that we got a 6-byte
330 * ALPS packet followed by a 3-byte packet from the
331 * trackpoint. We cannot distinguish between these two
332 * scenarios, but because the latter is unlikely to
333 * happen in the course of normal operation (the user
334 * would need to press all buttons on the pad and start
335 * moving the trackpoint without touching the pad
336 * surface) we assume the former. Even if we are wrong,
337 * the worst that would happen is that the cursor would
338 * jump, but we should not get protocol
339 * desynchronization.
340
341 alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
342 false);
343
344 /*
345 * Continue with the standard ALPS protocol handling,
346 * but make sure we won't process it as an interleaved
347 * packet again, which may happen if all buttons are
348 * pressed. To avoid this let's reset the 4th bit which
349 * is normally 1.
350 */
351 psmouse->packet[3] = psmouse->packet[6] & 0xf7;
352 psmouse->pktcnt = 4;
353 }
354
355 return PSMOUSE_GOOD_DATA;
356}
357
358static void alps_flush_packet(unsigned long data)
359{
360 struct psmouse *psmouse = (struct psmouse *)data;
361
362 serio_pause_rx(psmouse->ps2dev.serio);
363
364 if (psmouse->pktcnt == 6) {
365
366 /*
367 * We did not get any more data in a reasonable amount of time.
368 * Validate the last 3 bytes and process as a standard
369 * ALPS packet.
370 */
371 if ((psmouse->packet[3] |
372 psmouse->packet[4] |
373 psmouse->packet[5]) & 0x80) {
374 dbg("refusing packet %x %x %x "
375 "(suspected interleaved ps/2)\n",
376 psmouse->packet[3], psmouse->packet[4],
377 psmouse->packet[5]);
378 } else {
379 alps_process_packet(psmouse);
380 }
381 psmouse->pktcnt = 0;
382 }
383
384 serio_continue_rx(psmouse->ps2dev.serio);
385}
386
205static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) 387static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
206{ 388{
207 struct alps_data *priv = psmouse->private; 389 struct alps_data *priv = psmouse->private;
390 const struct alps_model_info *model = priv->i;
208 391
209 if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ 392 if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
210 if (psmouse->pktcnt == 3) { 393 if (psmouse->pktcnt == 3) {
211 alps_process_packet(psmouse); 394 alps_report_bare_ps2_packet(psmouse, psmouse->packet,
395 true);
212 return PSMOUSE_FULL_PACKET; 396 return PSMOUSE_FULL_PACKET;
213 } 397 }
214 return PSMOUSE_GOOD_DATA; 398 return PSMOUSE_GOOD_DATA;
215 } 399 }
216 400
217 if ((psmouse->packet[0] & priv->i->mask0) != priv->i->byte0) 401 /* Check for PS/2 packet stuffed in the middle of ALPS packet. */
402
403 if ((model->flags & ALPS_PS2_INTERLEAVED) &&
404 psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
405 return alps_handle_interleaved_ps2(psmouse);
406 }
407
408 if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
409 dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
410 psmouse->packet[0], model->mask0, model->byte0);
218 return PSMOUSE_BAD_DATA; 411 return PSMOUSE_BAD_DATA;
412 }
219 413
220 /* Bytes 2 - 6 should have 0 in the highest bit */ 414 /* Bytes 2 - 6 should have 0 in the highest bit */
221 if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 && 415 if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
222 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) 416 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
417 dbg("refusing packet[%i] = %x\n",
418 psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]);
223 return PSMOUSE_BAD_DATA; 419 return PSMOUSE_BAD_DATA;
420 }
224 421
225 if (psmouse->pktcnt == 6) { 422 if (psmouse->pktcnt == 6) {
226 alps_process_packet(psmouse); 423 alps_process_packet(psmouse);
@@ -459,6 +656,7 @@ static void alps_disconnect(struct psmouse *psmouse)
459 struct alps_data *priv = psmouse->private; 656 struct alps_data *priv = psmouse->private;
460 657
461 psmouse_reset(psmouse); 658 psmouse_reset(psmouse);
659 del_timer_sync(&priv->timer);
462 input_unregister_device(priv->dev2); 660 input_unregister_device(priv->dev2);
463 kfree(priv); 661 kfree(priv);
464} 662}
@@ -476,6 +674,8 @@ int alps_init(struct psmouse *psmouse)
476 goto init_fail; 674 goto init_fail;
477 675
478 priv->dev2 = dev2; 676 priv->dev2 = dev2;
677 setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
678
479 psmouse->private = priv; 679 psmouse->private = priv;
480 680
481 model = alps_get_model(psmouse, &version); 681 model = alps_get_model(psmouse, &version);
@@ -487,6 +687,17 @@ int alps_init(struct psmouse *psmouse)
487 if (alps_hw_init(psmouse)) 687 if (alps_hw_init(psmouse))
488 goto init_fail; 688 goto init_fail;
489 689
690 /*
691 * Undo part of setup done for us by psmouse core since touchpad
692 * is not a relative device.
693 */
694 __clear_bit(EV_REL, dev1->evbit);
695 __clear_bit(REL_X, dev1->relbit);
696 __clear_bit(REL_Y, dev1->relbit);
697
698 /*
699 * Now set up our capabilities.
700 */
490 dev1->evbit[BIT_WORD(EV_KEY)] |= BIT_MASK(EV_KEY); 701 dev1->evbit[BIT_WORD(EV_KEY)] |= BIT_MASK(EV_KEY);
491 dev1->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH); 702 dev1->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH);
492 dev1->keybit[BIT_WORD(BTN_TOOL_FINGER)] |= BIT_MASK(BTN_TOOL_FINGER); 703 dev1->keybit[BIT_WORD(BTN_TOOL_FINGER)] |= BIT_MASK(BTN_TOOL_FINGER);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index bc87936fee1a..904ed8b3c8be 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -23,6 +23,7 @@ struct alps_data {
23 char phys[32]; /* Phys */ 23 char phys[32]; /* Phys */
24 const struct alps_model_info *i;/* Info */ 24 const struct alps_model_info *i;/* Info */
25 int prev_fin; /* Finger bit from previous packet */ 25 int prev_fin; /* Finger bit from previous packet */
26 struct timer_list timer;
26}; 27};
27 28
28#ifdef CONFIG_MOUSE_PS2_ALPS 29#ifdef CONFIG_MOUSE_PS2_ALPS
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index f479ea50919f..320b7ca48bf8 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -79,11 +79,11 @@ static void altera_ps2_close(struct serio *io)
79/* 79/*
80 * Add one device to this driver. 80 * Add one device to this driver.
81 */ 81 */
82static int altera_ps2_probe(struct platform_device *pdev) 82static int __devinit altera_ps2_probe(struct platform_device *pdev)
83{ 83{
84 struct ps2if *ps2if; 84 struct ps2if *ps2if;
85 struct serio *serio; 85 struct serio *serio;
86 int error; 86 int error, irq;
87 87
88 ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL); 88 ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
89 serio = kzalloc(sizeof(struct serio), GFP_KERNEL); 89 serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
@@ -108,11 +108,13 @@ static int altera_ps2_probe(struct platform_device *pdev)
108 goto err_free_mem; 108 goto err_free_mem;
109 } 109 }
110 110
111 ps2if->irq = platform_get_irq(pdev, 0); 111
112 if (ps2if->irq < 0) { 112 irq = platform_get_irq(pdev, 0);
113 if (irq < 0) {
113 error = -ENXIO; 114 error = -ENXIO;
114 goto err_free_mem; 115 goto err_free_mem;
115 } 116 }
117 ps2if->irq = irq;
116 118
117 if (!request_mem_region(ps2if->iomem_res->start, 119 if (!request_mem_region(ps2if->iomem_res->start,
118 resource_size(ps2if->iomem_res), pdev->name)) { 120 resource_size(ps2if->iomem_res), pdev->name)) {
@@ -155,7 +157,7 @@ static int altera_ps2_probe(struct platform_device *pdev)
155/* 157/*
156 * Remove one device from this driver. 158 * Remove one device from this driver.
157 */ 159 */
158static int altera_ps2_remove(struct platform_device *pdev) 160static int __devexit altera_ps2_remove(struct platform_device *pdev)
159{ 161{
160 struct ps2if *ps2if = platform_get_drvdata(pdev); 162 struct ps2if *ps2if = platform_get_drvdata(pdev);
161 163
@@ -175,9 +177,10 @@ static int altera_ps2_remove(struct platform_device *pdev)
175 */ 177 */
176static struct platform_driver altera_ps2_driver = { 178static struct platform_driver altera_ps2_driver = {
177 .probe = altera_ps2_probe, 179 .probe = altera_ps2_probe,
178 .remove = altera_ps2_remove, 180 .remove = __devexit_p(altera_ps2_remove),
179 .driver = { 181 .driver = {
180 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .owner = THIS_MODULE,
181 }, 184 },
182}; 185};
183 186
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 89b394183a75..92563a681d65 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
107 clk_disable(kmi->clk); 107 clk_disable(kmi->clk);
108} 108}
109 109
110static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id) 110static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
111{ 111{
112 struct amba_kmi_port *kmi; 112 struct amba_kmi_port *kmi;
113 struct serio *io; 113 struct serio *io;
@@ -134,7 +134,7 @@ static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
134 io->port_data = kmi; 134 io->port_data = kmi;
135 io->dev.parent = &dev->dev; 135 io->dev.parent = &dev->dev;
136 136
137 kmi->io = io; 137 kmi->io = io;
138 kmi->base = ioremap(dev->res.start, resource_size(&dev->res)); 138 kmi->base = ioremap(dev->res.start, resource_size(&dev->res));
139 if (!kmi->base) { 139 if (!kmi->base) {
140 ret = -ENOMEM; 140 ret = -ENOMEM;
@@ -162,7 +162,7 @@ static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
162 return ret; 162 return ret;
163} 163}
164 164
165static int amba_kmi_remove(struct amba_device *dev) 165static int __devexit amba_kmi_remove(struct amba_device *dev)
166{ 166{
167 struct amba_kmi_port *kmi = amba_get_drvdata(dev); 167 struct amba_kmi_port *kmi = amba_get_drvdata(dev);
168 168
@@ -197,10 +197,11 @@ static struct amba_id amba_kmi_idtable[] = {
197static struct amba_driver ambakmi_driver = { 197static struct amba_driver ambakmi_driver = {
198 .drv = { 198 .drv = {
199 .name = "kmi-pl050", 199 .name = "kmi-pl050",
200 .owner = THIS_MODULE,
200 }, 201 },
201 .id_table = amba_kmi_idtable, 202 .id_table = amba_kmi_idtable,
202 .probe = amba_kmi_probe, 203 .probe = amba_kmi_probe,
203 .remove = amba_kmi_remove, 204 .remove = __devexit_p(amba_kmi_remove),
204 .resume = amba_kmi_resume, 205 .resume = amba_kmi_resume,
205}; 206};
206 207
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index a6fb7a3dcc46..b54452a8c771 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -137,7 +137,7 @@ static int psif_write(struct serio *io, unsigned char val)
137 spin_lock_irqsave(&psif->lock, flags); 137 spin_lock_irqsave(&psif->lock, flags);
138 138
139 while (!(psif_readl(psif, SR) & PSIF_BIT(TXEMPTY)) && timeout--) 139 while (!(psif_readl(psif, SR) & PSIF_BIT(TXEMPTY)) && timeout--)
140 msleep(10); 140 udelay(50);
141 141
142 if (timeout >= 0) { 142 if (timeout >= 0) {
143 psif_writel(psif, THR, val); 143 psif_writel(psif, THR, val);
@@ -352,6 +352,7 @@ static struct platform_driver psif_driver = {
352 .remove = __exit_p(psif_remove), 352 .remove = __exit_p(psif_remove),
353 .driver = { 353 .driver = {
354 .name = "atmel_psif", 354 .name = "atmel_psif",
355 .owner = THIS_MODULE,
355 }, 356 },
356 .suspend = psif_suspend, 357 .suspend = psif_suspend,
357 .resume = psif_resume, 358 .resume = psif_resume,
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index bd0f92d9f40f..06addfa7cc47 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -6,7 +6,7 @@
6 * Copyright (c) 2002 Thibaut Varene <varenet@parisc-linux.org> 6 * Copyright (c) 2002 Thibaut Varene <varenet@parisc-linux.org>
7 * 7 *
8 * Pieces of code based on linux-2.4's hp_mouse.c & hp_keyb.c 8 * Pieces of code based on linux-2.4's hp_mouse.c & hp_keyb.c
9 * Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca> 9 * Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca>
10 * Copyright (c) 1999-2000 Philipp Rumpf <prumpf@tux.org> 10 * Copyright (c) 1999-2000 Philipp Rumpf <prumpf@tux.org>
11 * Copyright (c) 2000 Xavier Debacker <debackex@esiee.fr> 11 * Copyright (c) 2000 Xavier Debacker <debackex@esiee.fr>
12 * Copyright (c) 2000-2001 Thomas Marteau <marteaut@esiee.fr> 12 * Copyright (c) 2000-2001 Thomas Marteau <marteaut@esiee.fr>
@@ -326,7 +326,7 @@ static void gscps2_close(struct serio *port)
326 * @return: success/error report 326 * @return: success/error report
327 */ 327 */
328 328
329static int __init gscps2_probe(struct parisc_device *dev) 329static int __devinit gscps2_probe(struct parisc_device *dev)
330{ 330{
331 struct gscps2port *ps2port; 331 struct gscps2port *ps2port;
332 struct serio *serio; 332 struct serio *serio;
@@ -443,7 +443,7 @@ static struct parisc_driver parisc_ps2_driver = {
443 .name = "gsc_ps2", 443 .name = "gsc_ps2",
444 .id_table = gscps2_device_tbl, 444 .id_table = gscps2_device_tbl,
445 .probe = gscps2_probe, 445 .probe = gscps2_probe,
446 .remove = gscps2_remove, 446 .remove = __devexit_p(gscps2_remove),
447}; 447};
448 448
449static int __init gscps2_init(void) 449static int __init gscps2_init(void)
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 7ba9f2b2c041..6cd03ebaf5fb 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -993,10 +993,8 @@ int hil_mlc_unregister(hil_mlc *mlc)
993 993
994static int __init hil_mlc_init(void) 994static int __init hil_mlc_init(void)
995{ 995{
996 init_timer(&hil_mlcs_kicker); 996 setup_timer(&hil_mlcs_kicker, &hil_mlcs_timer, 0);
997 hil_mlcs_kicker.expires = jiffies + HZ; 997 mod_timer(&hil_mlcs_kicker, jiffies + HZ);
998 hil_mlcs_kicker.function = &hil_mlcs_timer;
999 add_timer(&hil_mlcs_kicker);
1000 998
1001 tasklet_enable(&hil_mlcs_tasklet); 999 tasklet_enable(&hil_mlcs_tasklet);
1002 1000
@@ -1005,7 +1003,7 @@ static int __init hil_mlc_init(void)
1005 1003
1006static void __exit hil_mlc_exit(void) 1004static void __exit hil_mlc_exit(void)
1007{ 1005{
1008 del_timer(&hil_mlcs_kicker); 1006 del_timer_sync(&hil_mlcs_kicker);
1009 1007
1010 tasklet_disable(&hil_mlcs_tasklet); 1008 tasklet_disable(&hil_mlcs_tasklet);
1011 tasklet_kill(&hil_mlcs_tasklet); 1009 tasklet_kill(&hil_mlcs_tasklet);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 7fbffe431bc5..64b688daf48a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -158,6 +158,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
158 }, 158 },
159 }, 159 },
160 { 160 {
161 /* Gigabyte M1022M netbook */
162 .matches = {
163 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
164 DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
165 DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
166 },
167 },
168 {
161 .matches = { 169 .matches = {
162 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 170 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
163 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), 171 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 1df02d25aca5..d84a36e545f6 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -126,6 +126,8 @@ static unsigned char i8042_suppress_kbd_ack;
126static struct platform_device *i8042_platform_device; 126static struct platform_device *i8042_platform_device;
127 127
128static irqreturn_t i8042_interrupt(int irq, void *dev_id); 128static irqreturn_t i8042_interrupt(int irq, void *dev_id);
129static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
130 struct serio *serio);
129 131
130void i8042_lock_chip(void) 132void i8042_lock_chip(void)
131{ 133{
@@ -139,6 +141,48 @@ void i8042_unlock_chip(void)
139} 141}
140EXPORT_SYMBOL(i8042_unlock_chip); 142EXPORT_SYMBOL(i8042_unlock_chip);
141 143
144int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
145 struct serio *serio))
146{
147 unsigned long flags;
148 int ret = 0;
149
150 spin_lock_irqsave(&i8042_lock, flags);
151
152 if (i8042_platform_filter) {
153 ret = -EBUSY;
154 goto out;
155 }
156
157 i8042_platform_filter = filter;
158
159out:
160 spin_unlock_irqrestore(&i8042_lock, flags);
161 return ret;
162}
163EXPORT_SYMBOL(i8042_install_filter);
164
165int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
166 struct serio *port))
167{
168 unsigned long flags;
169 int ret = 0;
170
171 spin_lock_irqsave(&i8042_lock, flags);
172
173 if (i8042_platform_filter != filter) {
174 ret = -EINVAL;
175 goto out;
176 }
177
178 i8042_platform_filter = NULL;
179
180out:
181 spin_unlock_irqrestore(&i8042_lock, flags);
182 return ret;
183}
184EXPORT_SYMBOL(i8042_remove_filter);
185
142/* 186/*
143 * The i8042_wait_read() and i8042_wait_write functions wait for the i8042 to 187 * The i8042_wait_read() and i8042_wait_write functions wait for the i8042 to
144 * be ready for reading values from it / writing values to it. 188 * be ready for reading values from it / writing values to it.
@@ -369,6 +413,31 @@ static void i8042_stop(struct serio *serio)
369} 413}
370 414
371/* 415/*
416 * i8042_filter() filters out unwanted bytes from the input data stream.
417 * It is called from i8042_interrupt and thus is running with interrupts
418 * off and i8042_lock held.
419 */
420static bool i8042_filter(unsigned char data, unsigned char str,
421 struct serio *serio)
422{
423 if (unlikely(i8042_suppress_kbd_ack)) {
424 if ((~str & I8042_STR_AUXDATA) &&
425 (data == 0xfa || data == 0xfe)) {
426 i8042_suppress_kbd_ack--;
427 dbg("Extra keyboard ACK - filtered out\n");
428 return true;
429 }
430 }
431
432 if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
433 dbg("Filtered out by platform filter\n");
434 return true;
435 }
436
437 return false;
438}
439
440/*
372 * i8042_interrupt() is the most important function in this driver - 441 * i8042_interrupt() is the most important function in this driver -
373 * it handles the interrupts from the i8042, and sends incoming bytes 442 * it handles the interrupts from the i8042, and sends incoming bytes
374 * to the upper layers. 443 * to the upper layers.
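A consumer of the new hook registers a callback with i8042_install_filter() and drops it again with i8042_remove_filter(); returning true from the callback swallows the byte before it reaches serio_interrupt(). A minimal, purely illustrative sketch (the filter function and the scancode are hypothetical; only the install/remove calls are from this patch):

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio)
{
	/* hypothetical: swallow scancode 0xe9 arriving on the KBD port */
	if (!(str & 0x20) && data == 0xe9)	/* 0x20 = AUX data status bit */
		return true;

	return false;
}

static int example_register(void)
{
	return i8042_install_filter(example_i8042_filter); /* -EBUSY if taken */
}

static void example_unregister(void)
{
	i8042_remove_filter(example_i8042_filter);
}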
@@ -377,13 +446,16 @@ static void i8042_stop(struct serio *serio)
377static irqreturn_t i8042_interrupt(int irq, void *dev_id) 446static irqreturn_t i8042_interrupt(int irq, void *dev_id)
378{ 447{
379 struct i8042_port *port; 448 struct i8042_port *port;
449 struct serio *serio;
380 unsigned long flags; 450 unsigned long flags;
381 unsigned char str, data; 451 unsigned char str, data;
382 unsigned int dfl; 452 unsigned int dfl;
383 unsigned int port_no; 453 unsigned int port_no;
454 bool filtered;
384 int ret = 1; 455 int ret = 1;
385 456
386 spin_lock_irqsave(&i8042_lock, flags); 457 spin_lock_irqsave(&i8042_lock, flags);
458
387 str = i8042_read_status(); 459 str = i8042_read_status();
388 if (unlikely(~str & I8042_STR_OBF)) { 460 if (unlikely(~str & I8042_STR_OBF)) {
389 spin_unlock_irqrestore(&i8042_lock, flags); 461 spin_unlock_irqrestore(&i8042_lock, flags);
@@ -391,8 +463,8 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
391 ret = 0; 463 ret = 0;
392 goto out; 464 goto out;
393 } 465 }
466
394 data = i8042_read_data(); 467 data = i8042_read_data();
395 spin_unlock_irqrestore(&i8042_lock, flags);
396 468
397 if (i8042_mux_present && (str & I8042_STR_AUXDATA)) { 469 if (i8042_mux_present && (str & I8042_STR_AUXDATA)) {
398 static unsigned long last_transmit; 470 static unsigned long last_transmit;
@@ -441,21 +513,19 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
441 } 513 }
442 514
443 port = &i8042_ports[port_no]; 515 port = &i8042_ports[port_no];
516 serio = port->exists ? port->serio : NULL;
444 517
445 dbg("%02x <- i8042 (interrupt, %d, %d%s%s)", 518 dbg("%02x <- i8042 (interrupt, %d, %d%s%s)",
446 data, port_no, irq, 519 data, port_no, irq,
447 dfl & SERIO_PARITY ? ", bad parity" : "", 520 dfl & SERIO_PARITY ? ", bad parity" : "",
448 dfl & SERIO_TIMEOUT ? ", timeout" : ""); 521 dfl & SERIO_TIMEOUT ? ", timeout" : "");
449 522
450 if (unlikely(i8042_suppress_kbd_ack)) 523 filtered = i8042_filter(data, str, serio);
451 if (port_no == I8042_KBD_PORT_NO && 524
452 (data == 0xfa || data == 0xfe)) { 525 spin_unlock_irqrestore(&i8042_lock, flags);
453 i8042_suppress_kbd_ack--;
454 goto out;
455 }
456 526
457 if (likely(port->exists)) 527 if (likely(port->exists && !filtered))
458 serio_interrupt(port->serio, data, dfl); 528 serio_interrupt(serio, data, dfl);
459 529
460 out: 530 out:
461 return IRQ_RETVAL(ret); 531 return IRQ_RETVAL(ret);
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index f412c69478a8..d55874e5d1c2 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -180,8 +180,8 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
180 } 180 }
181} 181}
182 182
183static inline unsigned int 183static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
184ps2_test_one(struct ps2if *ps2if, unsigned int mask) 184 unsigned int mask)
185{ 185{
186 unsigned int val; 186 unsigned int val;
187 187
@@ -197,7 +197,7 @@ ps2_test_one(struct ps2if *ps2if, unsigned int mask)
197 * Test the keyboard interface. We basically check to make sure that 197 * Test the keyboard interface. We basically check to make sure that
198 * we can drive each line to the keyboard independently of each other. 198 * we can drive each line to the keyboard independently of each other.
199 */ 199 */
200static int __init ps2_test(struct ps2if *ps2if) 200static int __devinit ps2_test(struct ps2if *ps2if)
201{ 201{
202 unsigned int stat; 202 unsigned int stat;
203 int ret = 0; 203 int ret = 0;
@@ -312,7 +312,7 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
312/* 312/*
313 * Remove one device from this driver. 313 * Remove one device from this driver.
314 */ 314 */
315static int ps2_remove(struct sa1111_dev *dev) 315static int __devexit ps2_remove(struct sa1111_dev *dev)
316{ 316{
317 struct ps2if *ps2if = sa1111_get_drvdata(dev); 317 struct ps2if *ps2if = sa1111_get_drvdata(dev);
318 318
@@ -335,7 +335,7 @@ static struct sa1111_driver ps2_driver = {
335 }, 335 },
336 .devid = SA1111_DEVID_PS2, 336 .devid = SA1111_DEVID_PS2,
337 .probe = ps2_probe, 337 .probe = ps2_probe,
338 .remove = ps2_remove, 338 .remove = __devexit_p(ps2_remove),
339}; 339};
340 340
341static int __init ps2_init(void) 341static int __init ps2_init(void)
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 9114ae1c7488..16310f368dab 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/input/tablet/wacom.h 2 * drivers/input/tablet/wacom.h
3 * 3 *
4 * USB Wacom Graphire and Wacom Intuos tablet support 4 * USB Wacom tablet support
5 * 5 *
6 * Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz> 6 * Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz>
7 * Copyright (c) 2000 Andreas Bach Aaen <abach@stofanet.dk> 7 * Copyright (c) 2000 Andreas Bach Aaen <abach@stofanet.dk>
@@ -69,6 +69,9 @@
69 * v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A) 69 * v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
70 * v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28 70 * v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
71 * v1.51 (pc) - Added support for Intuos4 71 * v1.51 (pc) - Added support for Intuos4
72 * v1.52 (pc) - Query Wacom data upon system resume
73 * - add defines for features->type
74 * - add new devices (0x9F, 0xE2, and 0XE3)
72 */ 75 */
73 76
74/* 77/*
@@ -89,9 +92,9 @@
89/* 92/*
90 * Version Information 93 * Version Information
91 */ 94 */
92#define DRIVER_VERSION "v1.51" 95#define DRIVER_VERSION "v1.52"
93#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>" 96#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
94#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver" 97#define DRIVER_DESC "USB Wacom tablet driver"
95#define DRIVER_LICENSE "GPL" 98#define DRIVER_LICENSE "GPL"
96 99
97MODULE_AUTHOR(DRIVER_AUTHOR); 100MODULE_AUTHOR(DRIVER_AUTHOR);
@@ -133,6 +136,8 @@ extern void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_w
133extern void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac); 136extern void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
134extern void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac); 137extern void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
135extern void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac); 138extern void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
139extern void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
140extern void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
136extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac); 141extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
137extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac); 142extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
138extern __u16 wacom_le16_to_cpu(unsigned char *data); 143extern __u16 wacom_le16_to_cpu(unsigned char *data);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index ea30c983a33e..072f33b3b2b0 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/input/tablet/wacom_sys.c 2 * drivers/input/tablet/wacom_sys.c
3 * 3 *
4 * USB Wacom Graphire and Wacom Intuos tablet support - system specific code 4 * USB Wacom tablet support - system specific code
5 */ 5 */
6 6
7/* 7/*
@@ -209,6 +209,7 @@ void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
209 input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) | 209 input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
210 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); 210 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
211 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) | 211 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
212 BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
212 BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2); 213 BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2);
213 input_set_abs_params(input_dev, ABS_DISTANCE, 0, wacom_wac->features->distance_max, 0, 0); 214 input_set_abs_params(input_dev, ABS_DISTANCE, 0, wacom_wac->features->distance_max, 0, 0);
214} 215}
@@ -256,6 +257,7 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
256 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) | 257 BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) |
257 BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA); 258 BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
258 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) | 259 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
260 BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
259 BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_TOOL_BRUSH) | 261 BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_TOOL_BRUSH) |
260 BIT_MASK(BTN_TOOL_PENCIL) | BIT_MASK(BTN_TOOL_AIRBRUSH) | 262 BIT_MASK(BTN_TOOL_PENCIL) | BIT_MASK(BTN_TOOL_AIRBRUSH) |
261 BIT_MASK(BTN_TOOL_LENS) | BIT_MASK(BTN_STYLUS2); 263 BIT_MASK(BTN_TOOL_LENS) | BIT_MASK(BTN_STYLUS2);
@@ -269,7 +271,8 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
269 271
270void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 272void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
271{ 273{
272 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_STYLUS2); 274 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
275 BIT_MASK(BTN_STYLUS) | BIT_MASK(BTN_STYLUS2);
273} 276}
274 277
275void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac) 278void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
@@ -277,12 +280,32 @@ void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
277 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER); 280 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER);
278} 281}
279 282
283void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
284{
285 if (wacom_wac->features->device_type == BTN_TOOL_DOUBLETAP ||
286 wacom_wac->features->device_type == BTN_TOOL_TRIPLETAP) {
287 input_set_abs_params(input_dev, ABS_RX, 0, wacom_wac->features->x_phy, 0, 0);
288 input_set_abs_params(input_dev, ABS_RY, 0, wacom_wac->features->y_phy, 0, 0);
289 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP);
290 }
291}
292
293void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
294{
295 if (wacom_wac->features->device_type == BTN_TOOL_TRIPLETAP) {
296 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_TRIPLETAP);
297 input_dev->evbit[0] |= BIT_MASK(EV_MSC);
298 input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
299 }
300}
301
280static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc, 302static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
281 struct wacom_wac *wacom_wac) 303 struct wacom_features *features)
282{ 304{
283 struct usb_device *dev = interface_to_usbdev(intf); 305 struct usb_device *dev = interface_to_usbdev(intf);
284 struct wacom_features *features = wacom_wac->features; 306 char limit = 0;
285 char limit = 0, result = 0; 307 /* result has to be defined as int for some devices */
308 int result = 0;
286 int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0; 309 int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
287 unsigned char *report; 310 unsigned char *report;
288 311
@@ -328,13 +351,24 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
328 case HID_USAGE_X: 351 case HID_USAGE_X:
329 if (usage == WCM_DESKTOP) { 352 if (usage == WCM_DESKTOP) {
330 if (finger) { 353 if (finger) {
331 features->touch_x_max = 354 features->device_type = BTN_TOOL_DOUBLETAP;
332 features->touch_y_max = 355 if (features->type == TABLETPC2FG) {
333 wacom_le16_to_cpu(&report[i + 3]); 356 /* need to reset back */
357 features->pktlen = WACOM_PKGLEN_TPC2FG;
358 features->device_type = BTN_TOOL_TRIPLETAP;
359 }
334 features->x_max = 360 features->x_max =
361 wacom_le16_to_cpu(&report[i + 3]);
362 features->x_phy =
335 wacom_le16_to_cpu(&report[i + 6]); 363 wacom_le16_to_cpu(&report[i + 6]);
336 i += 7; 364 features->unit = report[i + 9];
365 features->unitExpo = report[i + 11];
366 i += 12;
337 } else if (pen) { 367 } else if (pen) {
368 /* penabled only accepts exact bytes of data */
369 if (features->type == TABLETPC2FG)
370 features->pktlen = WACOM_PKGLEN_PENABLED;
371 features->device_type = BTN_TOOL_PEN;
338 features->x_max = 372 features->x_max =
339 wacom_le16_to_cpu(&report[i + 3]); 373 wacom_le16_to_cpu(&report[i + 3]);
340 i += 4; 374 i += 4;
@@ -350,10 +384,35 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
350 break; 384 break;
351 385
352 case HID_USAGE_Y: 386 case HID_USAGE_Y:
353 if (usage == WCM_DESKTOP) 387 if (usage == WCM_DESKTOP) {
354 features->y_max = 388 if (finger) {
355 wacom_le16_to_cpu(&report[i + 3]); 389 features->device_type = BTN_TOOL_DOUBLETAP;
356 i += 4; 390 if (features->type == TABLETPC2FG) {
391 /* need to reset back */
392 features->pktlen = WACOM_PKGLEN_TPC2FG;
393 features->device_type = BTN_TOOL_TRIPLETAP;
394 features->y_max =
395 wacom_le16_to_cpu(&report[i + 3]);
396 features->y_phy =
397 wacom_le16_to_cpu(&report[i + 6]);
398 i += 7;
399 } else {
400 features->y_max =
401 features->x_max;
402 features->y_phy =
403 wacom_le16_to_cpu(&report[i + 3]);
404 i += 4;
405 }
406 } else if (pen) {
407 /* penabled only accepts exact bytes of data */
408 if (features->type == TABLETPC2FG)
409 features->pktlen = WACOM_PKGLEN_PENABLED;
410 features->device_type = BTN_TOOL_PEN;
411 features->y_max =
412 wacom_le16_to_cpu(&report[i + 3]);
413 i += 4;
414 }
415 }
357 break; 416 break;
358 417
359 case HID_USAGE_FINGER: 418 case HID_USAGE_FINGER:
@@ -376,7 +435,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
376 break; 435 break;
377 436
378 case HID_COLLECTION: 437 case HID_COLLECTION:
379 /* reset UsagePage ans Finger */ 438 /* reset UsagePage and Finger */
380 finger = usage = 0; 439 finger = usage = 0;
381 break; 440 break;
382 } 441 }
@@ -388,43 +447,92 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
388 return result; 447 return result;
389} 448}
390 449
391static int wacom_query_tablet_data(struct usb_interface *intf) 450static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features)
392{ 451{
393 unsigned char *rep_data; 452 unsigned char *rep_data;
394 int limit = 0; 453 int limit = 0, report_id = 2;
395 int error; 454 int error = -ENOMEM;
396 455
397 rep_data = kmalloc(2, GFP_KERNEL); 456 rep_data = kmalloc(2, GFP_KERNEL);
398 if (!rep_data) 457 if (!rep_data)
399 return -ENOMEM; 458 return error;
400 459
401 do { 460 /* ask to report tablet data if it is 2FGT or not a Tablet PC */
402 rep_data[0] = 2; 461 if (features->device_type == BTN_TOOL_TRIPLETAP) {
403 rep_data[1] = 2; 462 do {
404 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, 463 rep_data[0] = 3;
405 2, rep_data, 2); 464 rep_data[1] = 4;
406 if (error >= 0) 465 report_id = 3;
407 error = usb_get_report(intf, 466 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
408 WAC_HID_FEATURE_REPORT, 2, 467 report_id, rep_data, 2);
409 rep_data, 2); 468 if (error >= 0)
410 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); 469 error = usb_get_report(intf,
470 WAC_HID_FEATURE_REPORT, report_id,
471 rep_data, 3);
472 } while ((error < 0 || rep_data[1] != 4) && limit++ < 5);
473 } else if (features->type != TABLETPC && features->type != TABLETPC2FG) {
474 do {
475 rep_data[0] = 2;
476 rep_data[1] = 2;
477 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
478 report_id, rep_data, 2);
479 if (error >= 0)
480 error = usb_get_report(intf,
481 WAC_HID_FEATURE_REPORT, report_id,
482 rep_data, 2);
483 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
484 }
411 485
412 kfree(rep_data); 486 kfree(rep_data);
413 487
414 return error < 0 ? error : 0; 488 return error < 0 ? error : 0;
415} 489}
416 490
491static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
492 struct wacom_features *features)
493{
494 int error = 0;
495 struct usb_host_interface *interface = intf->cur_altsetting;
496 struct hid_descriptor *hid_desc;
497
498 /* default device to penabled */
499 features->device_type = BTN_TOOL_PEN;
500
501 /* only Tablet PCs need to retrieve the info */
502 if ((features->type != TABLETPC) && (features->type != TABLETPC2FG))
503 goto out;
504
505 if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
506 if (usb_get_extra_descriptor(&interface->endpoint[0],
507 HID_DEVICET_REPORT, &hid_desc)) {
508 printk("wacom: can not retrieve extra class descriptor\n");
509 error = 1;
510 goto out;
511 }
512 }
513 error = wacom_parse_hid(intf, hid_desc, features);
514 if (error)
515 goto out;
516
517 /* touch device found but size is not defined. use default */
518 if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
519 features->x_max = 1023;
520 features->y_max = 1023;
521 }
522
523 out:
524 return error;
525}
526
417static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) 527static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
418{ 528{
419 struct usb_device *dev = interface_to_usbdev(intf); 529 struct usb_device *dev = interface_to_usbdev(intf);
420 struct usb_host_interface *interface = intf->cur_altsetting;
421 struct usb_endpoint_descriptor *endpoint; 530 struct usb_endpoint_descriptor *endpoint;
422 struct wacom *wacom; 531 struct wacom *wacom;
423 struct wacom_wac *wacom_wac; 532 struct wacom_wac *wacom_wac;
424 struct wacom_features *features; 533 struct wacom_features *features;
425 struct input_dev *input_dev; 534 struct input_dev *input_dev;
426 int error = -ENOMEM; 535 int error = -ENOMEM;
427 struct hid_descriptor *hid_desc;
428 536
429 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); 537 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
430 wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL); 538 wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL);
@@ -432,7 +540,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
432 if (!wacom || !input_dev || !wacom_wac) 540 if (!wacom || !input_dev || !wacom_wac)
433 goto fail1; 541 goto fail1;
434 542
435 wacom_wac->data = usb_buffer_alloc(dev, 10, GFP_KERNEL, &wacom->data_dma); 543 wacom_wac->data = usb_buffer_alloc(dev, WACOM_PKGLEN_MAX, GFP_KERNEL, &wacom->data_dma);
436 if (!wacom_wac->data) 544 if (!wacom_wac->data)
437 goto fail1; 545 goto fail1;
438 546
@@ -448,7 +556,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
448 strlcat(wacom->phys, "/input0", sizeof(wacom->phys)); 556 strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
449 557
450 wacom_wac->features = features = get_wacom_feature(id); 558 wacom_wac->features = features = get_wacom_feature(id);
451 BUG_ON(features->pktlen > 10); 559 BUG_ON(features->pktlen > WACOM_PKGLEN_MAX);
452 560
453 input_dev->name = wacom_wac->features->name; 561 input_dev->name = wacom_wac->features->name;
454 wacom->wacom_wac = wacom_wac; 562 wacom->wacom_wac = wacom_wac;
@@ -463,47 +571,24 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
463 571
464 endpoint = &intf->cur_altsetting->endpoint[0].desc; 572 endpoint = &intf->cur_altsetting->endpoint[0].desc;
465 573
466 /* Initialize touch_x_max and touch_y_max in case it is not defined */ 574 /* Retrieve the physical and logical size for OEM devices */
467 if (wacom_wac->features->type == TABLETPC) { 575 error = wacom_retrieve_hid_descriptor(intf, features);
468 features->touch_x_max = 1023; 576 if (error)
469 features->touch_y_max = 1023; 577 goto fail2;
470 } else {
471 features->touch_x_max = 0;
472 features->touch_y_max = 0;
473 }
474
475 /* TabletPC need to retrieve the physical and logical maximum from report descriptor */
476 if (wacom_wac->features->type == TABLETPC) {
477 if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
478 if (usb_get_extra_descriptor(&interface->endpoint[0],
479 HID_DEVICET_REPORT, &hid_desc)) {
480 printk("wacom: can not retrive extra class descriptor\n");
481 goto fail2;
482 }
483 }
484 error = wacom_parse_hid(intf, hid_desc, wacom_wac);
485 if (error)
486 goto fail2;
487 }
488 578
489 input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 579 input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
490 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) | 580 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOUCH);
491 BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS); 581
492 input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0); 582 input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
493 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0); 583 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
494 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0); 584 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
495 if (features->type == TABLETPC) {
496 input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP);
497 input_set_abs_params(input_dev, ABS_RX, 0, features->touch_x_max, 4, 0);
498 input_set_abs_params(input_dev, ABS_RY, 0, features->touch_y_max, 4, 0);
499 }
500 input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC); 585 input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
501 586
502 wacom_init_input_dev(input_dev, wacom_wac); 587 wacom_init_input_dev(input_dev, wacom_wac);
503 588
504 usb_fill_int_urb(wacom->irq, dev, 589 usb_fill_int_urb(wacom->irq, dev,
505 usb_rcvintpipe(dev, endpoint->bEndpointAddress), 590 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
506 wacom_wac->data, wacom_wac->features->pktlen, 591 wacom_wac->data, features->pktlen,
507 wacom_sys_irq, wacom, endpoint->bInterval); 592 wacom_sys_irq, wacom, endpoint->bInterval);
508 wacom->irq->transfer_dma = wacom->data_dma; 593 wacom->irq->transfer_dma = wacom->data_dma;
509 wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 594 wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -512,18 +597,14 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
512 if (error) 597 if (error)
513 goto fail3; 598 goto fail3;
514 599
515 /* 600 /* Note that if query fails it is not a hard failure */
516 * Ask the tablet to report tablet data if it is not a Tablet PC. 601 wacom_query_tablet_data(intf, features);
517 * Note that if query fails it is not a hard failure.
518 */
519 if (wacom_wac->features->type != TABLETPC)
520 wacom_query_tablet_data(intf);
521 602
522 usb_set_intfdata(intf, wacom); 603 usb_set_intfdata(intf, wacom);
523 return 0; 604 return 0;
524 605
525 fail3: usb_free_urb(wacom->irq); 606 fail3: usb_free_urb(wacom->irq);
526 fail2: usb_buffer_free(dev, 10, wacom_wac->data, wacom->data_dma); 607 fail2: usb_buffer_free(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
527 fail1: input_free_device(input_dev); 608 fail1: input_free_device(input_dev);
528 kfree(wacom); 609 kfree(wacom);
529 kfree(wacom_wac); 610 kfree(wacom_wac);
@@ -539,7 +620,7 @@ static void wacom_disconnect(struct usb_interface *intf)
539 usb_kill_urb(wacom->irq); 620 usb_kill_urb(wacom->irq);
540 input_unregister_device(wacom->dev); 621 input_unregister_device(wacom->dev);
541 usb_free_urb(wacom->irq); 622 usb_free_urb(wacom->irq);
542 usb_buffer_free(interface_to_usbdev(intf), 10, 623 usb_buffer_free(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
543 wacom->wacom_wac->data, wacom->data_dma); 624 wacom->wacom_wac->data, wacom->data_dma);
544 kfree(wacom->wacom_wac); 625 kfree(wacom->wacom_wac);
545 kfree(wacom); 626 kfree(wacom);
@@ -559,12 +640,16 @@ static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
559static int wacom_resume(struct usb_interface *intf) 640static int wacom_resume(struct usb_interface *intf)
560{ 641{
561 struct wacom *wacom = usb_get_intfdata(intf); 642 struct wacom *wacom = usb_get_intfdata(intf);
643 struct wacom_features *features = wacom->wacom_wac->features;
562 int rv; 644 int rv;
563 645
564 mutex_lock(&wacom->lock); 646 mutex_lock(&wacom->lock);
565 if (wacom->open) 647 if (wacom->open) {
566 rv = usb_submit_urb(wacom->irq, GFP_NOIO); 648 rv = usb_submit_urb(wacom->irq, GFP_NOIO);
567 else 649 /* switch to wacom mode if needed */
650 if (!wacom_retrieve_hid_descriptor(intf, features))
651 wacom_query_tablet_data(intf, features);
652 } else
568 rv = 0; 653 rv = 0;
569 mutex_unlock(&wacom->lock); 654 mutex_unlock(&wacom->lock);
570 655
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c896d6a21b7e..1056f149fe31 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/input/tablet/wacom_wac.c 2 * drivers/input/tablet/wacom_wac.c
3 * 3 *
4 * USB Wacom Graphire and Wacom Intuos tablet support - Wacom specific code 4 * USB Wacom tablet support - Wacom specific code
5 * 5 *
6 */ 6 */
7 7
@@ -58,16 +58,15 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
58 unsigned char *data = wacom->data; 58 unsigned char *data = wacom->data;
59 int prox, pressure; 59 int prox, pressure;
60 60
61 if (data[0] != 2) { 61 if (data[0] != WACOM_REPORT_PENABLED) {
62 dbg("wacom_pl_irq: received unknown report #%d", data[0]); 62 dbg("wacom_pl_irq: received unknown report #%d", data[0]);
63 return 0; 63 return 0;
64 } 64 }
65 65
66 prox = data[1] & 0x40; 66 prox = data[1] & 0x40;
67 67
68 wacom->id[0] = ERASER_DEVICE_ID;
69 if (prox) { 68 if (prox) {
70 69 wacom->id[0] = ERASER_DEVICE_ID;
71 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); 70 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
72 if (wacom->features->pressure_max > 255) 71 if (wacom->features->pressure_max > 255)
73 pressure = (pressure << 1) | ((data[4] >> 6) & 1); 72 pressure = (pressure << 1) | ((data[4] >> 6) & 1);
@@ -128,7 +127,7 @@ static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
128{ 127{
129 unsigned char *data = wacom->data; 128 unsigned char *data = wacom->data;
130 129
131 if (data[0] != 2) { 130 if (data[0] != WACOM_REPORT_PENABLED) {
132 printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]); 131 printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
133 return 0; 132 return 0;
134 } 133 }
@@ -155,14 +154,16 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
155{ 154{
156 unsigned char *data = wacom->data; 155 unsigned char *data = wacom->data;
157 int x, y, rw; 156 int x, y, rw;
157 static int penData = 0;
158 158
159 if (data[0] != 2) { 159 if (data[0] != WACOM_REPORT_PENABLED) {
160 dbg("wacom_graphire_irq: received unknown report #%d", data[0]); 160 dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
161 return 0; 161 return 0;
162 } 162 }
163 163
164 if (data[1] & 0x80) { 164 if (data[1] & 0x80) {
165 /* in prox and not a pad data */ 165 /* in prox and not a pad data */
166 penData = 1;
166 167
167 switch ((data[1] >> 5) & 3) { 168 switch ((data[1] >> 5) & 3) {
168 169
@@ -232,7 +233,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
232 switch (wacom->features->type) { 233 switch (wacom->features->type) {
233 case WACOM_G4: 234 case WACOM_G4:
234 if (data[7] & 0xf8) { 235 if (data[7] & 0xf8) {
235 wacom_input_sync(wcombo); /* sync last event */ 236 if (penData) {
237 wacom_input_sync(wcombo); /* sync last event */
238 if (!wacom->id[0])
239 penData = 0;
240 }
236 wacom->id[1] = PAD_DEVICE_ID; 241 wacom->id[1] = PAD_DEVICE_ID;
237 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); 242 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
238 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); 243 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
@@ -242,10 +247,15 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
242 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 247 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
243 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); 248 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
244 } else if (wacom->id[1]) { 249 } else if (wacom->id[1]) {
245 wacom_input_sync(wcombo); /* sync last event */ 250 if (penData) {
251 wacom_input_sync(wcombo); /* sync last event */
252 if (!wacom->id[0])
253 penData = 0;
254 }
246 wacom->id[1] = 0; 255 wacom->id[1] = 0;
247 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); 256 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
248 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); 257 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
258 wacom_report_rel(wcombo, REL_WHEEL, 0);
249 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); 259 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
250 wacom_report_abs(wcombo, ABS_MISC, 0); 260 wacom_report_abs(wcombo, ABS_MISC, 0);
251 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); 261 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
@@ -253,7 +263,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
253 break; 263 break;
254 case WACOM_MO: 264 case WACOM_MO:
255 if ((data[7] & 0xf8) || (data[8] & 0xff)) { 265 if ((data[7] & 0xf8) || (data[8] & 0xff)) {
256 wacom_input_sync(wcombo); /* sync last event */ 266 if (penData) {
267 wacom_input_sync(wcombo); /* sync last event */
268 if (!wacom->id[0])
269 penData = 0;
270 }
257 wacom->id[1] = PAD_DEVICE_ID; 271 wacom->id[1] = PAD_DEVICE_ID;
258 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); 272 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
259 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); 273 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -264,7 +278,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
264 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 278 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
265 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); 279 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
266 } else if (wacom->id[1]) { 280 } else if (wacom->id[1]) {
267 wacom_input_sync(wcombo); /* sync last event */ 281 if (penData) {
282 wacom_input_sync(wcombo); /* sync last event */
283 if (!wacom->id[0])
284 penData = 0;
285 }
268 wacom->id[1] = 0; 286 wacom->id[1] = 0;
269 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); 287 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
270 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); 288 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -432,7 +450,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
432 unsigned int t; 450 unsigned int t;
433 int idx = 0, result; 451 int idx = 0, result;
434 452
435 if (data[0] != 2 && data[0] != 5 && data[0] != 6 && data[0] != 12) { 453 if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_INTUOSREAD
454 && data[0] != WACOM_REPORT_INTUOSWRITE && data[0] != WACOM_REPORT_INTUOSPAD) {
436 dbg("wacom_intuos_irq: received unknown report #%d", data[0]); 455 dbg("wacom_intuos_irq: received unknown report #%d", data[0]);
437 return 0; 456 return 0;
438 } 457 }
@@ -442,7 +461,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
442 idx = data[1] & 0x01; 461 idx = data[1] & 0x01;
443 462
444 /* pad packets. Works as a second tool and is always in prox */ 463 /* pad packets. Works as a second tool and is always in prox */
445 if (data[0] == 12) { 464 if (data[0] == WACOM_REPORT_INTUOSPAD) {
446 /* initiate the pad as a device */ 465 /* initiate the pad as a device */
447 if (wacom->tool[1] != BTN_TOOL_FINGER) 466 if (wacom->tool[1] != BTN_TOOL_FINGER)
448 wacom->tool[1] = BTN_TOOL_FINGER; 467 wacom->tool[1] = BTN_TOOL_FINGER;
@@ -608,95 +627,163 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
608 return 1; 627 return 1;
609} 628}
610 629
630
631static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
632{
633 wacom_report_abs(wcombo, ABS_X,
634 (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
635 wacom_report_abs(wcombo, ABS_Y,
636 (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
637 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
638 wacom_report_key(wcombo, wacom->tool[idx], 1);
639 if (idx)
640 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
641 else
642 wacom_report_key(wcombo, BTN_TOUCH, 1);
643}
644
645static void wacom_tpc_touch_out(struct wacom_wac *wacom, void *wcombo, int idx)
646{
647 wacom_report_abs(wcombo, ABS_X, 0);
648 wacom_report_abs(wcombo, ABS_Y, 0);
649 wacom_report_abs(wcombo, ABS_MISC, 0);
650 wacom_report_key(wcombo, wacom->tool[idx], 0);
651 if (idx)
652 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
653 else
654 wacom_report_key(wcombo, BTN_TOUCH, 0);
655 return;
656}
657
658static void wacom_tpc_touch_in(struct wacom_wac *wacom, void *wcombo)
659{
660 char *data = wacom->data;
661 struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
662 static int firstFinger = 0;
663 static int secondFinger = 0;
664
665 wacom->tool[0] = BTN_TOOL_DOUBLETAP;
666 wacom->id[0] = TOUCH_DEVICE_ID;
667 wacom->tool[1] = BTN_TOOL_TRIPLETAP;
668
669 if (urb->actual_length != WACOM_PKGLEN_TPC1FG) {
670 switch (data[0]) {
671 case WACOM_REPORT_TPC1FG:
672 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
673 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
674 wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
675 wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
676 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
677 wacom_report_key(wcombo, wacom->tool[0], 1);
678 break;
679 case WACOM_REPORT_TPC2FG:
680 /* keep this byte to send proper out-prox event */
681 wacom->id[1] = data[1] & 0x03;
682
683 if (data[1] & 0x01) {
684 wacom_tpc_finger_in(wacom, wcombo, data, 0);
685 firstFinger = 1;
686 } else if (firstFinger) {
687 wacom_tpc_touch_out(wacom, wcombo, 0);
688 }
689
690 if (data[1] & 0x02) {
691 /* sync first finger data */
692 if (firstFinger)
693 wacom_input_sync(wcombo);
694
695 wacom_tpc_finger_in(wacom, wcombo, data, 1);
696 secondFinger = 1;
697 } else if (secondFinger) {
698 /* sync first finger data */
699 if (firstFinger)
700 wacom_input_sync(wcombo);
701
702 wacom_tpc_touch_out(wacom, wcombo, 1);
703 secondFinger = 0;
704 }
705 if (!(data[1] & 0x01))
706 firstFinger = 0;
707 break;
708 }
709 } else {
710 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
711 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
712 wacom_report_key(wcombo, BTN_TOUCH, 1);
713 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
714 wacom_report_key(wcombo, wacom->tool[0], 1);
715 }
716 return;
717}
718
611static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo) 719static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
612{ 720{
613 char *data = wacom->data; 721 char *data = wacom->data;
614 int prox = 0, pressure; 722 int prox = 0, pressure, idx = -1;
615 static int stylusInProx, touchInProx = 1, touchOut; 723 static int stylusInProx, touchInProx = 1, touchOut;
616 struct urb *urb = ((struct wacom_combo *)wcombo)->urb; 724 struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
617 725
618 dbg("wacom_tpc_irq: received report #%d", data[0]); 726 dbg("wacom_tpc_irq: received report #%d", data[0]);
619 727
620 if (urb->actual_length == 5 || data[0] == 6) { /* Touch data */ 728 if (urb->actual_length == WACOM_PKGLEN_TPC1FG || /* single touch */
621 if (urb->actual_length == 5) { /* with touch */ 729 data[0] == WACOM_REPORT_TPC1FG || /* single touch */
622 prox = data[0] & 0x03; 730 data[0] == WACOM_REPORT_TPC2FG) { /* 2FG touch */
731 if (urb->actual_length == WACOM_PKGLEN_TPC1FG) { /* with touch */
732 prox = data[0] & 0x01;
623 } else { /* with capacity */ 733 } else { /* with capacity */
624 prox = data[1] & 0x03; 734 if (data[0] == WACOM_REPORT_TPC1FG)
735 /* single touch */
736 prox = data[1] & 0x01;
737 else
738 /* 2FG touch data */
739 prox = data[1] & 0x03;
625 } 740 }
626 741
627 if (!stylusInProx) { /* stylus not in prox */ 742 if (!stylusInProx) { /* stylus not in prox */
628 if (prox) { 743 if (prox) {
629 if (touchInProx) { 744 if (touchInProx) {
630 wacom->tool[1] = BTN_TOOL_DOUBLETAP; 745 wacom_tpc_touch_in(wacom, wcombo);
631 wacom->id[0] = TOUCH_DEVICE_ID;
632 if (urb->actual_length != 5) {
633 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
634 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
635 wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
636 wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
637 } else {
638 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
639 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
640 wacom_report_key(wcombo, BTN_TOUCH, 1);
641 }
642 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
643 wacom_report_key(wcombo, wacom->tool[1], prox & 0x01);
644 touchOut = 1; 746 touchOut = 1;
645 return 1; 747 return 1;
646 } 748 }
647 } else { 749 } else {
648 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); 750 /* 2FGT out-prox */
649 wacom_report_key(wcombo, wacom->tool[1], prox & 0x01); 751 if (data[0] == WACOM_REPORT_TPC2FG) {
650 wacom_report_key(wcombo, BTN_TOUCH, 0); 752 idx = (wacom->id[1] & 0x01) - 1;
753 if (idx == 0) {
754 wacom_tpc_touch_out(wacom, wcombo, idx);
755 /* sync first finger event */
756 if (wacom->id[1] & 0x02)
757 wacom_input_sync(wcombo);
758 }
759 idx = (wacom->id[1] & 0x02) - 1;
760 if (idx == 1)
761 wacom_tpc_touch_out(wacom, wcombo, idx);
762 } else /* one finger touch */
763 wacom_tpc_touch_out(wacom, wcombo, 0);
651 touchOut = 0; 764 touchOut = 0;
652 touchInProx = 1; 765 touchInProx = 1;
653 return 1; 766 return 1;
654 } 767 }
655 } else if (touchOut || !prox) { /* force touch out-prox */ 768 } else if (touchOut || !prox) { /* force touch out-prox */
656 wacom_report_abs(wcombo, ABS_MISC, TOUCH_DEVICE_ID); 769 wacom_tpc_touch_out(wacom, wcombo, 0);
657 wacom_report_key(wcombo, wacom->tool[1], 0);
658 wacom_report_key(wcombo, BTN_TOUCH, 0);
659 touchOut = 0; 770 touchOut = 0;
660 touchInProx = 1; 771 touchInProx = 1;
661 return 1; 772 return 1;
662 } 773 }
663 } else if (data[0] == 2) { /* Penabled */ 774 } else if (data[0] == WACOM_REPORT_PENABLED) { /* Penabled */
664 prox = data[1] & 0x20; 775 prox = data[1] & 0x20;
665 776
666 touchInProx = 0; 777 touchInProx = 0;
667 778
668 wacom->id[0] = ERASER_DEVICE_ID;
669
670 /*
671 * if going from out of proximity into proximity select between the eraser
672 * and the pen based on the state of the stylus2 button, choose eraser if
673 * pressed else choose pen. if not a proximity change from out to in, send
674 * an out of proximity for previous tool then a in for new tool.
675 */
676 if (prox) { /* in prox */ 779 if (prox) { /* in prox */
677 if (!wacom->tool[0]) { 780 if (!wacom->id[0]) {
678 /* Going into proximity select tool */ 781 /* Going into proximity select tool */
679 wacom->tool[1] = (data[1] & 0x08) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; 782 wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
680 if (wacom->tool[1] == BTN_TOOL_PEN) 783 if (wacom->tool[0] == BTN_TOOL_PEN)
681 wacom->id[0] = STYLUS_DEVICE_ID; 784 wacom->id[0] = STYLUS_DEVICE_ID;
682 } else if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[1] & 0x08)) { 785 else
683 /* 786 wacom->id[0] = ERASER_DEVICE_ID;
684 * was entered with stylus2 pressed
685 * report out proximity for previous tool
686 */
687 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
688 wacom_report_key(wcombo, wacom->tool[1], 0);
689 wacom_input_sync(wcombo);
690
691 /* set new tool */
692 wacom->tool[1] = BTN_TOOL_PEN;
693 wacom->id[0] = STYLUS_DEVICE_ID;
694 return 0;
695 }
696 if (wacom->tool[1] != BTN_TOOL_RUBBER) {
697 /* Unknown tool selected default to pen tool */
698 wacom->tool[1] = BTN_TOOL_PEN;
699 wacom->id[0] = STYLUS_DEVICE_ID;
700 } 787 }
701 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); 788 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
702 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); 789 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
@@ -706,17 +793,21 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
706 if (pressure < 0) 793 if (pressure < 0)
707 pressure = wacom->features->pressure_max + pressure + 1; 794 pressure = wacom->features->pressure_max + pressure + 1;
708 wacom_report_abs(wcombo, ABS_PRESSURE, pressure); 795 wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
709 wacom_report_key(wcombo, BTN_TOUCH, pressure); 796 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
710 } else { 797 } else {
798 wacom_report_abs(wcombo, ABS_X, 0);
799 wacom_report_abs(wcombo, ABS_Y, 0);
711 wacom_report_abs(wcombo, ABS_PRESSURE, 0); 800 wacom_report_abs(wcombo, ABS_PRESSURE, 0);
712 wacom_report_key(wcombo, BTN_STYLUS, 0); 801 wacom_report_key(wcombo, BTN_STYLUS, 0);
713 wacom_report_key(wcombo, BTN_STYLUS2, 0); 802 wacom_report_key(wcombo, BTN_STYLUS2, 0);
714 wacom_report_key(wcombo, BTN_TOUCH, 0); 803 wacom_report_key(wcombo, BTN_TOUCH, 0);
804 wacom->id[0] = 0;
805 /* pen is out so touch can be enabled now */
806 touchInProx = 1;
715 } 807 }
716 wacom_report_key(wcombo, wacom->tool[1], prox); 808 wacom_report_key(wcombo, wacom->tool[0], prox);
717 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); 809 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
718 stylusInProx = prox; 810 stylusInProx = prox;
719 wacom->tool[0] = prox;
720 return 1; 811 return 1;
721 } 812 }
722 return 0; 813 return 0;
@@ -751,6 +842,7 @@ int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo)
751 return wacom_intuos_irq(wacom_wac, wcombo); 842 return wacom_intuos_irq(wacom_wac, wcombo);
752 843
753 case TABLETPC: 844 case TABLETPC:
845 case TABLETPC2FG:
754 return wacom_tpc_irq(wacom_wac, wcombo); 846 return wacom_tpc_irq(wacom_wac, wcombo);
755 847
756 default: 848 default:
@@ -791,9 +883,17 @@ void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_w
791 input_dev_i4s(input_dev, wacom_wac); 883 input_dev_i4s(input_dev, wacom_wac);
792 input_dev_i(input_dev, wacom_wac); 884 input_dev_i(input_dev, wacom_wac);
793 break; 885 break;
886 case TABLETPC2FG:
887 input_dev_tpc2fg(input_dev, wacom_wac);
888 /* fall through */
889 case TABLETPC:
890 input_dev_tpc(input_dev, wacom_wac);
891 if (wacom_wac->features->device_type != BTN_TOOL_PEN)
892 break; /* no need to process stylus stuff */
893
894 /* fall through */
794 case PL: 895 case PL:
795 case PTU: 896 case PTU:
796 case TABLETPC:
797 input_dev_pl(input_dev, wacom_wac); 897 input_dev_pl(input_dev, wacom_wac);
798 /* fall through */ 898 /* fall through */
799 case PENPARTNER: 899 case PENPARTNER:
@@ -804,66 +904,69 @@ void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_w
804} 904}
805 905
806static struct wacom_features wacom_features[] = { 906static struct wacom_features wacom_features[] = {
807 { "Wacom Penpartner", 7, 5040, 3780, 255, 0, PENPARTNER }, 907 { "Wacom Penpartner", WACOM_PKGLEN_PENPRTN, 5040, 3780, 255, 0, PENPARTNER },
808 { "Wacom Graphire", 8, 10206, 7422, 511, 63, GRAPHIRE }, 908 { "Wacom Graphire", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE },
809 { "Wacom Graphire2 4x5", 8, 10206, 7422, 511, 63, GRAPHIRE }, 909 { "Wacom Graphire2 4x5", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE },
810 { "Wacom Graphire2 5x7", 8, 13918, 10206, 511, 63, GRAPHIRE }, 910 { "Wacom Graphire2 5x7", WACOM_PKGLEN_GRAPHIRE, 13918, 10206, 511, 63, GRAPHIRE },
811 { "Wacom Graphire3", 8, 10208, 7424, 511, 63, GRAPHIRE }, 911 { "Wacom Graphire3", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, GRAPHIRE },
812 { "Wacom Graphire3 6x8", 8, 16704, 12064, 511, 63, GRAPHIRE }, 912 { "Wacom Graphire3 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE },
813 { "Wacom Graphire4 4x5", 8, 10208, 7424, 511, 63, WACOM_G4 }, 913 { "Wacom Graphire4 4x5", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, WACOM_G4 },
814 { "Wacom Graphire4 6x8", 8, 16704, 12064, 511, 63, WACOM_G4 }, 914 { "Wacom Graphire4 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, WACOM_G4 },
815 { "Wacom BambooFun 4x5", 9, 14760, 9225, 511, 63, WACOM_MO }, 915 { "Wacom BambooFun 4x5", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO },
816 { "Wacom BambooFun 6x8", 9, 21648, 13530, 511, 63, WACOM_MO }, 916 { "Wacom BambooFun 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 511, 63, WACOM_MO },
817 { "Wacom Bamboo1 Medium",8, 16704, 12064, 511, 63, GRAPHIRE }, 917 { "Wacom Bamboo1 Medium", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE },
818 { "Wacom Volito", 8, 5104, 3712, 511, 63, GRAPHIRE }, 918 { "Wacom Volito", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
819 { "Wacom PenStation2", 8, 3250, 2320, 255, 63, GRAPHIRE }, 919 { "Wacom PenStation2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 255, 63, GRAPHIRE },
820 { "Wacom Volito2 4x5", 8, 5104, 3712, 511, 63, GRAPHIRE }, 920 { "Wacom Volito2 4x5", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
821 { "Wacom Volito2 2x3", 8, 3248, 2320, 511, 63, GRAPHIRE }, 921 { "Wacom Volito2 2x3", WACOM_PKGLEN_GRAPHIRE, 3248, 2320, 511, 63, GRAPHIRE },
822 { "Wacom PenPartner2", 8, 3250, 2320, 511, 63, GRAPHIRE }, 922 { "Wacom PenPartner2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 511, 63, GRAPHIRE },
823 { "Wacom Bamboo", 9, 14760, 9225, 511, 63, WACOM_MO }, 923 { "Wacom Bamboo", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO },
824 { "Wacom Bamboo1", 8, 5104, 3712, 511, 63, GRAPHIRE }, 924 { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
825 { "Wacom Intuos 4x5", 10, 12700, 10600, 1023, 31, INTUOS }, 925 { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS },
826 { "Wacom Intuos 6x8", 10, 20320, 16240, 1023, 31, INTUOS }, 926 { "Wacom Intuos 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
827 { "Wacom Intuos 9x12", 10, 30480, 24060, 1023, 31, INTUOS }, 927 { "Wacom Intuos 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS },
828 { "Wacom Intuos 12x12", 10, 30480, 31680, 1023, 31, INTUOS }, 928 { "Wacom Intuos 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS },
829 { "Wacom Intuos 12x18", 10, 45720, 31680, 1023, 31, INTUOS }, 929 { "Wacom Intuos 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS },
830 { "Wacom PL400", 8, 5408, 4056, 255, 0, PL }, 930 { "Wacom PL400", WACOM_PKGLEN_GRAPHIRE, 5408, 4056, 255, 0, PL },
831 { "Wacom PL500", 8, 6144, 4608, 255, 0, PL }, 931 { "Wacom PL500", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 255, 0, PL },
832 { "Wacom PL600", 8, 6126, 4604, 255, 0, PL }, 932 { "Wacom PL600", WACOM_PKGLEN_GRAPHIRE, 6126, 4604, 255, 0, PL },
833 { "Wacom PL600SX", 8, 6260, 5016, 255, 0, PL }, 933 { "Wacom PL600SX", WACOM_PKGLEN_GRAPHIRE, 6260, 5016, 255, 0, PL },
834 { "Wacom PL550", 8, 6144, 4608, 511, 0, PL }, 934 { "Wacom PL550", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 511, 0, PL },
835 { "Wacom PL800", 8, 7220, 5780, 511, 0, PL }, 935 { "Wacom PL800", WACOM_PKGLEN_GRAPHIRE, 7220, 5780, 511, 0, PL },
836 { "Wacom PL700", 8, 6758, 5406, 511, 0, PL }, 936 { "Wacom PL700", WACOM_PKGLEN_GRAPHIRE, 6758, 5406, 511, 0, PL },
837 { "Wacom PL510", 8, 6282, 4762, 511, 0, PL }, 937 { "Wacom PL510", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL },
838 { "Wacom DTU710", 8, 34080, 27660, 511, 0, PL }, 938 { "Wacom DTU710", WACOM_PKGLEN_GRAPHIRE, 34080, 27660, 511, 0, PL },
839 { "Wacom DTF521", 8, 6282, 4762, 511, 0, PL }, 939 { "Wacom DTF521", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL },
840 { "Wacom DTF720", 8, 6858, 5506, 511, 0, PL }, 940 { "Wacom DTF720", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL },
841 { "Wacom DTF720a", 8, 6858, 5506, 511, 0, PL }, 941 { "Wacom DTF720a", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL },
842 { "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU }, 942 { "Wacom Cintiq Partner", WACOM_PKGLEN_GRAPHIRE, 20480, 15360, 511, 0, PTU },
843 { "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS }, 943 { "Wacom Intuos2 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS },
844 { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS }, 944 { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
845 { "Wacom Intuos2 9x12", 10, 30480, 24060, 1023, 31, INTUOS }, 945 { "Wacom Intuos2 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS },
846 { "Wacom Intuos2 12x12", 10, 30480, 31680, 1023, 31, INTUOS }, 946 { "Wacom Intuos2 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS },
847 { "Wacom Intuos2 12x18", 10, 45720, 31680, 1023, 31, INTUOS }, 947 { "Wacom Intuos2 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS },
848 { "Wacom Intuos3 4x5", 10, 25400, 20320, 1023, 63, INTUOS3S }, 948 { "Wacom Intuos3 4x5", WACOM_PKGLEN_INTUOS, 25400, 20320, 1023, 63, INTUOS3S },
849 { "Wacom Intuos3 6x8", 10, 40640, 30480, 1023, 63, INTUOS3 }, 949 { "Wacom Intuos3 6x8", WACOM_PKGLEN_INTUOS, 40640, 30480, 1023, 63, INTUOS3 },
850 { "Wacom Intuos3 9x12", 10, 60960, 45720, 1023, 63, INTUOS3 }, 950 { "Wacom Intuos3 9x12", WACOM_PKGLEN_INTUOS, 60960, 45720, 1023, 63, INTUOS3 },
851 { "Wacom Intuos3 12x12", 10, 60960, 60960, 1023, 63, INTUOS3L }, 951 { "Wacom Intuos3 12x12", WACOM_PKGLEN_INTUOS, 60960, 60960, 1023, 63, INTUOS3L },
852 { "Wacom Intuos3 12x19", 10, 97536, 60960, 1023, 63, INTUOS3L }, 952 { "Wacom Intuos3 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 1023, 63, INTUOS3L },
853 { "Wacom Intuos3 6x11", 10, 54204, 31750, 1023, 63, INTUOS3 }, 953 { "Wacom Intuos3 6x11", WACOM_PKGLEN_INTUOS, 54204, 31750, 1023, 63, INTUOS3 },
854 { "Wacom Intuos3 4x6", 10, 31496, 19685, 1023, 63, INTUOS3S }, 954 { "Wacom Intuos3 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 1023, 63, INTUOS3S },
855 { "Wacom Intuos4 4x6", 10, 31496, 19685, 2047, 63, INTUOS4S }, 955 { "Wacom Intuos4 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, 63, INTUOS4S },
856 { "Wacom Intuos4 6x9", 10, 44704, 27940, 2047, 63, INTUOS4 }, 956 { "Wacom Intuos4 6x9", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS4 },
857 { "Wacom Intuos4 8x13", 10, 65024, 40640, 2047, 63, INTUOS4L }, 957 { "Wacom Intuos4 8x13", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, 63, INTUOS4L },
858 { "Wacom Intuos4 12x19", 10, 97536, 60960, 2047, 63, INTUOS4L }, 958 { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 63, INTUOS4L },
859 { "Wacom Cintiq 21UX", 10, 87200, 65600, 1023, 63, CINTIQ }, 959 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 63, CINTIQ },
860 { "Wacom Cintiq 20WSX", 10, 86680, 54180, 1023, 63, WACOM_BEE }, 960 { "Wacom Cintiq 20WSX", WACOM_PKGLEN_INTUOS, 86680, 54180, 1023, 63, WACOM_BEE },
861 { "Wacom Cintiq 12WX", 10, 53020, 33440, 1023, 63, WACOM_BEE }, 961 { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE },
862 { "Wacom DTU1931", 8, 37832, 30305, 511, 0, PL }, 962 { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL },
863 { "Wacom ISDv4 90", 8, 26202, 16325, 255, 0, TABLETPC }, 963 { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
864 { "Wacom ISDv4 93", 8, 26202, 16325, 255, 0, TABLETPC }, 964 { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
865 { "Wacom ISDv4 9A", 8, 26202, 16325, 255, 0, TABLETPC }, 965 { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
866 { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS }, 966 { "Wacom ISDv4 9F", WACOM_PKGLEN_PENABLED, 26202, 16325, 255, 0, TABLETPC },
967 { "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG },
968 { "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG },
969 { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
867 { } 970 { }
868}; 971};
869 972
@@ -927,6 +1030,9 @@ static struct usb_device_id wacom_ids[] = {
927 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x90) }, 1030 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x90) },
928 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x93) }, 1031 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x93) },
929 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9A) }, 1032 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9A) },
1033 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9F) },
1034 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xE2) },
1035 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xE3) },
930 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x47) }, 1036 { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x47) },
931 { } 1037 { }
932}; 1038};
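The two-finger (WACOM_REPORT_TPC2FG) handling added above packs each finger's X and Y into a byte pair with the top bit of the high byte masked off. A minimal stand-alone sketch of that decode, with byte offsets taken from wacom_tpc_finger_in() in the patch and everything else (function name, fabricated packet contents) hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	/* Decode one finger's coordinates from a WACOM_REPORT_TPC2FG packet;
	 * idx is 0 for the first finger and 1 for the second.  Offsets mirror
	 * wacom_tpc_finger_in() above. */
	static void tpc2fg_decode(const uint8_t *data, int idx, int *x, int *y)
	{
		*x = (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8);
		*y = (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8);
	}

	int main(void)
	{
		/* fabricated 14-byte packet (WACOM_PKGLEN_TPC2FG), report ID 13 */
		uint8_t pkt[14] = { 13, 0x03, 0x34, 0x12, 0x78, 0x56,
				    0xcd, 0x2a, 0x21, 0x43, 0, 0, 0, 0 };
		int x, y;

		tpc2fg_decode(pkt, 0, &x, &y);
		printf("finger 0: x=0x%x y=0x%x\n", x, y);	/* x=0x1234 y=0x2acd */
		return 0;
	}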
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index c10235aba7e5..ee01e1902785 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -9,12 +9,33 @@
9#ifndef WACOM_WAC_H 9#ifndef WACOM_WAC_H
10#define WACOM_WAC_H 10#define WACOM_WAC_H
11 11
12/* maximum packet length for USB devices */
13#define WACOM_PKGLEN_MAX 32
14
15/* packet length for individual models */
16#define WACOM_PKGLEN_PENPRTN 7
17#define WACOM_PKGLEN_GRAPHIRE 8
18#define WACOM_PKGLEN_BBFUN 9
19#define WACOM_PKGLEN_INTUOS 10
20#define WACOM_PKGLEN_PENABLED 8
21#define WACOM_PKGLEN_TPC1FG 5
22#define WACOM_PKGLEN_TPC2FG 14
23
24/* device IDs */
12#define STYLUS_DEVICE_ID 0x02 25#define STYLUS_DEVICE_ID 0x02
13#define TOUCH_DEVICE_ID 0x03 26#define TOUCH_DEVICE_ID 0x03
14#define CURSOR_DEVICE_ID 0x06 27#define CURSOR_DEVICE_ID 0x06
15#define ERASER_DEVICE_ID 0x0A 28#define ERASER_DEVICE_ID 0x0A
16#define PAD_DEVICE_ID 0x0F 29#define PAD_DEVICE_ID 0x0F
17 30
31/* wacom data packet report IDs */
32#define WACOM_REPORT_PENABLED 2
33#define WACOM_REPORT_INTUOSREAD 5
34#define WACOM_REPORT_INTUOSWRITE 6
35#define WACOM_REPORT_INTUOSPAD 12
36#define WACOM_REPORT_TPC1FG 6
37#define WACOM_REPORT_TPC2FG 13
38
18enum { 39enum {
19 PENPARTNER = 0, 40 PENPARTNER = 0,
20 GRAPHIRE, 41 GRAPHIRE,
@@ -32,6 +53,7 @@ enum {
32 WACOM_BEE, 53 WACOM_BEE,
33 WACOM_MO, 54 WACOM_MO,
34 TABLETPC, 55 TABLETPC,
56 TABLETPC2FG,
35 MAX_TYPE 57 MAX_TYPE
36}; 58};
37 59
@@ -43,8 +65,11 @@ struct wacom_features {
43 int pressure_max; 65 int pressure_max;
44 int distance_max; 66 int distance_max;
45 int type; 67 int type;
46 int touch_x_max; 68 int device_type;
47 int touch_y_max; 69 int x_phy;
70 int y_phy;
71 unsigned char unit;
72 unsigned char unitExpo;
48}; 73};
49 74
50struct wacom_wac { 75struct wacom_wac {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 32fc8ba039aa..dfafc76da4fb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -450,6 +450,18 @@ config TOUCHSCREEN_USB_COMPOSITE
450 To compile this driver as a module, choose M here: the 450 To compile this driver as a module, choose M here: the
451 module will be called usbtouchscreen. 451 module will be called usbtouchscreen.
452 452
453config TOUCHSCREEN_MC13783
454 tristate "Freescale MC13783 touchscreen input driver"
455 depends on MFD_MC13783
456 help
 457 Say Y here if you have a Freescale MC13783 PMIC on your
 458 board and want to use its touchscreen.
459
460 If unsure, say N.
461
462 To compile this driver as a module, choose M here: the
463 module will be called mc13783_ts.
464
453config TOUCHSCREEN_USB_EGALAX 465config TOUCHSCREEN_USB_EGALAX
454 default y 466 default y
455 bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED 467 bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f1f59c9e1211..d61a3b4def9a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
18obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o 18obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
19obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o 19obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
20obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o 20obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
21obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
21obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o 22obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
22obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o 23obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
23obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o 24obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
new file mode 100644
index 000000000000..be115b3b65eb
--- /dev/null
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -0,0 +1,258 @@
1/*
2 * Driver for the Freescale Semiconductor MC13783 touchscreen.
3 *
4 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
5 * Copyright (C) 2009 Sascha Hauer, Pengutronix
6 *
7 * Initial development of this code was funded by
8 * Phytec Messtechnik GmbH, http://www.phytec.de/
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 */
14#include <linux/platform_device.h>
15#include <linux/mfd/mc13783.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/input.h>
19#include <linux/sched.h>
20#include <linux/init.h>
21
22#define MC13783_TS_NAME "mc13783-ts"
23
24#define DEFAULT_SAMPLE_TOLERANCE 300
25
26static unsigned int sample_tolerance = DEFAULT_SAMPLE_TOLERANCE;
27module_param(sample_tolerance, uint, S_IRUGO | S_IWUSR);
28MODULE_PARM_DESC(sample_tolerance,
29 "If the minimal and maximal value read out for one axis (out "
30 "of three) differ by this value (default: "
31 __stringify(DEFAULT_SAMPLE_TOLERANCE) ") or more, the reading "
32 "is supposed to be wrong and is discarded. Set to 0 to "
33 "disable this check.");
34
35struct mc13783_ts_priv {
36 struct input_dev *idev;
37 struct mc13783 *mc13783;
38 struct delayed_work work;
39 struct workqueue_struct *workq;
40 unsigned int sample[4];
41};
42
43static irqreturn_t mc13783_ts_handler(int irq, void *data)
44{
45 struct mc13783_ts_priv *priv = data;
46
47 mc13783_ackirq(priv->mc13783, irq);
48
49 /*
 50 * Kick off reading coordinates. Note that if the work is already
 51 * queued for future execution (it rearms itself) it will not
 52 * be rescheduled for immediate execution here. However the rearm
 53 * delay is HZ / 50, which is acceptable.
54 */
55 queue_delayed_work(priv->workq, &priv->work, 0);
56
57 return IRQ_HANDLED;
58}
59
60#define sort3(a0, a1, a2) ({ \
61 if (a0 > a1) \
62 swap(a0, a1); \
63 if (a1 > a2) \
64 swap(a1, a2); \
65 if (a0 > a1) \
66 swap(a0, a1); \
67 })
68
69static void mc13783_ts_report_sample(struct mc13783_ts_priv *priv)
70{
71 struct input_dev *idev = priv->idev;
72 int x0, x1, x2, y0, y1, y2;
73 int cr0, cr1;
74
75 /*
76 * the values are 10-bit wide only, but the two least significant
77 * bits are for future 12 bit use and reading yields 0
78 */
79 x0 = priv->sample[0] & 0xfff;
80 x1 = priv->sample[1] & 0xfff;
81 x2 = priv->sample[2] & 0xfff;
82 y0 = priv->sample[3] & 0xfff;
83 y1 = (priv->sample[0] >> 12) & 0xfff;
84 y2 = (priv->sample[1] >> 12) & 0xfff;
85 cr0 = (priv->sample[2] >> 12) & 0xfff;
86 cr1 = (priv->sample[3] >> 12) & 0xfff;
87
88 dev_dbg(&idev->dev,
89 "x: (% 4d,% 4d,% 4d) y: (% 4d, % 4d,% 4d) cr: (% 4d, % 4d)\n",
90 x0, x1, x2, y0, y1, y2, cr0, cr1);
91
92 sort3(x0, x1, x2);
93 sort3(y0, y1, y2);
94
95 cr0 = (cr0 + cr1) / 2;
96
97 if (!cr0 || !sample_tolerance ||
98 (x2 - x0 < sample_tolerance &&
99 y2 - y0 < sample_tolerance)) {
100 /* report the median coordinate and average pressure */
101 if (cr0) {
102 input_report_abs(idev, ABS_X, x1);
103 input_report_abs(idev, ABS_Y, y1);
104
105 dev_dbg(&idev->dev, "report (%d, %d, %d)\n",
106 x1, y1, 0x1000 - cr0);
107 queue_delayed_work(priv->workq, &priv->work, HZ / 50);
108 } else
109 dev_dbg(&idev->dev, "report release\n");
110
111 input_report_abs(idev, ABS_PRESSURE,
112 cr0 ? 0x1000 - cr0 : cr0);
113 input_report_key(idev, BTN_TOUCH, cr0);
114 input_sync(idev);
115 } else
116 dev_dbg(&idev->dev, "discard event\n");
117}
118
119static void mc13783_ts_work(struct work_struct *work)
120{
121 struct mc13783_ts_priv *priv =
122 container_of(work, struct mc13783_ts_priv, work.work);
123 unsigned int mode = MC13783_ADC_MODE_TS;
124 unsigned int channel = 12;
125
126 if (mc13783_adc_do_conversion(priv->mc13783,
127 mode, channel, priv->sample) == 0)
128 mc13783_ts_report_sample(priv);
129}
130
131static int mc13783_ts_open(struct input_dev *dev)
132{
133 struct mc13783_ts_priv *priv = input_get_drvdata(dev);
134 int ret;
135
136 mc13783_lock(priv->mc13783);
137
138 mc13783_ackirq(priv->mc13783, MC13783_IRQ_TS);
139
140 ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_TS,
141 mc13783_ts_handler, MC13783_TS_NAME, priv);
142 if (ret)
143 goto out;
144
145 ret = mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
146 MC13783_ADC0_TSMOD_MASK, MC13783_ADC0_TSMOD0);
147 if (ret)
148 mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
149out:
150 mc13783_unlock(priv->mc13783);
151 return ret;
152}
153
154static void mc13783_ts_close(struct input_dev *dev)
155{
156 struct mc13783_ts_priv *priv = input_get_drvdata(dev);
157
158 mc13783_lock(priv->mc13783);
159 mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
160 MC13783_ADC0_TSMOD_MASK, 0);
161 mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
162 mc13783_unlock(priv->mc13783);
163
164 cancel_delayed_work_sync(&priv->work);
165}
166
167static int __init mc13783_ts_probe(struct platform_device *pdev)
168{
169 struct mc13783_ts_priv *priv;
170 struct input_dev *idev;
171 int ret = -ENOMEM;
172
173 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
174 idev = input_allocate_device();
175 if (!priv || !idev)
176 goto err_free_mem;
177
178 INIT_DELAYED_WORK(&priv->work, mc13783_ts_work);
179 priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
180 priv->idev = idev;
181
182 /*
 183 * We need a separate workqueue because mc13783_adc_do_conversion
184 * uses keventd and thus would deadlock.
185 */
186 priv->workq = create_singlethread_workqueue("mc13783_ts");
187 if (!priv->workq)
188 goto err_free_mem;
189
190 idev->name = MC13783_TS_NAME;
191 idev->dev.parent = &pdev->dev;
192
193 idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
194 idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
195 input_set_abs_params(idev, ABS_X, 0, 0xfff, 0, 0);
196 input_set_abs_params(idev, ABS_Y, 0, 0xfff, 0, 0);
197 input_set_abs_params(idev, ABS_PRESSURE, 0, 0xfff, 0, 0);
198
199 idev->open = mc13783_ts_open;
200 idev->close = mc13783_ts_close;
201
202 input_set_drvdata(idev, priv);
203
204 ret = input_register_device(priv->idev);
205 if (ret) {
206 dev_err(&pdev->dev,
207 "register input device failed with %d\n", ret);
208 goto err_destroy_wq;
209 }
210
211 platform_set_drvdata(pdev, priv);
212 return 0;
213
214err_destroy_wq:
215 destroy_workqueue(priv->workq);
216err_free_mem:
217 input_free_device(idev);
218 kfree(priv);
219 return ret;
220}
221
222static int __devexit mc13783_ts_remove(struct platform_device *pdev)
223{
224 struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
225
226 platform_set_drvdata(pdev, NULL);
227
228 destroy_workqueue(priv->workq);
229 input_unregister_device(priv->idev);
230 kfree(priv);
231
232 return 0;
233}
234
235static struct platform_driver mc13783_ts_driver = {
236 .remove = __devexit_p(mc13783_ts_remove),
237 .driver = {
238 .owner = THIS_MODULE,
239 .name = MC13783_TS_NAME,
240 },
241};
242
243static int __init mc13783_ts_init(void)
244{
245 return platform_driver_probe(&mc13783_ts_driver, &mc13783_ts_probe);
246}
247module_init(mc13783_ts_init);
248
249static void __exit mc13783_ts_exit(void)
250{
251 platform_driver_unregister(&mc13783_ts_driver);
252}
253module_exit(mc13783_ts_exit);
254
255MODULE_DESCRIPTION("MC13783 input touchscreen driver");
256MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
257MODULE_LICENSE("GPL v2");
258MODULE_ALIAS("platform:" MC13783_TS_NAME);
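The filtering in mc13783_ts_report_sample() above boils down to: sort the three samples per axis, report the median, and throw the whole reading away when the spread on either axis reaches sample_tolerance. A simplified user-space sketch of that policy (the driver's pressure handling is omitted; the helper names are made up):

	#include <stdbool.h>

	static void sort3(int v[3])
	{
		int t;

		if (v[0] > v[1]) { t = v[0]; v[0] = v[1]; v[1] = t; }
		if (v[1] > v[2]) { t = v[1]; v[1] = v[2]; v[2] = t; }
		if (v[0] > v[1]) { t = v[0]; v[0] = v[1]; v[1] = t; }
	}

	/* Returns true and stores the median in *coord when the three samples
	 * of one axis agree to within 'tolerance'; returns false when the
	 * reading should be discarded.  A tolerance of 0 disables the check,
	 * as in the driver. */
	static bool filter_axis(int s[3], int tolerance, int *coord)
	{
		sort3(s);
		if (tolerance && s[2] - s[0] >= tolerance)
			return false;
		*coord = s[1];
		return true;
	}

With the default tolerance of 300, samples {510, 512, 900} on one axis would be discarded, while {510, 512, 520} would report the median 512.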
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index 67fcd33595de..b79097e3028a 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -233,7 +233,7 @@ static int pcap_ts_resume(struct device *dev)
233 return 0; 233 return 0;
234} 234}
235 235
236static struct dev_pm_ops pcap_ts_pm_ops = { 236static const struct dev_pm_ops pcap_ts_pm_ops = {
237 .suspend = pcap_ts_suspend, 237 .suspend = pcap_ts_suspend,
238 .resume = pcap_ts_resume, 238 .resume = pcap_ts_resume,
239}; 239};
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index f2cc13d76810..782f95822eab 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -50,7 +50,7 @@ static ssize_t led_brightness_store(struct device *dev,
50 unsigned long state = simple_strtoul(buf, &after, 10); 50 unsigned long state = simple_strtoul(buf, &after, 10);
51 size_t count = after - buf; 51 size_t count = after - buf;
52 52
53 if (*after && isspace(*after)) 53 if (isspace(*after))
54 count++; 54 count++;
55 55
56 if (count == size) { 56 if (count == size) {
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 3b83406de752..38b3378be442 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -83,7 +83,7 @@ static ssize_t led_delay_on_store(struct device *dev,
83 unsigned long state = simple_strtoul(buf, &after, 10); 83 unsigned long state = simple_strtoul(buf, &after, 10);
84 size_t count = after - buf; 84 size_t count = after - buf;
85 85
86 if (*after && isspace(*after)) 86 if (isspace(*after))
87 count++; 87 count++;
88 88
89 if (count == size) { 89 if (count == size) {
@@ -127,7 +127,7 @@ static ssize_t led_delay_off_store(struct device *dev,
127 unsigned long state = simple_strtoul(buf, &after, 10); 127 unsigned long state = simple_strtoul(buf, &after, 10);
128 size_t count = after - buf; 128 size_t count = after - buf;
129 129
130 if (*after && isspace(*after)) 130 if (isspace(*after))
131 count++; 131 count++;
132 132
133 if (count == size) { 133 if (count == size) {
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e412980763bd..a93637223c8d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2003 Christophe Saout <christophe@saout.de> 2 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> 3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. 4 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
5 * 5 *
6 * This file is released under the GPL. 6 * This file is released under the GPL.
7 */ 7 */
@@ -71,10 +71,21 @@ struct crypt_iv_operations {
71 int (*ctr)(struct crypt_config *cc, struct dm_target *ti, 71 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
72 const char *opts); 72 const char *opts);
73 void (*dtr)(struct crypt_config *cc); 73 void (*dtr)(struct crypt_config *cc);
74 const char *(*status)(struct crypt_config *cc); 74 int (*init)(struct crypt_config *cc);
75 int (*wipe)(struct crypt_config *cc);
75 int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); 76 int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
76}; 77};
77 78
79struct iv_essiv_private {
80 struct crypto_cipher *tfm;
81 struct crypto_hash *hash_tfm;
82 u8 *salt;
83};
84
85struct iv_benbi_private {
86 int shift;
87};
88
78/* 89/*
79 * Crypt: maps a linear range of a block device 90 * Crypt: maps a linear range of a block device
80 * and encrypts / decrypts at the same time. 91 * and encrypts / decrypts at the same time.
@@ -102,8 +113,8 @@ struct crypt_config {
102 struct crypt_iv_operations *iv_gen_ops; 113 struct crypt_iv_operations *iv_gen_ops;
103 char *iv_mode; 114 char *iv_mode;
104 union { 115 union {
105 struct crypto_cipher *essiv_tfm; 116 struct iv_essiv_private essiv;
106 int benbi_shift; 117 struct iv_benbi_private benbi;
107 } iv_gen_private; 118 } iv_gen_private;
108 sector_t iv_offset; 119 sector_t iv_offset;
109 unsigned int iv_size; 120 unsigned int iv_size;
@@ -147,6 +158,9 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
147 * plain: the initial vector is the 32-bit little-endian version of the sector 158 * plain: the initial vector is the 32-bit little-endian version of the sector
148 * number, padded with zeros if necessary. 159 * number, padded with zeros if necessary.
149 * 160 *
161 * plain64: the initial vector is the 64-bit little-endian version of the sector
162 * number, padded with zeros if necessary.
163 *
150 * essiv: "encrypted sector|salt initial vector", the sector number is 164 * essiv: "encrypted sector|salt initial vector", the sector number is
151 * encrypted with the bulk cipher using a salt as key. The salt 165 * encrypted with the bulk cipher using a salt as key. The salt
152 * should be derived from the bulk cipher's key via hashing. 166 * should be derived from the bulk cipher's key via hashing.
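The only difference between the existing plain generator and the new plain64 one is how much of the sector number survives: plain truncates it to 32 bits, plain64 keeps all 64. A rough user-space sketch of the two (the kernel code writes cpu_to_le32/cpu_to_le64 into the zeroed IV buffer; this version simply assumes a little-endian host):

	#include <stdint.h>
	#include <string.h>

	static void iv_plain(uint8_t *iv, size_t iv_size, uint64_t sector)
	{
		uint32_t s32 = (uint32_t)sector;	/* sectors beyond 2^32 wrap */

		memset(iv, 0, iv_size);
		memcpy(iv, &s32, sizeof(s32));
	}

	static void iv_plain64(uint8_t *iv, size_t iv_size, uint64_t sector)
	{
		memset(iv, 0, iv_size);
		memcpy(iv, &sector, sizeof(sector));
	}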
@@ -169,88 +183,123 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
169 return 0; 183 return 0;
170} 184}
171 185
172static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, 186static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
173 const char *opts) 187 sector_t sector)
174{ 188{
175 struct crypto_cipher *essiv_tfm; 189 memset(iv, 0, cc->iv_size);
176 struct crypto_hash *hash_tfm; 190 *(u64 *)iv = cpu_to_le64(sector);
191
192 return 0;
193}
194
195/* Initialise ESSIV - compute salt but no local memory allocations */
196static int crypt_iv_essiv_init(struct crypt_config *cc)
197{
198 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
177 struct hash_desc desc; 199 struct hash_desc desc;
178 struct scatterlist sg; 200 struct scatterlist sg;
179 unsigned int saltsize;
180 u8 *salt;
181 int err; 201 int err;
182 202
183 if (opts == NULL) { 203 sg_init_one(&sg, cc->key, cc->key_size);
204 desc.tfm = essiv->hash_tfm;
205 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
206
207 err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
208 if (err)
209 return err;
210
211 return crypto_cipher_setkey(essiv->tfm, essiv->salt,
212 crypto_hash_digestsize(essiv->hash_tfm));
213}
214
215/* Wipe salt and reset key derived from volume key */
216static int crypt_iv_essiv_wipe(struct crypt_config *cc)
217{
218 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
219 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
220
221 memset(essiv->salt, 0, salt_size);
222
223 return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
224}
225
226static void crypt_iv_essiv_dtr(struct crypt_config *cc)
227{
228 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
229
230 crypto_free_cipher(essiv->tfm);
231 essiv->tfm = NULL;
232
233 crypto_free_hash(essiv->hash_tfm);
234 essiv->hash_tfm = NULL;
235
236 kzfree(essiv->salt);
237 essiv->salt = NULL;
238}
239
240static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
241 const char *opts)
242{
243 struct crypto_cipher *essiv_tfm = NULL;
244 struct crypto_hash *hash_tfm = NULL;
245 u8 *salt = NULL;
246 int err;
247
248 if (!opts) {
184 ti->error = "Digest algorithm missing for ESSIV mode"; 249 ti->error = "Digest algorithm missing for ESSIV mode";
185 return -EINVAL; 250 return -EINVAL;
186 } 251 }
187 252
188 /* Hash the cipher key with the given hash algorithm */ 253 /* Allocate hash algorithm */
189 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); 254 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
190 if (IS_ERR(hash_tfm)) { 255 if (IS_ERR(hash_tfm)) {
191 ti->error = "Error initializing ESSIV hash"; 256 ti->error = "Error initializing ESSIV hash";
192 return PTR_ERR(hash_tfm); 257 err = PTR_ERR(hash_tfm);
258 goto bad;
193 } 259 }
194 260
195 saltsize = crypto_hash_digestsize(hash_tfm); 261 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
196 salt = kmalloc(saltsize, GFP_KERNEL); 262 if (!salt) {
197 if (salt == NULL) {
198 ti->error = "Error kmallocing salt storage in ESSIV"; 263 ti->error = "Error kmallocing salt storage in ESSIV";
199 crypto_free_hash(hash_tfm); 264 err = -ENOMEM;
200 return -ENOMEM; 265 goto bad;
201 } 266 }
202 267
203 sg_init_one(&sg, cc->key, cc->key_size); 268 /* Allocate essiv_tfm */
204 desc.tfm = hash_tfm;
205 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
206 err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
207 crypto_free_hash(hash_tfm);
208
209 if (err) {
210 ti->error = "Error calculating hash in ESSIV";
211 kfree(salt);
212 return err;
213 }
214
215 /* Setup the essiv_tfm with the given salt */
216 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); 269 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
217 if (IS_ERR(essiv_tfm)) { 270 if (IS_ERR(essiv_tfm)) {
218 ti->error = "Error allocating crypto tfm for ESSIV"; 271 ti->error = "Error allocating crypto tfm for ESSIV";
219 kfree(salt); 272 err = PTR_ERR(essiv_tfm);
220 return PTR_ERR(essiv_tfm); 273 goto bad;
221 } 274 }
222 if (crypto_cipher_blocksize(essiv_tfm) != 275 if (crypto_cipher_blocksize(essiv_tfm) !=
223 crypto_ablkcipher_ivsize(cc->tfm)) { 276 crypto_ablkcipher_ivsize(cc->tfm)) {
224 ti->error = "Block size of ESSIV cipher does " 277 ti->error = "Block size of ESSIV cipher does "
225 "not match IV size of block cipher"; 278 "not match IV size of block cipher";
226 crypto_free_cipher(essiv_tfm); 279 err = -EINVAL;
227 kfree(salt); 280 goto bad;
228 return -EINVAL;
229 } 281 }
230 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
231 if (err) {
232 ti->error = "Failed to set key for ESSIV cipher";
233 crypto_free_cipher(essiv_tfm);
234 kfree(salt);
235 return err;
236 }
237 kfree(salt);
238 282
239 cc->iv_gen_private.essiv_tfm = essiv_tfm; 283 cc->iv_gen_private.essiv.salt = salt;
284 cc->iv_gen_private.essiv.tfm = essiv_tfm;
285 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
286
240 return 0; 287 return 0;
241}
242 288
243static void crypt_iv_essiv_dtr(struct crypt_config *cc) 289bad:
244{ 290 if (essiv_tfm && !IS_ERR(essiv_tfm))
245 crypto_free_cipher(cc->iv_gen_private.essiv_tfm); 291 crypto_free_cipher(essiv_tfm);
246 cc->iv_gen_private.essiv_tfm = NULL; 292 if (hash_tfm && !IS_ERR(hash_tfm))
293 crypto_free_hash(hash_tfm);
294 kfree(salt);
295 return err;
247} 296}
248 297
249static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) 298static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
250{ 299{
251 memset(iv, 0, cc->iv_size); 300 memset(iv, 0, cc->iv_size);
252 *(u64 *)iv = cpu_to_le64(sector); 301 *(u64 *)iv = cpu_to_le64(sector);
253 crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv); 302 crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
254 return 0; 303 return 0;
255} 304}
256 305
@@ -273,7 +322,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
273 return -EINVAL; 322 return -EINVAL;
274 } 323 }
275 324
276 cc->iv_gen_private.benbi_shift = 9 - log; 325 cc->iv_gen_private.benbi.shift = 9 - log;
277 326
278 return 0; 327 return 0;
279} 328}
@@ -288,7 +337,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
288 337
289 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ 338 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
290 339
291 val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1); 340 val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
292 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); 341 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
293 342
294 return 0; 343 return 0;
@@ -305,9 +354,15 @@ static struct crypt_iv_operations crypt_iv_plain_ops = {
305 .generator = crypt_iv_plain_gen 354 .generator = crypt_iv_plain_gen
306}; 355};
307 356
357static struct crypt_iv_operations crypt_iv_plain64_ops = {
358 .generator = crypt_iv_plain64_gen
359};
360
308static struct crypt_iv_operations crypt_iv_essiv_ops = { 361static struct crypt_iv_operations crypt_iv_essiv_ops = {
309 .ctr = crypt_iv_essiv_ctr, 362 .ctr = crypt_iv_essiv_ctr,
310 .dtr = crypt_iv_essiv_dtr, 363 .dtr = crypt_iv_essiv_dtr,
364 .init = crypt_iv_essiv_init,
365 .wipe = crypt_iv_essiv_wipe,
311 .generator = crypt_iv_essiv_gen 366 .generator = crypt_iv_essiv_gen
312}; 367};
313 368
@@ -934,14 +989,14 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
934 989
935 set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 990 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
936 991
937 return 0; 992 return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
938} 993}
939 994
940static int crypt_wipe_key(struct crypt_config *cc) 995static int crypt_wipe_key(struct crypt_config *cc)
941{ 996{
942 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); 997 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
943 memset(&cc->key, 0, cc->key_size * sizeof(u8)); 998 memset(&cc->key, 0, cc->key_size * sizeof(u8));
944 return 0; 999 return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
945} 1000}
946 1001
947/* 1002/*
@@ -983,11 +1038,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
983 return -ENOMEM; 1038 return -ENOMEM;
984 } 1039 }
985 1040
986 if (crypt_set_key(cc, argv[1])) {
987 ti->error = "Error decoding key";
988 goto bad_cipher;
989 }
990
991 /* Compatibility mode for old dm-crypt cipher strings */ 1041 /* Compatibility mode for old dm-crypt cipher strings */
992 if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { 1042 if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
993 chainmode = "cbc"; 1043 chainmode = "cbc";
@@ -1015,6 +1065,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1015 strcpy(cc->chainmode, chainmode); 1065 strcpy(cc->chainmode, chainmode);
1016 cc->tfm = tfm; 1066 cc->tfm = tfm;
1017 1067
1068 if (crypt_set_key(cc, argv[1]) < 0) {
1069 ti->error = "Error decoding and setting key";
1070 goto bad_ivmode;
1071 }
1072
1018 /* 1073 /*
1019 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi". 1074 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
1020 * See comments at iv code 1075 * See comments at iv code
@@ -1024,6 +1079,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1024 cc->iv_gen_ops = NULL; 1079 cc->iv_gen_ops = NULL;
1025 else if (strcmp(ivmode, "plain") == 0) 1080 else if (strcmp(ivmode, "plain") == 0)
1026 cc->iv_gen_ops = &crypt_iv_plain_ops; 1081 cc->iv_gen_ops = &crypt_iv_plain_ops;
1082 else if (strcmp(ivmode, "plain64") == 0)
1083 cc->iv_gen_ops = &crypt_iv_plain64_ops;
1027 else if (strcmp(ivmode, "essiv") == 0) 1084 else if (strcmp(ivmode, "essiv") == 0)
1028 cc->iv_gen_ops = &crypt_iv_essiv_ops; 1085 cc->iv_gen_ops = &crypt_iv_essiv_ops;
1029 else if (strcmp(ivmode, "benbi") == 0) 1086 else if (strcmp(ivmode, "benbi") == 0)
@@ -1039,6 +1096,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1039 cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) 1096 cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
1040 goto bad_ivmode; 1097 goto bad_ivmode;
1041 1098
1099 if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
1100 cc->iv_gen_ops->init(cc) < 0) {
1101 ti->error = "Error initialising IV";
1102 goto bad_slab_pool;
1103 }
1104
1042 cc->iv_size = crypto_ablkcipher_ivsize(tfm); 1105 cc->iv_size = crypto_ablkcipher_ivsize(tfm);
1043 if (cc->iv_size) 1106 if (cc->iv_size)
1044 /* at least a 64 bit sector number should fit in our buffer */ 1107 /* at least a 64 bit sector number should fit in our buffer */
@@ -1085,11 +1148,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1085 goto bad_bs; 1148 goto bad_bs;
1086 } 1149 }
1087 1150
1088 if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
1089 ti->error = "Error setting key";
1090 goto bad_device;
1091 }
1092
1093 if (sscanf(argv[2], "%llu", &tmpll) != 1) { 1151 if (sscanf(argv[2], "%llu", &tmpll) != 1) {
1094 ti->error = "Invalid iv_offset sector"; 1152 ti->error = "Invalid iv_offset sector";
1095 goto bad_device; 1153 goto bad_device;
@@ -1278,6 +1336,7 @@ static void crypt_resume(struct dm_target *ti)
1278static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) 1336static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1279{ 1337{
1280 struct crypt_config *cc = ti->private; 1338 struct crypt_config *cc = ti->private;
1339 int ret = -EINVAL;
1281 1340
1282 if (argc < 2) 1341 if (argc < 2)
1283 goto error; 1342 goto error;
@@ -1287,10 +1346,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1287 DMWARN("not suspended during key manipulation."); 1346 DMWARN("not suspended during key manipulation.");
1288 return -EINVAL; 1347 return -EINVAL;
1289 } 1348 }
1290 if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) 1349 if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
1291 return crypt_set_key(cc, argv[2]); 1350 ret = crypt_set_key(cc, argv[2]);
1292 if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) 1351 if (ret)
1352 return ret;
1353 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
1354 ret = cc->iv_gen_ops->init(cc);
1355 return ret;
1356 }
1357 if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
1358 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
1359 ret = cc->iv_gen_ops->wipe(cc);
1360 if (ret)
1361 return ret;
1362 }
1293 return crypt_wipe_key(cc); 1363 return crypt_wipe_key(cc);
1364 }
1294 } 1365 }
1295 1366
1296error: 1367error:
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 7dbe652efb5a..2b7907b6dd09 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -172,7 +172,8 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
172 } 172 }
173 173
174 /* Validate the chunk size against the device block size */ 174 /* Validate the chunk size against the device block size */
175 if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) { 175 if (chunk_size %
176 (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
176 *error = "Chunk size is not a multiple of device blocksize"; 177 *error = "Chunk size is not a multiple of device blocksize";
177 return -EINVAL; 178 return -EINVAL;
178 } 179 }
@@ -190,6 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
190} 191}
191 192
192int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, 193int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
194 struct dm_snapshot *snap,
193 unsigned *args_used, 195 unsigned *args_used,
194 struct dm_exception_store **store) 196 struct dm_exception_store **store)
195{ 197{
@@ -198,7 +200,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
198 struct dm_exception_store *tmp_store; 200 struct dm_exception_store *tmp_store;
199 char persistent; 201 char persistent;
200 202
201 if (argc < 3) { 203 if (argc < 2) {
202 ti->error = "Insufficient exception store arguments"; 204 ti->error = "Insufficient exception store arguments";
203 return -EINVAL; 205 return -EINVAL;
204 } 206 }
@@ -209,14 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
209 return -ENOMEM; 211 return -ENOMEM;
210 } 212 }
211 213
212 persistent = toupper(*argv[1]); 214 persistent = toupper(*argv[0]);
213 if (persistent == 'P') 215 if (persistent == 'P')
214 type = get_type("P"); 216 type = get_type("P");
215 else if (persistent == 'N') 217 else if (persistent == 'N')
216 type = get_type("N"); 218 type = get_type("N");
217 else { 219 else {
218 ti->error = "Persistent flag is not P or N"; 220 ti->error = "Persistent flag is not P or N";
219 return -EINVAL; 221 r = -EINVAL;
222 goto bad_type;
220 } 223 }
221 224
222 if (!type) { 225 if (!type) {
@@ -226,32 +229,23 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
226 } 229 }
227 230
228 tmp_store->type = type; 231 tmp_store->type = type;
229 tmp_store->ti = ti; 232 tmp_store->snap = snap;
230
231 r = dm_get_device(ti, argv[0], 0, 0,
232 FMODE_READ | FMODE_WRITE, &tmp_store->cow);
233 if (r) {
234 ti->error = "Cannot get COW device";
235 goto bad_cow;
236 }
237 233
238 r = set_chunk_size(tmp_store, argv[2], &ti->error); 234 r = set_chunk_size(tmp_store, argv[1], &ti->error);
239 if (r) 235 if (r)
240 goto bad_ctr; 236 goto bad;
241 237
242 r = type->ctr(tmp_store, 0, NULL); 238 r = type->ctr(tmp_store, 0, NULL);
243 if (r) { 239 if (r) {
244 ti->error = "Exception store type constructor failed"; 240 ti->error = "Exception store type constructor failed";
245 goto bad_ctr; 241 goto bad;
246 } 242 }
247 243
248 *args_used = 3; 244 *args_used = 2;
249 *store = tmp_store; 245 *store = tmp_store;
250 return 0; 246 return 0;
251 247
252bad_ctr: 248bad:
253 dm_put_device(ti, tmp_store->cow);
254bad_cow:
255 put_type(type); 249 put_type(type);
256bad_type: 250bad_type:
257 kfree(tmp_store); 251 kfree(tmp_store);
@@ -262,7 +256,6 @@ EXPORT_SYMBOL(dm_exception_store_create);
262void dm_exception_store_destroy(struct dm_exception_store *store) 256void dm_exception_store_destroy(struct dm_exception_store *store)
263{ 257{
264 store->type->dtr(store); 258 store->type->dtr(store);
265 dm_put_device(store->ti, store->cow);
266 put_type(store->type); 259 put_type(store->type);
267 kfree(store); 260 kfree(store);
268} 261}
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 8a223a48802c..e8dfa06af3ba 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -26,7 +26,7 @@ typedef sector_t chunk_t;
26 * of chunks that follow contiguously. Remaining bits hold the number of the 26 * of chunks that follow contiguously. Remaining bits hold the number of the
27 * chunk within the device. 27 * chunk within the device.
28 */ 28 */
29struct dm_snap_exception { 29struct dm_exception {
30 struct list_head hash_list; 30 struct list_head hash_list;
31 31
32 chunk_t old_chunk; 32 chunk_t old_chunk;
@@ -64,17 +64,34 @@ struct dm_exception_store_type {
64 * Find somewhere to store the next exception. 64 * Find somewhere to store the next exception.
65 */ 65 */
66 int (*prepare_exception) (struct dm_exception_store *store, 66 int (*prepare_exception) (struct dm_exception_store *store,
67 struct dm_snap_exception *e); 67 struct dm_exception *e);
68 68
69 /* 69 /*
70 * Update the metadata with this exception. 70 * Update the metadata with this exception.
71 */ 71 */
72 void (*commit_exception) (struct dm_exception_store *store, 72 void (*commit_exception) (struct dm_exception_store *store,
73 struct dm_snap_exception *e, 73 struct dm_exception *e,
74 void (*callback) (void *, int success), 74 void (*callback) (void *, int success),
75 void *callback_context); 75 void *callback_context);
76 76
77 /* 77 /*
78 * Returns 0 if the exception store is empty.
79 *
80 * If there are exceptions still to be merged, sets
81 * *last_old_chunk and *last_new_chunk to the most recent
82 * still-to-be-merged chunk and returns the number of
83 * consecutive previous ones.
84 */
85 int (*prepare_merge) (struct dm_exception_store *store,
86 chunk_t *last_old_chunk, chunk_t *last_new_chunk);
87
88 /*
89 * Clear the last n exceptions.
90 * nr_merged must be <= the value returned by prepare_merge.
91 */
92 int (*commit_merge) (struct dm_exception_store *store, int nr_merged);
93
94 /*
78 * The snapshot is invalid, note this in the metadata. 95 * The snapshot is invalid, note this in the metadata.
79 */ 96 */
80 void (*drop_snapshot) (struct dm_exception_store *store); 97 void (*drop_snapshot) (struct dm_exception_store *store);
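The two new callbacks split a merge into a query phase and a commit phase: prepare_merge() reports the most recent still-to-be-merged chunk pair and how many consecutive predecessors it has, and commit_merge() then retires at most that many. A hedged, self-contained sketch of how a caller could drive the pair; the toy store below only counts pending exceptions, and the real data movement is just hinted at in a comment.

#include <stdio.h>

typedef unsigned long long chunk_t;

/* Toy store: 'remaining' pending exceptions, newest ones first. */
struct store { int remaining; };

/* Report the newest still-to-be-merged chunk pair and how many
 * consecutive predecessors it has (capped at 4 for the demo). */
static int prepare_merge(struct store *s, chunk_t *old_c, chunk_t *new_c)
{
    if (!s->remaining)
        return 0;
    *old_c = *new_c = (chunk_t)s->remaining;
    return s->remaining < 4 ? s->remaining : 4;
}

/* Retire the last nr_merged exceptions; nr_merged <= prepare_merge(). */
static int commit_merge(struct store *s, int nr_merged)
{
    s->remaining -= nr_merged;
    return 0;
}

int main(void)
{
    struct store s = { .remaining = 10 };
    chunk_t oc, nc;
    int nr;

    while ((nr = prepare_merge(&s, &oc, &nc)) > 0) {
        /* A real caller would copy 'nr' chunks back to the origin here. */
        printf("merging %d chunk(s) ending at chunk %llu\n", nr, nc);
        commit_merge(&s, nr);
    }
    return 0;
}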
@@ -86,19 +103,19 @@ struct dm_exception_store_type {
86 /* 103 /*
87 * Return how full the snapshot is. 104 * Return how full the snapshot is.
88 */ 105 */
89 void (*fraction_full) (struct dm_exception_store *store, 106 void (*usage) (struct dm_exception_store *store,
90 sector_t *numerator, 107 sector_t *total_sectors, sector_t *sectors_allocated,
91 sector_t *denominator); 108 sector_t *metadata_sectors);
92 109
93 /* For internal device-mapper use only. */ 110 /* For internal device-mapper use only. */
94 struct list_head list; 111 struct list_head list;
95}; 112};
96 113
114struct dm_snapshot;
115
97struct dm_exception_store { 116struct dm_exception_store {
98 struct dm_exception_store_type *type; 117 struct dm_exception_store_type *type;
99 struct dm_target *ti; 118 struct dm_snapshot *snap;
100
101 struct dm_dev *cow;
102 119
103 /* Size of data blocks saved - must be a power of 2 */ 120 /* Size of data blocks saved - must be a power of 2 */
104 unsigned chunk_size; 121 unsigned chunk_size;
@@ -109,6 +126,11 @@ struct dm_exception_store {
109}; 126};
110 127
111/* 128/*
129 * Obtain the cow device used by a given snapshot.
130 */
131struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
132
133/*
112 * Functions to manipulate consecutive chunks 134 * Functions to manipulate consecutive chunks
113 */ 135 */
114# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64) 136# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
@@ -120,18 +142,25 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
120 return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL); 142 return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
121} 143}
122 144
123static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) 145static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
124{ 146{
125 return e->new_chunk >> DM_CHUNK_NUMBER_BITS; 147 return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
126} 148}
127 149
128static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) 150static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
129{ 151{
130 e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS); 152 e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
131 153
132 BUG_ON(!dm_consecutive_chunk_count(e)); 154 BUG_ON(!dm_consecutive_chunk_count(e));
133} 155}
134 156
157static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
158{
159 BUG_ON(!dm_consecutive_chunk_count(e));
160
161 e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
162}
163
135# else 164# else
136# define DM_CHUNK_CONSECUTIVE_BITS 0 165# define DM_CHUNK_CONSECUTIVE_BITS 0
137 166
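Both the chunk number and the run length live in the single new_chunk field: the low DM_CHUNK_NUMBER_BITS carry the chunk number and the bits above them count consecutive chunks, so the inc/dec helpers simply add or subtract 1ULL << DM_CHUNK_NUMBER_BITS. A standalone illustration of that encoding; 56 is used here only as an assumed bit split, since the constant's value is not shown in this hunk.

#include <stdio.h>
#include <stdint.h>

#define CHUNK_NUMBER_BITS 56ULL    /* illustrative split, not the kernel constant */

static uint64_t chunk_number(uint64_t c)
{
    return c & ((1ULL << CHUNK_NUMBER_BITS) - 1);
}

static unsigned consecutive_count(uint64_t c)
{
    return (unsigned)(c >> CHUNK_NUMBER_BITS);
}

int main(void)
{
    uint64_t e = 1000;                      /* chunk 1000, count 0 */

    e += 1ULL << CHUNK_NUMBER_BITS;         /* ..._inc(): count 1 */
    e += 1ULL << CHUNK_NUMBER_BITS;         /* ..._inc(): count 2 */
    e -= 1ULL << CHUNK_NUMBER_BITS;         /* the new ..._dec(): count 1 */

    printf("chunk %llu, %u consecutive\n",
           (unsigned long long)chunk_number(e), consecutive_count(e));
    return 0;
}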
@@ -140,12 +169,16 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
140 return chunk; 169 return chunk;
141} 170}
142 171
143static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e) 172static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
144{ 173{
145 return 0; 174 return 0;
146} 175}
147 176
148static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e) 177static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
178{
179}
180
181static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
149{ 182{
150} 183}
151 184
@@ -162,7 +195,7 @@ static inline sector_t get_dev_size(struct block_device *bdev)
162static inline chunk_t sector_to_chunk(struct dm_exception_store *store, 195static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
163 sector_t sector) 196 sector_t sector)
164{ 197{
165 return (sector & ~store->chunk_mask) >> store->chunk_shift; 198 return sector >> store->chunk_shift;
166} 199}
167 200
168int dm_exception_store_type_register(struct dm_exception_store_type *type); 201int dm_exception_store_type_register(struct dm_exception_store_type *type);
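The simplified sector_to_chunk() can drop the mask because shifting right by chunk_shift already discards the intra-chunk offset bits that the mask was clearing; assuming chunk_mask is chunk_size - 1 and chunk_shift is log2(chunk_size), as the exception-store code sets them up, the two forms give the same result. A quick brute-force check of that equivalence:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned chunk_shift = 4;                    /* e.g. 16-sector chunks */
    uint64_t chunk_mask = (1ULL << chunk_shift) - 1;
    uint64_t sector;

    for (sector = 0; sector < 100; sector++)
        if (((sector & ~chunk_mask) >> chunk_shift) !=
            (sector >> chunk_shift))
            printf("mismatch at %llu\n", (unsigned long long)sector);

    printf("old and new sector_to_chunk() agree\n");
    return 0;
}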
@@ -173,6 +206,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
173 char **error); 206 char **error);
174 207
175int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, 208int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
209 struct dm_snapshot *snap,
176 unsigned *args_used, 210 unsigned *args_used,
177 struct dm_exception_store **store); 211 struct dm_exception_store **store);
178void dm_exception_store_destroy(struct dm_exception_store *store); 212void dm_exception_store_destroy(struct dm_exception_store *store);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3a2e6a2f8bdd..10f457ca6af2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,6 +5,8 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include "dm.h"
9
8#include <linux/device-mapper.h> 10#include <linux/device-mapper.h>
9 11
10#include <linux/bio.h> 12#include <linux/bio.h>
@@ -14,12 +16,19 @@
14#include <linux/slab.h> 16#include <linux/slab.h>
15#include <linux/dm-io.h> 17#include <linux/dm-io.h>
16 18
19#define DM_MSG_PREFIX "io"
20
21#define DM_IO_MAX_REGIONS BITS_PER_LONG
22
17struct dm_io_client { 23struct dm_io_client {
18 mempool_t *pool; 24 mempool_t *pool;
19 struct bio_set *bios; 25 struct bio_set *bios;
20}; 26};
21 27
22/* FIXME: can we shrink this ? */ 28/*
29 * Aligning 'struct io' reduces the number of bits required to store
30 * its address. Refer to store_io_and_region_in_bio() below.
31 */
23struct io { 32struct io {
24 unsigned long error_bits; 33 unsigned long error_bits;
25 unsigned long eopnotsupp_bits; 34 unsigned long eopnotsupp_bits;
@@ -28,7 +37,9 @@ struct io {
28 struct dm_io_client *client; 37 struct dm_io_client *client;
29 io_notify_fn callback; 38 io_notify_fn callback;
30 void *context; 39 void *context;
31}; 40} __attribute__((aligned(DM_IO_MAX_REGIONS)));
41
42static struct kmem_cache *_dm_io_cache;
32 43
33/* 44/*
34 * io contexts are only dynamically allocated for asynchronous 45 * io contexts are only dynamically allocated for asynchronous
@@ -53,7 +64,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
53 if (!client) 64 if (!client)
54 return ERR_PTR(-ENOMEM); 65 return ERR_PTR(-ENOMEM);
55 66
56 client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io)); 67 client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
57 if (!client->pool) 68 if (!client->pool)
58 goto bad; 69 goto bad;
59 70
@@ -88,18 +99,29 @@ EXPORT_SYMBOL(dm_io_client_destroy);
88 99
89/*----------------------------------------------------------------- 100/*-----------------------------------------------------------------
90 * We need to keep track of which region a bio is doing io for. 101 * We need to keep track of which region a bio is doing io for.
91 * In order to save a memory allocation we store this the last 102 * To avoid a memory allocation to store just 5 or 6 bits, we
92 * bvec which we know is unused (blech). 103 * ensure the 'struct io' pointer is aligned so enough low bits are
93 * XXX This is ugly and can OOPS with some configs... find another way. 104 * always zero and then combine it with the region number directly in
105 * bi_private.
94 *---------------------------------------------------------------*/ 106 *---------------------------------------------------------------*/
95static inline void bio_set_region(struct bio *bio, unsigned region) 107static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
108 unsigned region)
96{ 109{
97 bio->bi_io_vec[bio->bi_max_vecs].bv_len = region; 110 if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
111 DMCRIT("Unaligned struct io pointer %p", io);
112 BUG();
113 }
114
115 bio->bi_private = (void *)((unsigned long)io | region);
98} 116}
99 117
100static inline unsigned bio_get_region(struct bio *bio) 118static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
119 unsigned *region)
101{ 120{
102 return bio->bi_io_vec[bio->bi_max_vecs].bv_len; 121 unsigned long val = (unsigned long)bio->bi_private;
122
123 *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
124 *region = val & (DM_IO_MAX_REGIONS - 1);
103} 125}
104 126
105/*----------------------------------------------------------------- 127/*-----------------------------------------------------------------
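Aligning struct io to DM_IO_MAX_REGIONS (BITS_PER_LONG, so 64 on 64-bit builds) guarantees the low bits of its address are zero, and store_io_and_region_in_bio()/retrieve_io_and_region_from_bio() reuse those bits for the region index inside bi_private. The same trick in a self-contained userspace sketch:

#include <stdio.h>
#include <stdint.h>

#define MAX_REGIONS 64          /* plays the role of DM_IO_MAX_REGIONS */

struct io {
    unsigned long error_bits;
} __attribute__((aligned(MAX_REGIONS)));

/* The aligned attribute guarantees the low 6 address bits are zero. */
static struct io one_io;

static void *pack(struct io *io, unsigned region)
{
    return (void *)((uintptr_t)io | region);
}

static void unpack(void *priv, struct io **io, unsigned *region)
{
    uintptr_t val = (uintptr_t)priv;

    *io = (struct io *)(val & ~(uintptr_t)(MAX_REGIONS - 1));
    *region = val & (MAX_REGIONS - 1);
}

int main(void)
{
    struct io *back;
    unsigned region;
    void *priv = pack(&one_io, 37);

    unpack(priv, &back, &region);
    printf("pointer recovered: %d, region: %u\n", back == &one_io, region);
    return 0;
}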
@@ -140,10 +162,8 @@ static void endio(struct bio *bio, int error)
140 /* 162 /*
141 * The bio destructor in bio_put() may use the io object. 163 * The bio destructor in bio_put() may use the io object.
142 */ 164 */
143 io = bio->bi_private; 165 retrieve_io_and_region_from_bio(bio, &io, &region);
144 region = bio_get_region(bio);
145 166
146 bio->bi_max_vecs++;
147 bio_put(bio); 167 bio_put(bio);
148 168
149 dec_count(io, region, error); 169 dec_count(io, region, error);
@@ -243,7 +263,10 @@ static void vm_dp_init(struct dpages *dp, void *data)
243 263
244static void dm_bio_destructor(struct bio *bio) 264static void dm_bio_destructor(struct bio *bio)
245{ 265{
246 struct io *io = bio->bi_private; 266 unsigned region;
267 struct io *io;
268
269 retrieve_io_and_region_from_bio(bio, &io, &region);
247 270
248 bio_free(bio, io->client->bios); 271 bio_free(bio, io->client->bios);
249} 272}
@@ -286,26 +309,23 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
286 unsigned num_bvecs; 309 unsigned num_bvecs;
287 sector_t remaining = where->count; 310 sector_t remaining = where->count;
288 311
289 while (remaining) { 312 /*
313 * where->count may be zero if rw holds a write barrier and we
314 * need to send a zero-sized barrier.
315 */
316 do {
290 /* 317 /*
291 * Allocate a suitably sized-bio: we add an extra 318 * Allocate a suitably sized-bio.
292 * bvec for bio_get/set_region() and decrement bi_max_vecs
293 * to hide it from bio_add_page().
294 */ 319 */
295 num_bvecs = dm_sector_div_up(remaining, 320 num_bvecs = dm_sector_div_up(remaining,
296 (PAGE_SIZE >> SECTOR_SHIFT)); 321 (PAGE_SIZE >> SECTOR_SHIFT));
297 num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev), 322 num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
298 num_bvecs);
299 if (unlikely(num_bvecs > BIO_MAX_PAGES))
300 num_bvecs = BIO_MAX_PAGES;
301 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); 323 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
302 bio->bi_sector = where->sector + (where->count - remaining); 324 bio->bi_sector = where->sector + (where->count - remaining);
303 bio->bi_bdev = where->bdev; 325 bio->bi_bdev = where->bdev;
304 bio->bi_end_io = endio; 326 bio->bi_end_io = endio;
305 bio->bi_private = io;
306 bio->bi_destructor = dm_bio_destructor; 327 bio->bi_destructor = dm_bio_destructor;
307 bio->bi_max_vecs--; 328 store_io_and_region_in_bio(bio, io, region);
308 bio_set_region(bio, region);
309 329
310 /* 330 /*
311 * Try and add as many pages as possible. 331 * Try and add as many pages as possible.
@@ -323,7 +343,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
323 343
324 atomic_inc(&io->count); 344 atomic_inc(&io->count);
325 submit_bio(rw, bio); 345 submit_bio(rw, bio);
326 } 346 } while (remaining);
327} 347}
328 348
329static void dispatch_io(int rw, unsigned int num_regions, 349static void dispatch_io(int rw, unsigned int num_regions,
@@ -333,6 +353,8 @@ static void dispatch_io(int rw, unsigned int num_regions,
333 int i; 353 int i;
334 struct dpages old_pages = *dp; 354 struct dpages old_pages = *dp;
335 355
356 BUG_ON(num_regions > DM_IO_MAX_REGIONS);
357
336 if (sync) 358 if (sync)
337 rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); 359 rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
338 360
@@ -342,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
342 */ 364 */
343 for (i = 0; i < num_regions; i++) { 365 for (i = 0; i < num_regions; i++) {
344 *dp = old_pages; 366 *dp = old_pages;
345 if (where[i].count) 367 if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
346 do_region(rw, i, where + i, dp, io); 368 do_region(rw, i, where + i, dp, io);
347 } 369 }
348 370
@@ -357,7 +379,14 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
357 struct dm_io_region *where, int rw, struct dpages *dp, 379 struct dm_io_region *where, int rw, struct dpages *dp,
358 unsigned long *error_bits) 380 unsigned long *error_bits)
359{ 381{
360 struct io io; 382 /*
383 * gcc <= 4.3 can't do the alignment for stack variables, so we must
384 * align it on our own.
385 * volatile prevents the optimizer from removing or reusing
386 * "io_" field from the stack frame (allowed in ANSI C).
387 */
388 volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
389 struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
361 390
362 if (num_regions > 1 && (rw & RW_MASK) != WRITE) { 391 if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
363 WARN_ON(1); 392 WARN_ON(1);
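The sync_io() change works around compilers that cannot over-align automatic variables: it over-allocates a byte array on the stack and rounds the pointer up by hand (PTR_ALIGN in the kernel). A userspace sketch of the same manual rounding, assuming a 64-byte alignment requirement for the demo:

#include <stdio.h>
#include <stdint.h>

struct io {
    unsigned long error_bits;
} __attribute__((aligned(64)));

int main(void)
{
    /* Over-allocate, then round the pointer up to the next 64-byte boundary. */
    char io_[sizeof(struct io) + __alignof__(struct io) - 1];
    uintptr_t p = (uintptr_t)io_;
    struct io *io;

    p = (p + __alignof__(struct io) - 1) & ~(uintptr_t)(__alignof__(struct io) - 1);
    io = (struct io *)p;

    io->error_bits = 0;
    printf("aligned: %d\n", ((uintptr_t)io % __alignof__(struct io)) == 0);
    return 0;
}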
@@ -365,33 +394,33 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
365 } 394 }
366 395
367retry: 396retry:
368 io.error_bits = 0; 397 io->error_bits = 0;
369 io.eopnotsupp_bits = 0; 398 io->eopnotsupp_bits = 0;
370 atomic_set(&io.count, 1); /* see dispatch_io() */ 399 atomic_set(&io->count, 1); /* see dispatch_io() */
371 io.sleeper = current; 400 io->sleeper = current;
372 io.client = client; 401 io->client = client;
373 402
374 dispatch_io(rw, num_regions, where, dp, &io, 1); 403 dispatch_io(rw, num_regions, where, dp, io, 1);
375 404
376 while (1) { 405 while (1) {
377 set_current_state(TASK_UNINTERRUPTIBLE); 406 set_current_state(TASK_UNINTERRUPTIBLE);
378 407
379 if (!atomic_read(&io.count)) 408 if (!atomic_read(&io->count))
380 break; 409 break;
381 410
382 io_schedule(); 411 io_schedule();
383 } 412 }
384 set_current_state(TASK_RUNNING); 413 set_current_state(TASK_RUNNING);
385 414
386 if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { 415 if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
387 rw &= ~(1 << BIO_RW_BARRIER); 416 rw &= ~(1 << BIO_RW_BARRIER);
388 goto retry; 417 goto retry;
389 } 418 }
390 419
391 if (error_bits) 420 if (error_bits)
392 *error_bits = io.error_bits; 421 *error_bits = io->error_bits;
393 422
394 return io.error_bits ? -EIO : 0; 423 return io->error_bits ? -EIO : 0;
395} 424}
396 425
397static int async_io(struct dm_io_client *client, unsigned int num_regions, 426static int async_io(struct dm_io_client *client, unsigned int num_regions,
@@ -472,3 +501,18 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
472 &dp, io_req->notify.fn, io_req->notify.context); 501 &dp, io_req->notify.fn, io_req->notify.context);
473} 502}
474EXPORT_SYMBOL(dm_io); 503EXPORT_SYMBOL(dm_io);
504
505int __init dm_io_init(void)
506{
507 _dm_io_cache = KMEM_CACHE(io, 0);
508 if (!_dm_io_cache)
509 return -ENOMEM;
510
511 return 0;
512}
513
514void dm_io_exit(void)
515{
516 kmem_cache_destroy(_dm_io_cache);
517 _dm_io_cache = NULL;
518}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index a67942931582..1d669322b27c 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -56,6 +56,11 @@ static void dm_hash_remove_all(int keep_open_devices);
56 */ 56 */
57static DECLARE_RWSEM(_hash_lock); 57static DECLARE_RWSEM(_hash_lock);
58 58
59/*
60 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
61 */
62static DEFINE_MUTEX(dm_hash_cells_mutex);
63
59static void init_buckets(struct list_head *buckets) 64static void init_buckets(struct list_head *buckets)
60{ 65{
61 unsigned int i; 66 unsigned int i;
@@ -206,7 +211,9 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
206 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); 211 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
207 } 212 }
208 dm_get(md); 213 dm_get(md);
214 mutex_lock(&dm_hash_cells_mutex);
209 dm_set_mdptr(md, cell); 215 dm_set_mdptr(md, cell);
216 mutex_unlock(&dm_hash_cells_mutex);
210 up_write(&_hash_lock); 217 up_write(&_hash_lock);
211 218
212 return 0; 219 return 0;
@@ -224,9 +231,11 @@ static void __hash_remove(struct hash_cell *hc)
224 /* remove from the dev hash */ 231 /* remove from the dev hash */
225 list_del(&hc->uuid_list); 232 list_del(&hc->uuid_list);
226 list_del(&hc->name_list); 233 list_del(&hc->name_list);
234 mutex_lock(&dm_hash_cells_mutex);
227 dm_set_mdptr(hc->md, NULL); 235 dm_set_mdptr(hc->md, NULL);
236 mutex_unlock(&dm_hash_cells_mutex);
228 237
229 table = dm_get_table(hc->md); 238 table = dm_get_live_table(hc->md);
230 if (table) { 239 if (table) {
231 dm_table_event(table); 240 dm_table_event(table);
232 dm_table_put(table); 241 dm_table_put(table);
@@ -321,13 +330,15 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
321 */ 330 */
322 list_del(&hc->name_list); 331 list_del(&hc->name_list);
323 old_name = hc->name; 332 old_name = hc->name;
333 mutex_lock(&dm_hash_cells_mutex);
324 hc->name = new_name; 334 hc->name = new_name;
335 mutex_unlock(&dm_hash_cells_mutex);
325 list_add(&hc->name_list, _name_buckets + hash_str(new_name)); 336 list_add(&hc->name_list, _name_buckets + hash_str(new_name));
326 337
327 /* 338 /*
328 * Wake up any dm event waiters. 339 * Wake up any dm event waiters.
329 */ 340 */
330 table = dm_get_table(hc->md); 341 table = dm_get_live_table(hc->md);
331 if (table) { 342 if (table) {
332 dm_table_event(table); 343 dm_table_event(table);
333 dm_table_put(table); 344 dm_table_put(table);
@@ -512,8 +523,6 @@ static int list_versions(struct dm_ioctl *param, size_t param_size)
512 return 0; 523 return 0;
513} 524}
514 525
515
516
517static int check_name(const char *name) 526static int check_name(const char *name)
518{ 527{
519 if (strchr(name, '/')) { 528 if (strchr(name, '/')) {
@@ -525,6 +534,40 @@ static int check_name(const char *name)
525} 534}
526 535
527/* 536/*
537 * On successful return, the caller must not attempt to acquire
538 * _hash_lock without first calling dm_table_put, because dm_table_destroy
539 * waits for this dm_table_put and could be called under this lock.
540 */
541static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
542{
543 struct hash_cell *hc;
544 struct dm_table *table = NULL;
545
546 down_read(&_hash_lock);
547 hc = dm_get_mdptr(md);
548 if (!hc || hc->md != md) {
549 DMWARN("device has been removed from the dev hash table.");
550 goto out;
551 }
552
553 table = hc->new_map;
554 if (table)
555 dm_table_get(table);
556
557out:
558 up_read(&_hash_lock);
559
560 return table;
561}
562
563static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
564 struct dm_ioctl *param)
565{
566 return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
567 dm_get_inactive_table(md) : dm_get_live_table(md);
568}
569
570/*
528 * Fills in a dm_ioctl structure, ready for sending back to 571 * Fills in a dm_ioctl structure, ready for sending back to
529 * userland. 572 * userland.
530 */ 573 */
@@ -536,7 +579,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
536 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | 579 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
537 DM_ACTIVE_PRESENT_FLAG); 580 DM_ACTIVE_PRESENT_FLAG);
538 581
539 if (dm_suspended(md)) 582 if (dm_suspended_md(md))
540 param->flags |= DM_SUSPEND_FLAG; 583 param->flags |= DM_SUSPEND_FLAG;
541 584
542 param->dev = huge_encode_dev(disk_devt(disk)); 585 param->dev = huge_encode_dev(disk_devt(disk));
@@ -548,18 +591,30 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
548 */ 591 */
549 param->open_count = dm_open_count(md); 592 param->open_count = dm_open_count(md);
550 593
551 if (get_disk_ro(disk))
552 param->flags |= DM_READONLY_FLAG;
553
554 param->event_nr = dm_get_event_nr(md); 594 param->event_nr = dm_get_event_nr(md);
595 param->target_count = 0;
555 596
556 table = dm_get_table(md); 597 table = dm_get_live_table(md);
557 if (table) { 598 if (table) {
558 param->flags |= DM_ACTIVE_PRESENT_FLAG; 599 if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
559 param->target_count = dm_table_get_num_targets(table); 600 if (get_disk_ro(disk))
601 param->flags |= DM_READONLY_FLAG;
602 param->target_count = dm_table_get_num_targets(table);
603 }
560 dm_table_put(table); 604 dm_table_put(table);
561 } else 605
562 param->target_count = 0; 606 param->flags |= DM_ACTIVE_PRESENT_FLAG;
607 }
608
609 if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
610 table = dm_get_inactive_table(md);
611 if (table) {
612 if (!(dm_table_get_mode(table) & FMODE_WRITE))
613 param->flags |= DM_READONLY_FLAG;
614 param->target_count = dm_table_get_num_targets(table);
615 dm_table_put(table);
616 }
617 }
563 618
564 return 0; 619 return 0;
565} 620}
@@ -634,9 +689,9 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
634 * Sneakily write in both the name and the uuid 689 * Sneakily write in both the name and the uuid
635 * while we have the cell. 690 * while we have the cell.
636 */ 691 */
637 strncpy(param->name, hc->name, sizeof(param->name)); 692 strlcpy(param->name, hc->name, sizeof(param->name));
638 if (hc->uuid) 693 if (hc->uuid)
639 strncpy(param->uuid, hc->uuid, sizeof(param->uuid)-1); 694 strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
640 else 695 else
641 param->uuid[0] = '\0'; 696 param->uuid[0] = '\0';
642 697
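Switching from strncpy() to strlcpy() guarantees the copied name and uuid are always NUL-terminated even when the source fills the destination, which strncpy() does not. strlcpy() is a kernel/BSD helper rather than ISO C, so the sketch below carries a minimal local definition purely for the comparison:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy semantics for the demo (the kernel provides its own). */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = len < size - 1 ? len : size - 1;
        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return len;
}

int main(void)
{
    char a[8], b[8];
    const char *name = "very-long-device-name";

    strncpy(a, name, sizeof(a));        /* fills the buffer, no terminating NUL */
    my_strlcpy(b, name, sizeof(b));     /* truncates but always NUL-terminates */

    printf("strncpy terminated: %d, strlcpy terminated: %d\n",
           memchr(a, '\0', sizeof(a)) != NULL,
           memchr(b, '\0', sizeof(b)) != NULL);
    return 0;
}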
@@ -784,7 +839,7 @@ static int do_suspend(struct dm_ioctl *param)
784 if (param->flags & DM_NOFLUSH_FLAG) 839 if (param->flags & DM_NOFLUSH_FLAG)
785 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; 840 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
786 841
787 if (!dm_suspended(md)) 842 if (!dm_suspended_md(md))
788 r = dm_suspend(md, suspend_flags); 843 r = dm_suspend(md, suspend_flags);
789 844
790 if (!r) 845 if (!r)
@@ -800,7 +855,7 @@ static int do_resume(struct dm_ioctl *param)
800 unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; 855 unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
801 struct hash_cell *hc; 856 struct hash_cell *hc;
802 struct mapped_device *md; 857 struct mapped_device *md;
803 struct dm_table *new_map; 858 struct dm_table *new_map, *old_map = NULL;
804 859
805 down_write(&_hash_lock); 860 down_write(&_hash_lock);
806 861
@@ -826,14 +881,14 @@ static int do_resume(struct dm_ioctl *param)
826 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; 881 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
827 if (param->flags & DM_NOFLUSH_FLAG) 882 if (param->flags & DM_NOFLUSH_FLAG)
828 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; 883 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
829 if (!dm_suspended(md)) 884 if (!dm_suspended_md(md))
830 dm_suspend(md, suspend_flags); 885 dm_suspend(md, suspend_flags);
831 886
832 r = dm_swap_table(md, new_map); 887 old_map = dm_swap_table(md, new_map);
833 if (r) { 888 if (IS_ERR(old_map)) {
834 dm_table_destroy(new_map); 889 dm_table_destroy(new_map);
835 dm_put(md); 890 dm_put(md);
836 return r; 891 return PTR_ERR(old_map);
837 } 892 }
838 893
839 if (dm_table_get_mode(new_map) & FMODE_WRITE) 894 if (dm_table_get_mode(new_map) & FMODE_WRITE)
@@ -842,9 +897,11 @@ static int do_resume(struct dm_ioctl *param)
842 set_disk_ro(dm_disk(md), 1); 897 set_disk_ro(dm_disk(md), 1);
843 } 898 }
844 899
845 if (dm_suspended(md)) 900 if (dm_suspended_md(md))
846 r = dm_resume(md); 901 r = dm_resume(md);
847 902
903 if (old_map)
904 dm_table_destroy(old_map);
848 905
849 if (!r) { 906 if (!r) {
850 dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr); 907 dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
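dm_swap_table() now hands back either the previous live table or an error encoded in the pointer itself, which is why do_resume() tests IS_ERR() and unwraps with PTR_ERR() before destroying the old map. The kernel convention packs small negative errno values into the top of the pointer range; a userspace sketch of that idea (MAX_ERRNO and the helpers below mirror the kernel names but are local to the demo):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)           /* like the kernel's ERR_PTR() */
{
    return (void *)error;
}

static long ptr_err(const void *ptr)       /* like PTR_ERR() */
{
    return (long)ptr;
}

static int is_err(const void *ptr)         /* like IS_ERR() */
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for dm_swap_table(): return the old map or an encoded error. */
static void *swap_table(int fail)
{
    static int old_map = 42;

    return fail ? err_ptr(-EINVAL) : (void *)&old_map;
}

int main(void)
{
    void *old = swap_table(1);

    if (is_err(old))
        printf("swap failed: %ld\n", ptr_err(old));
    else
        printf("got the old table back\n");
    return 0;
}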
@@ -982,7 +1039,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
982 if (r) 1039 if (r)
983 goto out; 1040 goto out;
984 1041
985 table = dm_get_table(md); 1042 table = dm_get_live_or_inactive_table(md, param);
986 if (table) { 1043 if (table) {
987 retrieve_status(table, param, param_size); 1044 retrieve_status(table, param, param_size);
988 dm_table_put(table); 1045 dm_table_put(table);
@@ -1215,7 +1272,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
1215 if (r) 1272 if (r)
1216 goto out; 1273 goto out;
1217 1274
1218 table = dm_get_table(md); 1275 table = dm_get_live_or_inactive_table(md, param);
1219 if (table) { 1276 if (table) {
1220 retrieve_deps(table, param, param_size); 1277 retrieve_deps(table, param, param_size);
1221 dm_table_put(table); 1278 dm_table_put(table);
@@ -1244,13 +1301,13 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
1244 if (r) 1301 if (r)
1245 goto out; 1302 goto out;
1246 1303
1247 table = dm_get_table(md); 1304 table = dm_get_live_or_inactive_table(md, param);
1248 if (table) { 1305 if (table) {
1249 retrieve_status(table, param, param_size); 1306 retrieve_status(table, param, param_size);
1250 dm_table_put(table); 1307 dm_table_put(table);
1251 } 1308 }
1252 1309
1253 out: 1310out:
1254 dm_put(md); 1311 dm_put(md);
1255 return r; 1312 return r;
1256} 1313}
@@ -1288,10 +1345,15 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
1288 goto out; 1345 goto out;
1289 } 1346 }
1290 1347
1291 table = dm_get_table(md); 1348 table = dm_get_live_table(md);
1292 if (!table) 1349 if (!table)
1293 goto out_argv; 1350 goto out_argv;
1294 1351
1352 if (dm_deleting_md(md)) {
1353 r = -ENXIO;
1354 goto out_table;
1355 }
1356
1295 ti = dm_table_find_target(table, tmsg->sector); 1357 ti = dm_table_find_target(table, tmsg->sector);
1296 if (!dm_target_is_valid(ti)) { 1358 if (!dm_target_is_valid(ti)) {
1297 DMWARN("Target message sector outside device."); 1359 DMWARN("Target message sector outside device.");
@@ -1303,6 +1365,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
1303 r = -EINVAL; 1365 r = -EINVAL;
1304 } 1366 }
1305 1367
1368 out_table:
1306 dm_table_put(table); 1369 dm_table_put(table);
1307 out_argv: 1370 out_argv:
1308 kfree(argv); 1371 kfree(argv);
@@ -1582,8 +1645,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
1582 if (!md) 1645 if (!md)
1583 return -ENXIO; 1646 return -ENXIO;
1584 1647
1585 dm_get(md); 1648 mutex_lock(&dm_hash_cells_mutex);
1586 down_read(&_hash_lock);
1587 hc = dm_get_mdptr(md); 1649 hc = dm_get_mdptr(md);
1588 if (!hc || hc->md != md) { 1650 if (!hc || hc->md != md) {
1589 r = -ENXIO; 1651 r = -ENXIO;
@@ -1596,8 +1658,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
1596 strcpy(uuid, hc->uuid ? : ""); 1658 strcpy(uuid, hc->uuid ? : "");
1597 1659
1598out: 1660out:
1599 up_read(&_hash_lock); 1661 mutex_unlock(&dm_hash_cells_mutex);
1600 dm_put(md);
1601 1662
1602 return r; 1663 return r;
1603} 1664}
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 3e3fc06cb861..addf83475040 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -450,7 +450,10 @@ static void dispatch_job(struct kcopyd_job *job)
450{ 450{
451 struct dm_kcopyd_client *kc = job->kc; 451 struct dm_kcopyd_client *kc = job->kc;
452 atomic_inc(&kc->nr_jobs); 452 atomic_inc(&kc->nr_jobs);
453 push(&kc->pages_jobs, job); 453 if (unlikely(!job->source.count))
454 push(&kc->complete_jobs, job);
455 else
456 push(&kc->pages_jobs, job);
454 wake(kc); 457 wake(kc);
455} 458}
456 459
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 9443896ede07..7035582786fb 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -145,8 +145,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
145EXPORT_SYMBOL(dm_dirty_log_type_unregister); 145EXPORT_SYMBOL(dm_dirty_log_type_unregister);
146 146
147struct dm_dirty_log *dm_dirty_log_create(const char *type_name, 147struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
148 struct dm_target *ti, 148 struct dm_target *ti,
149 unsigned int argc, char **argv) 149 int (*flush_callback_fn)(struct dm_target *ti),
150 unsigned int argc, char **argv)
150{ 151{
151 struct dm_dirty_log_type *type; 152 struct dm_dirty_log_type *type;
152 struct dm_dirty_log *log; 153 struct dm_dirty_log *log;
@@ -161,6 +162,7 @@ struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
161 return NULL; 162 return NULL;
162 } 163 }
163 164
165 log->flush_callback_fn = flush_callback_fn;
164 log->type = type; 166 log->type = type;
165 if (type->ctr(log, ti, argc, argv)) { 167 if (type->ctr(log, ti, argc, argv)) {
166 kfree(log); 168 kfree(log);
@@ -208,7 +210,9 @@ struct log_header {
208 210
209struct log_c { 211struct log_c {
210 struct dm_target *ti; 212 struct dm_target *ti;
211 int touched; 213 int touched_dirtied;
214 int touched_cleaned;
215 int flush_failed;
212 uint32_t region_size; 216 uint32_t region_size;
213 unsigned int region_count; 217 unsigned int region_count;
214 region_t sync_count; 218 region_t sync_count;
@@ -233,6 +237,7 @@ struct log_c {
233 * Disk log fields 237 * Disk log fields
234 */ 238 */
235 int log_dev_failed; 239 int log_dev_failed;
240 int log_dev_flush_failed;
236 struct dm_dev *log_dev; 241 struct dm_dev *log_dev;
237 struct log_header header; 242 struct log_header header;
238 243
@@ -253,14 +258,14 @@ static inline void log_set_bit(struct log_c *l,
253 uint32_t *bs, unsigned bit) 258 uint32_t *bs, unsigned bit)
254{ 259{
255 ext2_set_bit(bit, (unsigned long *) bs); 260 ext2_set_bit(bit, (unsigned long *) bs);
256 l->touched = 1; 261 l->touched_cleaned = 1;
257} 262}
258 263
259static inline void log_clear_bit(struct log_c *l, 264static inline void log_clear_bit(struct log_c *l,
260 uint32_t *bs, unsigned bit) 265 uint32_t *bs, unsigned bit)
261{ 266{
262 ext2_clear_bit(bit, (unsigned long *) bs); 267 ext2_clear_bit(bit, (unsigned long *) bs);
263 l->touched = 1; 268 l->touched_dirtied = 1;
264} 269}
265 270
266/*---------------------------------------------------------------- 271/*----------------------------------------------------------------
@@ -287,6 +292,19 @@ static int rw_header(struct log_c *lc, int rw)
287 return dm_io(&lc->io_req, 1, &lc->header_location, NULL); 292 return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
288} 293}
289 294
295static int flush_header(struct log_c *lc)
296{
297 struct dm_io_region null_location = {
298 .bdev = lc->header_location.bdev,
299 .sector = 0,
300 .count = 0,
301 };
302
303 lc->io_req.bi_rw = WRITE_BARRIER;
304
305 return dm_io(&lc->io_req, 1, &null_location, NULL);
306}
307
290static int read_header(struct log_c *log) 308static int read_header(struct log_c *log)
291{ 309{
292 int r; 310 int r;
@@ -378,7 +396,9 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
378 } 396 }
379 397
380 lc->ti = ti; 398 lc->ti = ti;
381 lc->touched = 0; 399 lc->touched_dirtied = 0;
400 lc->touched_cleaned = 0;
401 lc->flush_failed = 0;
382 lc->region_size = region_size; 402 lc->region_size = region_size;
383 lc->region_count = region_count; 403 lc->region_count = region_count;
384 lc->sync = sync; 404 lc->sync = sync;
@@ -406,6 +426,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
406 } else { 426 } else {
407 lc->log_dev = dev; 427 lc->log_dev = dev;
408 lc->log_dev_failed = 0; 428 lc->log_dev_failed = 0;
429 lc->log_dev_flush_failed = 0;
409 lc->header_location.bdev = lc->log_dev->bdev; 430 lc->header_location.bdev = lc->log_dev->bdev;
410 lc->header_location.sector = 0; 431 lc->header_location.sector = 0;
411 432
@@ -614,6 +635,11 @@ static int disk_resume(struct dm_dirty_log *log)
614 635
615 /* write the new header */ 636 /* write the new header */
616 r = rw_header(lc, WRITE); 637 r = rw_header(lc, WRITE);
638 if (!r) {
639 r = flush_header(lc);
640 if (r)
641 lc->log_dev_flush_failed = 1;
642 }
617 if (r) { 643 if (r) {
618 DMWARN("%s: Failed to write header on dirty region log device", 644 DMWARN("%s: Failed to write header on dirty region log device",
619 lc->log_dev->name); 645 lc->log_dev->name);
@@ -656,18 +682,40 @@ static int core_flush(struct dm_dirty_log *log)
656 682
657static int disk_flush(struct dm_dirty_log *log) 683static int disk_flush(struct dm_dirty_log *log)
658{ 684{
659 int r; 685 int r, i;
660 struct log_c *lc = (struct log_c *) log->context; 686 struct log_c *lc = log->context;
661 687
662 /* only write if the log has changed */ 688 /* only write if the log has changed */
663 if (!lc->touched) 689 if (!lc->touched_cleaned && !lc->touched_dirtied)
664 return 0; 690 return 0;
665 691
692 if (lc->touched_cleaned && log->flush_callback_fn &&
693 log->flush_callback_fn(lc->ti)) {
694 /*
695 * At this point it is impossible to determine which
696 * regions are clean and which are dirty (without
697 * re-reading the log off disk). So mark all of them
698 * dirty.
699 */
700 lc->flush_failed = 1;
701 for (i = 0; i < lc->region_count; i++)
702 log_clear_bit(lc, lc->clean_bits, i);
703 }
704
666 r = rw_header(lc, WRITE); 705 r = rw_header(lc, WRITE);
667 if (r) 706 if (r)
668 fail_log_device(lc); 707 fail_log_device(lc);
669 else 708 else {
670 lc->touched = 0; 709 if (lc->touched_dirtied) {
710 r = flush_header(lc);
711 if (r) {
712 lc->log_dev_flush_failed = 1;
713 fail_log_device(lc);
714 } else
715 lc->touched_dirtied = 0;
716 }
717 lc->touched_cleaned = 0;
718 }
671 719
672 return r; 720 return r;
673} 721}
@@ -681,7 +729,8 @@ static void core_mark_region(struct dm_dirty_log *log, region_t region)
681static void core_clear_region(struct dm_dirty_log *log, region_t region) 729static void core_clear_region(struct dm_dirty_log *log, region_t region)
682{ 730{
683 struct log_c *lc = (struct log_c *) log->context; 731 struct log_c *lc = (struct log_c *) log->context;
684 log_set_bit(lc, lc->clean_bits, region); 732 if (likely(!lc->flush_failed))
733 log_set_bit(lc, lc->clean_bits, region);
685} 734}
686 735
687static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) 736static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
@@ -762,7 +811,9 @@ static int disk_status(struct dm_dirty_log *log, status_type_t status,
762 switch(status) { 811 switch(status) {
763 case STATUSTYPE_INFO: 812 case STATUSTYPE_INFO:
764 DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name, 813 DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
765 lc->log_dev_failed ? 'D' : 'A'); 814 lc->log_dev_flush_failed ? 'F' :
815 lc->log_dev_failed ? 'D' :
816 'A');
766 break; 817 break;
767 818
768 case STATUSTYPE_TABLE: 819 case STATUSTYPE_TABLE:
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index dce971dbdfa3..e81345a1d08f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -93,6 +93,10 @@ struct multipath {
93 * can resubmit bios on error. 93 * can resubmit bios on error.
94 */ 94 */
95 mempool_t *mpio_pool; 95 mempool_t *mpio_pool;
96
97 struct mutex work_mutex;
98
99 unsigned suspended; /* Don't create new I/O internally when set. */
96}; 100};
97 101
98/* 102/*
@@ -198,6 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
198 m->queue_io = 1; 202 m->queue_io = 1;
199 INIT_WORK(&m->process_queued_ios, process_queued_ios); 203 INIT_WORK(&m->process_queued_ios, process_queued_ios);
200 INIT_WORK(&m->trigger_event, trigger_event); 204 INIT_WORK(&m->trigger_event, trigger_event);
205 mutex_init(&m->work_mutex);
201 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 206 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
202 if (!m->mpio_pool) { 207 if (!m->mpio_pool) {
203 kfree(m); 208 kfree(m);
@@ -885,13 +890,18 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
885 return r; 890 return r;
886} 891}
887 892
888static void multipath_dtr(struct dm_target *ti) 893static void flush_multipath_work(void)
889{ 894{
890 struct multipath *m = (struct multipath *) ti->private;
891
892 flush_workqueue(kmpath_handlerd); 895 flush_workqueue(kmpath_handlerd);
893 flush_workqueue(kmultipathd); 896 flush_workqueue(kmultipathd);
894 flush_scheduled_work(); 897 flush_scheduled_work();
898}
899
900static void multipath_dtr(struct dm_target *ti)
901{
902 struct multipath *m = ti->private;
903
904 flush_multipath_work();
895 free_multipath(m); 905 free_multipath(m);
896} 906}
897 907
@@ -1261,6 +1271,16 @@ static void multipath_presuspend(struct dm_target *ti)
1261 queue_if_no_path(m, 0, 1); 1271 queue_if_no_path(m, 0, 1);
1262} 1272}
1263 1273
1274static void multipath_postsuspend(struct dm_target *ti)
1275{
1276 struct multipath *m = ti->private;
1277
1278 mutex_lock(&m->work_mutex);
1279 m->suspended = 1;
1280 flush_multipath_work();
1281 mutex_unlock(&m->work_mutex);
1282}
1283
1264/* 1284/*
1265 * Restore the queue_if_no_path setting. 1285 * Restore the queue_if_no_path setting.
1266 */ 1286 */
@@ -1269,6 +1289,10 @@ static void multipath_resume(struct dm_target *ti)
1269 struct multipath *m = (struct multipath *) ti->private; 1289 struct multipath *m = (struct multipath *) ti->private;
1270 unsigned long flags; 1290 unsigned long flags;
1271 1291
1292 mutex_lock(&m->work_mutex);
1293 m->suspended = 0;
1294 mutex_unlock(&m->work_mutex);
1295
1272 spin_lock_irqsave(&m->lock, flags); 1296 spin_lock_irqsave(&m->lock, flags);
1273 m->queue_if_no_path = m->saved_queue_if_no_path; 1297 m->queue_if_no_path = m->saved_queue_if_no_path;
1274 spin_unlock_irqrestore(&m->lock, flags); 1298 spin_unlock_irqrestore(&m->lock, flags);
@@ -1397,51 +1421,71 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
1397 1421
1398static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) 1422static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1399{ 1423{
1400 int r; 1424 int r = -EINVAL;
1401 struct dm_dev *dev; 1425 struct dm_dev *dev;
1402 struct multipath *m = (struct multipath *) ti->private; 1426 struct multipath *m = (struct multipath *) ti->private;
1403 action_fn action; 1427 action_fn action;
1404 1428
1429 mutex_lock(&m->work_mutex);
1430
1431 if (m->suspended) {
1432 r = -EBUSY;
1433 goto out;
1434 }
1435
1436 if (dm_suspended(ti)) {
1437 r = -EBUSY;
1438 goto out;
1439 }
1440
1405 if (argc == 1) { 1441 if (argc == 1) {
1406 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) 1442 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
1407 return queue_if_no_path(m, 1, 0); 1443 r = queue_if_no_path(m, 1, 0);
1408 else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) 1444 goto out;
1409 return queue_if_no_path(m, 0, 0); 1445 } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
1446 r = queue_if_no_path(m, 0, 0);
1447 goto out;
1448 }
1410 } 1449 }
1411 1450
1412 if (argc != 2) 1451 if (argc != 2) {
1413 goto error; 1452 DMWARN("Unrecognised multipath message received.");
1453 goto out;
1454 }
1414 1455
1415 if (!strnicmp(argv[0], MESG_STR("disable_group"))) 1456 if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
1416 return bypass_pg_num(m, argv[1], 1); 1457 r = bypass_pg_num(m, argv[1], 1);
1417 else if (!strnicmp(argv[0], MESG_STR("enable_group"))) 1458 goto out;
1418 return bypass_pg_num(m, argv[1], 0); 1459 } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
1419 else if (!strnicmp(argv[0], MESG_STR("switch_group"))) 1460 r = bypass_pg_num(m, argv[1], 0);
1420 return switch_pg_num(m, argv[1]); 1461 goto out;
1421 else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) 1462 } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
1463 r = switch_pg_num(m, argv[1]);
1464 goto out;
1465 } else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
1422 action = reinstate_path; 1466 action = reinstate_path;
1423 else if (!strnicmp(argv[0], MESG_STR("fail_path"))) 1467 else if (!strnicmp(argv[0], MESG_STR("fail_path")))
1424 action = fail_path; 1468 action = fail_path;
1425 else 1469 else {
1426 goto error; 1470 DMWARN("Unrecognised multipath message received.");
1471 goto out;
1472 }
1427 1473
1428 r = dm_get_device(ti, argv[1], ti->begin, ti->len, 1474 r = dm_get_device(ti, argv[1], ti->begin, ti->len,
1429 dm_table_get_mode(ti->table), &dev); 1475 dm_table_get_mode(ti->table), &dev);
1430 if (r) { 1476 if (r) {
1431 DMWARN("message: error getting device %s", 1477 DMWARN("message: error getting device %s",
1432 argv[1]); 1478 argv[1]);
1433 return -EINVAL; 1479 goto out;
1434 } 1480 }
1435 1481
1436 r = action_dev(m, dev, action); 1482 r = action_dev(m, dev, action);
1437 1483
1438 dm_put_device(ti, dev); 1484 dm_put_device(ti, dev);
1439 1485
1486out:
1487 mutex_unlock(&m->work_mutex);
1440 return r; 1488 return r;
1441
1442error:
1443 DMWARN("Unrecognised multipath message received.");
1444 return -EINVAL;
1445} 1489}
1446 1490
1447static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, 1491static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
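The rewritten multipath_message() funnels every message through m->work_mutex and refuses work with -EBUSY while the target is suspended, so messages can no longer race with the postsuspend flush of the work queues. A compact userspace sketch of that gatekeeping pattern, with pthread primitives standing in for the kernel mutex and the names purely illustrative:

#include <stdio.h>
#include <pthread.h>
#include <errno.h>

struct target {
    pthread_mutex_t work_mutex;
    int suspended;
};

/* Reject work while suspended; otherwise run it under the mutex so it
 * cannot overlap a suspend/resume transition. */
static int handle_message(struct target *t, const char *msg)
{
    int r = 0;

    pthread_mutex_lock(&t->work_mutex);
    if (t->suspended) {
        r = -EBUSY;
        goto out;
    }
    printf("handling '%s'\n", msg);
out:
    pthread_mutex_unlock(&t->work_mutex);
    return r;
}

int main(void)
{
    struct target t = { .work_mutex = PTHREAD_MUTEX_INITIALIZER, .suspended = 0 };

    handle_message(&t, "fail_path");
    t.suspended = 1;
    printf("while suspended: %d\n", handle_message(&t, "fail_path"));
    return 0;
}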
@@ -1567,13 +1611,14 @@ out:
1567 *---------------------------------------------------------------*/ 1611 *---------------------------------------------------------------*/
1568static struct target_type multipath_target = { 1612static struct target_type multipath_target = {
1569 .name = "multipath", 1613 .name = "multipath",
1570 .version = {1, 1, 0}, 1614 .version = {1, 1, 1},
1571 .module = THIS_MODULE, 1615 .module = THIS_MODULE,
1572 .ctr = multipath_ctr, 1616 .ctr = multipath_ctr,
1573 .dtr = multipath_dtr, 1617 .dtr = multipath_dtr,
1574 .map_rq = multipath_map, 1618 .map_rq = multipath_map,
1575 .rq_end_io = multipath_end_io, 1619 .rq_end_io = multipath_end_io,
1576 .presuspend = multipath_presuspend, 1620 .presuspend = multipath_presuspend,
1621 .postsuspend = multipath_postsuspend,
1577 .resume = multipath_resume, 1622 .resume = multipath_resume,
1578 .status = multipath_status, 1623 .status = multipath_status,
1579 .message = multipath_message, 1624 .message = multipath_message,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index cc9dc79b0784..ad779bd13aec 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -35,6 +35,7 @@ static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
35 *---------------------------------------------------------------*/ 35 *---------------------------------------------------------------*/
36enum dm_raid1_error { 36enum dm_raid1_error {
37 DM_RAID1_WRITE_ERROR, 37 DM_RAID1_WRITE_ERROR,
38 DM_RAID1_FLUSH_ERROR,
38 DM_RAID1_SYNC_ERROR, 39 DM_RAID1_SYNC_ERROR,
39 DM_RAID1_READ_ERROR 40 DM_RAID1_READ_ERROR
40}; 41};
@@ -57,6 +58,7 @@ struct mirror_set {
57 struct bio_list reads; 58 struct bio_list reads;
58 struct bio_list writes; 59 struct bio_list writes;
59 struct bio_list failures; 60 struct bio_list failures;
61 struct bio_list holds; /* bios are waiting until suspend */
60 62
61 struct dm_region_hash *rh; 63 struct dm_region_hash *rh;
62 struct dm_kcopyd_client *kcopyd_client; 64 struct dm_kcopyd_client *kcopyd_client;
@@ -67,6 +69,7 @@ struct mirror_set {
67 region_t nr_regions; 69 region_t nr_regions;
68 int in_sync; 70 int in_sync;
69 int log_failure; 71 int log_failure;
72 int leg_failure;
70 atomic_t suspend; 73 atomic_t suspend;
71 74
72 atomic_t default_mirror; /* Default mirror */ 75 atomic_t default_mirror; /* Default mirror */
@@ -179,6 +182,17 @@ static void set_default_mirror(struct mirror *m)
179 atomic_set(&ms->default_mirror, m - m0); 182 atomic_set(&ms->default_mirror, m - m0);
180} 183}
181 184
185static struct mirror *get_valid_mirror(struct mirror_set *ms)
186{
187 struct mirror *m;
188
189 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
190 if (!atomic_read(&m->error_count))
191 return m;
192
193 return NULL;
194}
195
182/* fail_mirror 196/* fail_mirror
183 * @m: mirror device to fail 197 * @m: mirror device to fail
184 * @error_type: one of the enum's, DM_RAID1_*_ERROR 198 * @error_type: one of the enum's, DM_RAID1_*_ERROR
@@ -198,6 +212,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
198 struct mirror_set *ms = m->ms; 212 struct mirror_set *ms = m->ms;
199 struct mirror *new; 213 struct mirror *new;
200 214
215 ms->leg_failure = 1;
216
201 /* 217 /*
202 * error_count is used for nothing more than a 218 * error_count is used for nothing more than a
203 * simple way to tell if a device has encountered 219 * simple way to tell if a device has encountered
@@ -224,19 +240,50 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
224 goto out; 240 goto out;
225 } 241 }
226 242
227 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++) 243 new = get_valid_mirror(ms);
228 if (!atomic_read(&new->error_count)) { 244 if (new)
229 set_default_mirror(new); 245 set_default_mirror(new);
230 break; 246 else
231 }
232
233 if (unlikely(new == ms->mirror + ms->nr_mirrors))
234 DMWARN("All sides of mirror have failed."); 247 DMWARN("All sides of mirror have failed.");
235 248
236out: 249out:
237 schedule_work(&ms->trigger_event); 250 schedule_work(&ms->trigger_event);
238} 251}
239 252
253static int mirror_flush(struct dm_target *ti)
254{
255 struct mirror_set *ms = ti->private;
256 unsigned long error_bits;
257
258 unsigned int i;
259 struct dm_io_region io[ms->nr_mirrors];
260 struct mirror *m;
261 struct dm_io_request io_req = {
262 .bi_rw = WRITE_BARRIER,
263 .mem.type = DM_IO_KMEM,
264 .mem.ptr.bvec = NULL,
265 .client = ms->io_client,
266 };
267
268 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
269 io[i].bdev = m->dev->bdev;
270 io[i].sector = 0;
271 io[i].count = 0;
272 }
273
274 error_bits = -1;
275 dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
276 if (unlikely(error_bits != 0)) {
277 for (i = 0; i < ms->nr_mirrors; i++)
278 if (test_bit(i, &error_bits))
279 fail_mirror(ms->mirror + i,
280 DM_RAID1_FLUSH_ERROR);
281 return -EIO;
282 }
283
284 return 0;
285}
286
240/*----------------------------------------------------------------- 287/*-----------------------------------------------------------------
241 * Recovery. 288 * Recovery.
242 * 289 *
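mirror_flush() above issues a single dm_io() carrying an empty barrier region for every leg and then inspects the returned error_bits mask, failing exactly the legs whose bit is set before reporting -EIO. The per-leg bit layout can be exercised with a few lines of plain C; the mask value below is made up for the demo:

#include <stdio.h>

#define NR_MIRRORS 3

static void fail_mirror(unsigned i)
{
    printf("leg %u failed its flush\n", i);
}

int main(void)
{
    /* Pretend dm_io() reported a failure on leg 1 only: bit 1 set. */
    unsigned long error_bits = 1UL << 1;
    unsigned i;

    for (i = 0; i < NR_MIRRORS; i++)
        if (error_bits & (1UL << i))    /* same test as test_bit(i, ...) */
            fail_mirror(i);

    return error_bits ? 1 : 0;          /* mirror_flush() returns -EIO on any failure */
}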
@@ -396,6 +443,8 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
396 */ 443 */
397static sector_t map_sector(struct mirror *m, struct bio *bio) 444static sector_t map_sector(struct mirror *m, struct bio *bio)
398{ 445{
446 if (unlikely(!bio->bi_size))
447 return 0;
399 return m->offset + (bio->bi_sector - m->ms->ti->begin); 448 return m->offset + (bio->bi_sector - m->ms->ti->begin);
400} 449}
401 450
@@ -413,6 +462,27 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
413 io->count = bio->bi_size >> 9; 462 io->count = bio->bi_size >> 9;
414} 463}
415 464
465static void hold_bio(struct mirror_set *ms, struct bio *bio)
466{
467 /*
468 * If device is suspended, complete the bio.
469 */
470 if (atomic_read(&ms->suspend)) {
471 if (dm_noflush_suspending(ms->ti))
472 bio_endio(bio, DM_ENDIO_REQUEUE);
473 else
474 bio_endio(bio, -EIO);
475 return;
476 }
477
478 /*
479 * Hold bio until the suspend is complete.
480 */
481 spin_lock_irq(&ms->lock);
482 bio_list_add(&ms->holds, bio);
483 spin_unlock_irq(&ms->lock);
484}
485
416/*----------------------------------------------------------------- 486/*-----------------------------------------------------------------
417 * Reads 487 * Reads
418 *---------------------------------------------------------------*/ 488 *---------------------------------------------------------------*/
@@ -511,7 +581,6 @@ static void write_callback(unsigned long error, void *context)
511 unsigned i, ret = 0; 581 unsigned i, ret = 0;
512 struct bio *bio = (struct bio *) context; 582 struct bio *bio = (struct bio *) context;
513 struct mirror_set *ms; 583 struct mirror_set *ms;
514 int uptodate = 0;
515 int should_wake = 0; 584 int should_wake = 0;
516 unsigned long flags; 585 unsigned long flags;
517 586
@@ -524,36 +593,27 @@ static void write_callback(unsigned long error, void *context)
524 * This way we handle both writes to SYNC and NOSYNC 593 * This way we handle both writes to SYNC and NOSYNC
525 * regions with the same code. 594 * regions with the same code.
526 */ 595 */
527 if (likely(!error)) 596 if (likely(!error)) {
528 goto out; 597 bio_endio(bio, ret);
598 return;
599 }
529 600
530 for (i = 0; i < ms->nr_mirrors; i++) 601 for (i = 0; i < ms->nr_mirrors; i++)
531 if (test_bit(i, &error)) 602 if (test_bit(i, &error))
532 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); 603 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
533 else
534 uptodate = 1;
535 604
536 if (unlikely(!uptodate)) { 605 /*
537 DMERR("All replicated volumes dead, failing I/O"); 606 * Need to raise event. Since raising
538 /* None of the writes succeeded, fail the I/O. */ 607 * events can block, we need to do it in
539 ret = -EIO; 608 * the main thread.
540 } else if (errors_handled(ms)) { 609 */
541 /* 610 spin_lock_irqsave(&ms->lock, flags);
542 * Need to raise event. Since raising 611 if (!ms->failures.head)
543 * events can block, we need to do it in 612 should_wake = 1;
544 * the main thread. 613 bio_list_add(&ms->failures, bio);
545 */ 614 spin_unlock_irqrestore(&ms->lock, flags);
546 spin_lock_irqsave(&ms->lock, flags); 615 if (should_wake)
547 if (!ms->failures.head) 616 wakeup_mirrord(ms);
548 should_wake = 1;
549 bio_list_add(&ms->failures, bio);
550 spin_unlock_irqrestore(&ms->lock, flags);
551 if (should_wake)
552 wakeup_mirrord(ms);
553 return;
554 }
555out:
556 bio_endio(bio, ret);
557} 617}
558 618
559static void do_write(struct mirror_set *ms, struct bio *bio) 619static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -562,7 +622,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
562 struct dm_io_region io[ms->nr_mirrors], *dest = io; 622 struct dm_io_region io[ms->nr_mirrors], *dest = io;
563 struct mirror *m; 623 struct mirror *m;
564 struct dm_io_request io_req = { 624 struct dm_io_request io_req = {
565 .bi_rw = WRITE, 625 .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
566 .mem.type = DM_IO_BVEC, 626 .mem.type = DM_IO_BVEC,
567 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 627 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
568 .notify.fn = write_callback, 628 .notify.fn = write_callback,
@@ -603,6 +663,11 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
603 bio_list_init(&requeue); 663 bio_list_init(&requeue);
604 664
605 while ((bio = bio_list_pop(writes))) { 665 while ((bio = bio_list_pop(writes))) {
666 if (unlikely(bio_empty_barrier(bio))) {
667 bio_list_add(&sync, bio);
668 continue;
669 }
670
606 region = dm_rh_bio_to_region(ms->rh, bio); 671 region = dm_rh_bio_to_region(ms->rh, bio);
607 672
608 if (log->type->is_remote_recovering && 673 if (log->type->is_remote_recovering &&
@@ -672,8 +737,12 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
672 dm_rh_delay(ms->rh, bio); 737 dm_rh_delay(ms->rh, bio);
673 738
674 while ((bio = bio_list_pop(&nosync))) { 739 while ((bio = bio_list_pop(&nosync))) {
675 map_bio(get_default_mirror(ms), bio); 740 if (unlikely(ms->leg_failure) && errors_handled(ms))
676 generic_make_request(bio); 741 hold_bio(ms, bio);
742 else {
743 map_bio(get_default_mirror(ms), bio);
744 generic_make_request(bio);
745 }
677 } 746 }
678} 747}
679 748
@@ -681,20 +750,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
681{ 750{
682 struct bio *bio; 751 struct bio *bio;
683 752
684 if (!failures->head) 753 if (likely(!failures->head))
685 return;
686
687 if (!ms->log_failure) {
688 while ((bio = bio_list_pop(failures))) {
689 ms->in_sync = 0;
690 dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
691 }
692 return; 754 return;
693 }
694 755
695 /* 756 /*
696 * If the log has failed, unattempted writes are being 757 * If the log has failed, unattempted writes are being
697 * put on the failures list. We can't issue those writes 758 * put on the holds list. We can't issue those writes
698 * until a log has been marked, so we must store them. 759 * until a log has been marked, so we must store them.
699 * 760 *
700 * If a 'noflush' suspend is in progress, we can requeue 761 * If a 'noflush' suspend is in progress, we can requeue
@@ -709,23 +770,27 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
709 * for us to treat them the same and requeue them 770 * for us to treat them the same and requeue them
710 * as well. 771 * as well.
711 */ 772 */
712 if (dm_noflush_suspending(ms->ti)) { 773 while ((bio = bio_list_pop(failures))) {
713 while ((bio = bio_list_pop(failures))) 774 if (!ms->log_failure) {
714 bio_endio(bio, DM_ENDIO_REQUEUE); 775 ms->in_sync = 0;
715 return; 776 dm_rh_mark_nosync(ms->rh, bio);
716 } 777 }
717 778
718 if (atomic_read(&ms->suspend)) { 779 /*
719 while ((bio = bio_list_pop(failures))) 780 * If all the legs are dead, fail the I/O.
781 * If we have been told to handle errors, hold the bio
782 * and wait for userspace to deal with the problem.
783 * Otherwise pretend that the I/O succeeded. (This would
784 * be wrong if the failed leg returned after reboot and
785 * got replicated back to the good legs.)
786 */
787 if (!get_valid_mirror(ms))
720 bio_endio(bio, -EIO); 788 bio_endio(bio, -EIO);
721 return; 789 else if (errors_handled(ms))
790 hold_bio(ms, bio);
791 else
792 bio_endio(bio, 0);
722 } 793 }
723
724 spin_lock_irq(&ms->lock);
725 bio_list_merge(&ms->failures, failures);
726 spin_unlock_irq(&ms->lock);
727
728 delayed_wake(ms);
729} 794}
730 795
731static void trigger_event(struct work_struct *work) 796static void trigger_event(struct work_struct *work)
@@ -784,12 +849,17 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
784 } 849 }
785 850
786 spin_lock_init(&ms->lock); 851 spin_lock_init(&ms->lock);
852 bio_list_init(&ms->reads);
853 bio_list_init(&ms->writes);
854 bio_list_init(&ms->failures);
855 bio_list_init(&ms->holds);
787 856
788 ms->ti = ti; 857 ms->ti = ti;
789 ms->nr_mirrors = nr_mirrors; 858 ms->nr_mirrors = nr_mirrors;
790 ms->nr_regions = dm_sector_div_up(ti->len, region_size); 859 ms->nr_regions = dm_sector_div_up(ti->len, region_size);
791 ms->in_sync = 0; 860 ms->in_sync = 0;
792 ms->log_failure = 0; 861 ms->log_failure = 0;
862 ms->leg_failure = 0;
793 atomic_set(&ms->suspend, 0); 863 atomic_set(&ms->suspend, 0);
794 atomic_set(&ms->default_mirror, DEFAULT_MIRROR); 864 atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
795 865
@@ -889,7 +959,8 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
889 return NULL; 959 return NULL;
890 } 960 }
891 961
892 dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2); 962 dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
963 argv + 2);
893 if (!dl) { 964 if (!dl) {
894 ti->error = "Error creating mirror dirty log"; 965 ti->error = "Error creating mirror dirty log";
895 return NULL; 966 return NULL;
@@ -995,6 +1066,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
995 1066
996 ti->private = ms; 1067 ti->private = ms;
997 ti->split_io = dm_rh_get_region_size(ms->rh); 1068 ti->split_io = dm_rh_get_region_size(ms->rh);
1069 ti->num_flush_requests = 1;
998 1070
999 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); 1071 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1000 if (!ms->kmirrord_wq) { 1072 if (!ms->kmirrord_wq) {
@@ -1122,7 +1194,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1122 * We need to dec pending if this was a write. 1194 * We need to dec pending if this was a write.
1123 */ 1195 */
1124 if (rw == WRITE) { 1196 if (rw == WRITE) {
1125 dm_rh_dec(ms->rh, map_context->ll); 1197 if (likely(!bio_empty_barrier(bio)))
1198 dm_rh_dec(ms->rh, map_context->ll);
1126 return error; 1199 return error;
1127 } 1200 }
1128 1201
@@ -1180,6 +1253,9 @@ static void mirror_presuspend(struct dm_target *ti)
1180 struct mirror_set *ms = (struct mirror_set *) ti->private; 1253 struct mirror_set *ms = (struct mirror_set *) ti->private;
1181 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); 1254 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1182 1255
1256 struct bio_list holds;
1257 struct bio *bio;
1258
1183 atomic_set(&ms->suspend, 1); 1259 atomic_set(&ms->suspend, 1);
1184 1260
1185 /* 1261 /*
@@ -1202,6 +1278,22 @@ static void mirror_presuspend(struct dm_target *ti)
1202 * we know that all of our I/O has been pushed. 1278 * we know that all of our I/O has been pushed.
1203 */ 1279 */
1204 flush_workqueue(ms->kmirrord_wq); 1280 flush_workqueue(ms->kmirrord_wq);
1281
1282 /*
1283 * Now that ms->suspend is set and the workqueue has been flushed, no
1284 * more entries can be added to the ms->holds list, so process it.
1285 *
1286 * Bios can still arrive concurrently with or after this
1287 * presuspend function, but they cannot join the hold list
1288 * because ms->suspend is set.
1289 */
1290 spin_lock_irq(&ms->lock);
1291 holds = ms->holds;
1292 bio_list_init(&ms->holds);
1293 spin_unlock_irq(&ms->lock);
1294
1295 while ((bio = bio_list_pop(&holds)))
1296 hold_bio(ms, bio);
1205} 1297}
1206 1298
1207static void mirror_postsuspend(struct dm_target *ti) 1299static void mirror_postsuspend(struct dm_target *ti)
@@ -1244,7 +1336,8 @@ static char device_status_char(struct mirror *m)
1244 if (!atomic_read(&(m->error_count))) 1336 if (!atomic_read(&(m->error_count)))
1245 return 'A'; 1337 return 'A';
1246 1338
1247 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : 1339 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
1340 (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
1248 (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : 1341 (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
1249 (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; 1342 (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
1250} 1343}
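
The do_writes() and do_failures() hunks above turn failed writes from a single batched path into a per-bio decision. The sketch below is only an illustration of that decision order, not the kernel code; struct mirror_state and its fields are simplified stand-ins for the mirror_set fields and helpers that appear in the diff (log_failure, get_valid_mirror(), errors_handled(), hold_bio()).

#include <stdio.h>

/* Minimal stand-ins for the mirror_set fields the new do_failures() consults. */
struct mirror_state {
	int log_failure;	/* the dirty log itself has failed        */
	int have_valid_leg;	/* get_valid_mirror() would find a device */
	int errors_handled;	/* the "handle_errors" feature is enabled */
};

/* Follows the per-bio steps added to do_failures() in the hunk above. */
static void handle_failed_write(const struct mirror_state *ms)
{
	/* Step 1: with a working log the region is simply marked out of sync. */
	if (!ms->log_failure)
		printf("  dm_rh_mark_nosync(): region will be resynced later\n");

	/* Step 2: decide what happens to the bio itself. */
	if (!ms->have_valid_leg)
		printf("  bio_endio(bio, -EIO): every leg is dead\n");
	else if (ms->errors_handled)
		printf("  hold_bio(): park the bio until userspace repairs the mirror\n");
	else
		printf("  bio_endio(bio, 0): pretend the write succeeded\n");
}

int main(void)
{
	struct mirror_state cases[] = {
		{ 0, 1, 1 },	/* working log: mark nosync, then hold     */
		{ 1, 0, 1 },	/* log failed and no legs left: -EIO       */
		{ 1, 1, 1 },	/* log failed, errors handled: hold        */
		{ 1, 1, 0 },	/* log failed, errors ignored: complete OK */
	};
	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		printf("case %u:\n", i);
		handle_failed_write(&cases[i]);
	}
	return 0;
}

Holding a bio instead of completing it is what gives a userspace daemon the chance to reconfigure the mirror first; mirror_presuspend() (further down in the same file) re-runs hold_bio() on everything still parked once no new bios can join the list.
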
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 36dbe29f2fd6..5f19ceb6fe91 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -79,6 +79,11 @@ struct dm_region_hash {
79 struct list_head recovered_regions; 79 struct list_head recovered_regions;
80 struct list_head failed_recovered_regions; 80 struct list_head failed_recovered_regions;
81 81
82 /*
83 * If there was a barrier failure no regions can be marked clean.
84 */
85 int barrier_failure;
86
82 void *context; 87 void *context;
83 sector_t target_begin; 88 sector_t target_begin;
84 89
@@ -211,6 +216,7 @@ struct dm_region_hash *dm_region_hash_create(
211 INIT_LIST_HEAD(&rh->quiesced_regions); 216 INIT_LIST_HEAD(&rh->quiesced_regions);
212 INIT_LIST_HEAD(&rh->recovered_regions); 217 INIT_LIST_HEAD(&rh->recovered_regions);
213 INIT_LIST_HEAD(&rh->failed_recovered_regions); 218 INIT_LIST_HEAD(&rh->failed_recovered_regions);
219 rh->barrier_failure = 0;
214 220
215 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, 221 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
216 sizeof(struct dm_region)); 222 sizeof(struct dm_region));
@@ -377,8 +383,6 @@ static void complete_resync_work(struct dm_region *reg, int success)
377/* dm_rh_mark_nosync 383/* dm_rh_mark_nosync
378 * @ms 384 * @ms
379 * @bio 385 * @bio
380 * @done
381 * @error
382 * 386 *
383 * The bio was written on some mirror(s) but failed on other mirror(s). 387 * The bio was written on some mirror(s) but failed on other mirror(s).
384 * We can successfully endio the bio but should avoid the region being 388 * We can successfully endio the bio but should avoid the region being
@@ -386,8 +390,7 @@ static void complete_resync_work(struct dm_region *reg, int success)
386 * 390 *
387 * This function is _not_ safe in interrupt context! 391 * This function is _not_ safe in interrupt context!
388 */ 392 */
389void dm_rh_mark_nosync(struct dm_region_hash *rh, 393void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
390 struct bio *bio, unsigned done, int error)
391{ 394{
392 unsigned long flags; 395 unsigned long flags;
393 struct dm_dirty_log *log = rh->log; 396 struct dm_dirty_log *log = rh->log;
@@ -395,6 +398,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
395 region_t region = dm_rh_bio_to_region(rh, bio); 398 region_t region = dm_rh_bio_to_region(rh, bio);
396 int recovering = 0; 399 int recovering = 0;
397 400
401 if (bio_empty_barrier(bio)) {
402 rh->barrier_failure = 1;
403 return;
404 }
405
398 /* We must inform the log that the sync count has changed. */ 406 /* We must inform the log that the sync count has changed. */
399 log->type->set_region_sync(log, region, 0); 407 log->type->set_region_sync(log, region, 0);
400 408
@@ -419,7 +427,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
419 BUG_ON(!list_empty(&reg->list)); 427 BUG_ON(!list_empty(&reg->list));
420 spin_unlock_irqrestore(&rh->region_lock, flags); 428 spin_unlock_irqrestore(&rh->region_lock, flags);
421 429
422 bio_endio(bio, error);
423 if (recovering) 430 if (recovering)
424 complete_resync_work(reg, 0); 431 complete_resync_work(reg, 0);
425} 432}
@@ -515,8 +522,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
515{ 522{
516 struct bio *bio; 523 struct bio *bio;
517 524
518 for (bio = bios->head; bio; bio = bio->bi_next) 525 for (bio = bios->head; bio; bio = bio->bi_next) {
526 if (bio_empty_barrier(bio))
527 continue;
519 rh_inc(rh, dm_rh_bio_to_region(rh, bio)); 528 rh_inc(rh, dm_rh_bio_to_region(rh, bio));
529 }
520} 530}
521EXPORT_SYMBOL_GPL(dm_rh_inc_pending); 531EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
522 532
@@ -544,7 +554,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region)
544 */ 554 */
545 555
546 /* do nothing for DM_RH_NOSYNC */ 556 /* do nothing for DM_RH_NOSYNC */
547 if (reg->state == DM_RH_RECOVERING) { 557 if (unlikely(rh->barrier_failure)) {
558 /*
559 * If a write barrier failed some time ago, we
560 * don't know whether or not this write made it
561 * to the disk, so we must resync the device.
562 */
563 reg->state = DM_RH_NOSYNC;
564 } else if (reg->state == DM_RH_RECOVERING) {
548 list_add_tail(&reg->list, &rh->quiesced_regions); 565 list_add_tail(&reg->list, &rh->quiesced_regions);
549 } else if (reg->state == DM_RH_DIRTY) { 566 } else if (reg->state == DM_RH_DIRTY) {
550 reg->state = DM_RH_CLEAN; 567 reg->state = DM_RH_CLEAN;
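
The dm-region-hash hunks above add a sticky barrier_failure flag: once an empty barrier bio fails, dm_rh_dec() no longer promotes regions to CLEAN and forces them to NOSYNC instead, because writes in flight around the failed barrier may or may not have reached stable storage. A minimal stand-alone model of that completion rule, with illustrative names rather than the kernel API:

#include <stdio.h>

enum region_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };

struct region_hash_model {
	int barrier_failure;	/* set when an empty barrier bio fails */
};

/*
 * Models the completion path in dm_rh_dec(): a failed barrier poisons
 * every later completion, forcing a resync instead of a clean mark.
 */
static enum region_state on_last_write_done(const struct region_hash_model *rh,
					    enum region_state cur)
{
	if (rh->barrier_failure)
		return RH_NOSYNC;	/* cannot trust that the write reached disk */
	if (cur == RH_DIRTY)
		return RH_CLEAN;	/* normal case: region can be marked clean  */
	return cur;			/* RECOVERING/NOSYNC are handled elsewhere  */
}

int main(void)
{
	struct region_hash_model rh = { .barrier_failure = 0 };

	printf("before barrier failure: DIRTY -> %d (expect CLEAN=0)\n",
	       on_last_write_done(&rh, RH_DIRTY));

	rh.barrier_failure = 1;	/* an empty barrier came back with an error */
	printf("after barrier failure:  DIRTY -> %d (expect NOSYNC=2)\n",
	       on_last_write_done(&rh, RH_DIRTY));
	return 0;
}
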
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 0c746420c008..7d08879689ac 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -55,6 +55,8 @@
55 */ 55 */
56#define SNAPSHOT_DISK_VERSION 1 56#define SNAPSHOT_DISK_VERSION 1
57 57
58#define NUM_SNAPSHOT_HDR_CHUNKS 1
59
58struct disk_header { 60struct disk_header {
59 uint32_t magic; 61 uint32_t magic;
60 62
@@ -120,7 +122,22 @@ struct pstore {
120 122
121 /* 123 /*
122 * The next free chunk for an exception. 124 * The next free chunk for an exception.
125 *
126 * When creating exceptions, all the chunks here and above are
127 * free. It holds the next chunk to be allocated. On rare
128 * occasions (e.g. after a system crash) holes can be left in
129 * the exception store because chunks can be committed out of
130 * order.
131 *
132 * When merging exceptions, it does not necessarily mean all the
133 * chunks here and above are free. It holds the value it would
134 * have held if all chunks had been committed in order of
135 * allocation. Consequently the value may occasionally be
136 * slightly too low, but since it's only used for 'status' and
137 * it can never reach its minimum value too early this doesn't
138 * matter.
123 */ 139 */
140
124 chunk_t next_free; 141 chunk_t next_free;
125 142
126 /* 143 /*
@@ -214,7 +231,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
214 int metadata) 231 int metadata)
215{ 232{
216 struct dm_io_region where = { 233 struct dm_io_region where = {
217 .bdev = ps->store->cow->bdev, 234 .bdev = dm_snap_cow(ps->store->snap)->bdev,
218 .sector = ps->store->chunk_size * chunk, 235 .sector = ps->store->chunk_size * chunk,
219 .count = ps->store->chunk_size, 236 .count = ps->store->chunk_size,
220 }; 237 };
@@ -294,7 +311,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
294 */ 311 */
295 if (!ps->store->chunk_size) { 312 if (!ps->store->chunk_size) {
296 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, 313 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
297 bdev_logical_block_size(ps->store->cow->bdev) >> 9); 314 bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
315 bdev) >> 9);
298 ps->store->chunk_mask = ps->store->chunk_size - 1; 316 ps->store->chunk_mask = ps->store->chunk_size - 1;
299 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; 317 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
300 chunk_size_supplied = 0; 318 chunk_size_supplied = 0;
@@ -408,6 +426,15 @@ static void write_exception(struct pstore *ps,
408 e->new_chunk = cpu_to_le64(de->new_chunk); 426 e->new_chunk = cpu_to_le64(de->new_chunk);
409} 427}
410 428
429static void clear_exception(struct pstore *ps, uint32_t index)
430{
431 struct disk_exception *e = get_exception(ps, index);
432
433 /* clear it */
434 e->old_chunk = 0;
435 e->new_chunk = 0;
436}
437
411/* 438/*
412 * Registers the exceptions that are present in the current area. 439 * Registers the exceptions that are present in the current area.
413 * 'full' is filled in to indicate if the area has been 440 * 'full' is filled in to indicate if the area has been
@@ -489,11 +516,23 @@ static struct pstore *get_info(struct dm_exception_store *store)
489 return (struct pstore *) store->context; 516 return (struct pstore *) store->context;
490} 517}
491 518
492static void persistent_fraction_full(struct dm_exception_store *store, 519static void persistent_usage(struct dm_exception_store *store,
493 sector_t *numerator, sector_t *denominator) 520 sector_t *total_sectors,
521 sector_t *sectors_allocated,
522 sector_t *metadata_sectors)
494{ 523{
495 *numerator = get_info(store)->next_free * store->chunk_size; 524 struct pstore *ps = get_info(store);
496 *denominator = get_dev_size(store->cow->bdev); 525
526 *sectors_allocated = ps->next_free * store->chunk_size;
527 *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
528
529 /*
530 * First chunk is the fixed header.
531 * Then there are (ps->current_area + 1) metadata chunks, each one
532 * separated from the next by ps->exceptions_per_area data chunks.
533 */
534 *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
535 store->chunk_size;
497} 536}
498 537
499static void persistent_dtr(struct dm_exception_store *store) 538static void persistent_dtr(struct dm_exception_store *store)
@@ -552,44 +591,40 @@ static int persistent_read_metadata(struct dm_exception_store *store,
552 ps->current_area = 0; 591 ps->current_area = 0;
553 zero_memory_area(ps); 592 zero_memory_area(ps);
554 r = zero_disk_area(ps, 0); 593 r = zero_disk_area(ps, 0);
555 if (r) { 594 if (r)
556 DMWARN("zero_disk_area(0) failed"); 595 DMWARN("zero_disk_area(0) failed");
557 return r; 596 return r;
558 } 597 }
559 } else { 598 /*
560 /* 599 * Sanity checks.
561 * Sanity checks. 600 */
562 */ 601 if (ps->version != SNAPSHOT_DISK_VERSION) {
563 if (ps->version != SNAPSHOT_DISK_VERSION) { 602 DMWARN("unable to handle snapshot disk version %d",
564 DMWARN("unable to handle snapshot disk version %d", 603 ps->version);
565 ps->version); 604 return -EINVAL;
566 return -EINVAL; 605 }
567 }
568 606
569 /* 607 /*
570 * Metadata are valid, but snapshot is invalidated 608 * Metadata are valid, but snapshot is invalidated
571 */ 609 */
572 if (!ps->valid) 610 if (!ps->valid)
573 return 1; 611 return 1;
574 612
575 /* 613 /*
576 * Read the metadata. 614 * Read the metadata.
577 */ 615 */
578 r = read_exceptions(ps, callback, callback_context); 616 r = read_exceptions(ps, callback, callback_context);
579 if (r)
580 return r;
581 }
582 617
583 return 0; 618 return r;
584} 619}
585 620
586static int persistent_prepare_exception(struct dm_exception_store *store, 621static int persistent_prepare_exception(struct dm_exception_store *store,
587 struct dm_snap_exception *e) 622 struct dm_exception *e)
588{ 623{
589 struct pstore *ps = get_info(store); 624 struct pstore *ps = get_info(store);
590 uint32_t stride; 625 uint32_t stride;
591 chunk_t next_free; 626 chunk_t next_free;
592 sector_t size = get_dev_size(store->cow->bdev); 627 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
593 628
594 /* Is there enough room ? */ 629 /* Is there enough room ? */
595 if (size < ((ps->next_free + 1) * store->chunk_size)) 630 if (size < ((ps->next_free + 1) * store->chunk_size))
@@ -611,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
611} 646}
612 647
613static void persistent_commit_exception(struct dm_exception_store *store, 648static void persistent_commit_exception(struct dm_exception_store *store,
614 struct dm_snap_exception *e, 649 struct dm_exception *e,
615 void (*callback) (void *, int success), 650 void (*callback) (void *, int success),
616 void *callback_context) 651 void *callback_context)
617{ 652{
@@ -672,6 +707,85 @@ static void persistent_commit_exception(struct dm_exception_store *store,
672 ps->callback_count = 0; 707 ps->callback_count = 0;
673} 708}
674 709
710static int persistent_prepare_merge(struct dm_exception_store *store,
711 chunk_t *last_old_chunk,
712 chunk_t *last_new_chunk)
713{
714 struct pstore *ps = get_info(store);
715 struct disk_exception de;
716 int nr_consecutive;
717 int r;
718
719 /*
720 * When current area is empty, move back to preceding area.
721 */
722 if (!ps->current_committed) {
723 /*
724 * Have we finished?
725 */
726 if (!ps->current_area)
727 return 0;
728
729 ps->current_area--;
730 r = area_io(ps, READ);
731 if (r < 0)
732 return r;
733 ps->current_committed = ps->exceptions_per_area;
734 }
735
736 read_exception(ps, ps->current_committed - 1, &de);
737 *last_old_chunk = de.old_chunk;
738 *last_new_chunk = de.new_chunk;
739
740 /*
741 * Find number of consecutive chunks within the current area,
742 * working backwards.
743 */
744 for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
745 nr_consecutive++) {
746 read_exception(ps, ps->current_committed - 1 - nr_consecutive,
747 &de);
748 if (de.old_chunk != *last_old_chunk - nr_consecutive ||
749 de.new_chunk != *last_new_chunk - nr_consecutive)
750 break;
751 }
752
753 return nr_consecutive;
754}
755
756static int persistent_commit_merge(struct dm_exception_store *store,
757 int nr_merged)
758{
759 int r, i;
760 struct pstore *ps = get_info(store);
761
762 BUG_ON(nr_merged > ps->current_committed);
763
764 for (i = 0; i < nr_merged; i++)
765 clear_exception(ps, ps->current_committed - 1 - i);
766
767 r = area_io(ps, WRITE);
768 if (r < 0)
769 return r;
770
771 ps->current_committed -= nr_merged;
772
773 /*
774 * At this stage, only persistent_usage() uses ps->next_free, so
775 * we make no attempt to keep ps->next_free strictly accurate
776 * as exceptions may have been committed out-of-order originally.
777 * Once a snapshot has become merging, we set it to the value it
778 * would have held had all the exceptions been committed in order.
779 *
780 * ps->current_area does not get reduced by prepare_merge() until
781 * after commit_merge() has removed the nr_merged previous exceptions.
782 */
783 ps->next_free = (area_location(ps, ps->current_area) - 1) +
784 (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
785
786 return 0;
787}
788
675static void persistent_drop_snapshot(struct dm_exception_store *store) 789static void persistent_drop_snapshot(struct dm_exception_store *store)
676{ 790{
677 struct pstore *ps = get_info(store); 791 struct pstore *ps = get_info(store);
@@ -697,7 +811,7 @@ static int persistent_ctr(struct dm_exception_store *store,
697 ps->area = NULL; 811 ps->area = NULL;
698 ps->zero_area = NULL; 812 ps->zero_area = NULL;
699 ps->header_area = NULL; 813 ps->header_area = NULL;
700 ps->next_free = 2; /* skipping the header and first area */ 814 ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
701 ps->current_committed = 0; 815 ps->current_committed = 0;
702 816
703 ps->callback_count = 0; 817 ps->callback_count = 0;
@@ -726,8 +840,7 @@ static unsigned persistent_status(struct dm_exception_store *store,
726 case STATUSTYPE_INFO: 840 case STATUSTYPE_INFO:
727 break; 841 break;
728 case STATUSTYPE_TABLE: 842 case STATUSTYPE_TABLE:
729 DMEMIT(" %s P %llu", store->cow->name, 843 DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
730 (unsigned long long)store->chunk_size);
731 } 844 }
732 845
733 return sz; 846 return sz;
@@ -741,8 +854,10 @@ static struct dm_exception_store_type _persistent_type = {
741 .read_metadata = persistent_read_metadata, 854 .read_metadata = persistent_read_metadata,
742 .prepare_exception = persistent_prepare_exception, 855 .prepare_exception = persistent_prepare_exception,
743 .commit_exception = persistent_commit_exception, 856 .commit_exception = persistent_commit_exception,
857 .prepare_merge = persistent_prepare_merge,
858 .commit_merge = persistent_commit_merge,
744 .drop_snapshot = persistent_drop_snapshot, 859 .drop_snapshot = persistent_drop_snapshot,
745 .fraction_full = persistent_fraction_full, 860 .usage = persistent_usage,
746 .status = persistent_status, 861 .status = persistent_status,
747}; 862};
748 863
@@ -754,8 +869,10 @@ static struct dm_exception_store_type _persistent_compat_type = {
754 .read_metadata = persistent_read_metadata, 869 .read_metadata = persistent_read_metadata,
755 .prepare_exception = persistent_prepare_exception, 870 .prepare_exception = persistent_prepare_exception,
756 .commit_exception = persistent_commit_exception, 871 .commit_exception = persistent_commit_exception,
872 .prepare_merge = persistent_prepare_merge,
873 .commit_merge = persistent_commit_merge,
757 .drop_snapshot = persistent_drop_snapshot, 874 .drop_snapshot = persistent_drop_snapshot,
758 .fraction_full = persistent_fraction_full, 875 .usage = persistent_usage,
759 .status = persistent_status, 876 .status = persistent_status,
760}; 877};
761 878
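
The persistent exception store hunks above describe the on-disk layout implicitly: one header chunk (NUM_SNAPSHOT_HDR_CHUNKS), then each metadata area chunk followed by exceptions_per_area data chunks. Both persistent_usage() and persistent_commit_merge() derive chunk counts from that layout. The sketch below works an example with made-up numbers; the area_location() helper is not shown in the diff, so the formula used for it here is an assumption based on that layout description.

#include <stdio.h>
#include <stdint.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1

/*
 * Assumed layout: header chunk, then a metadata area chunk followed by
 * exceptions_per_area data chunks, repeating. area_location() itself is
 * not part of the hunks above; this only mirrors the layout they describe.
 */
static uint64_t area_location(uint64_t exceptions_per_area, uint64_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + (exceptions_per_area + 1) * area;
}

int main(void)
{
	uint64_t chunk_size = 16;		/* sectors per chunk (8 KiB)          */
	uint64_t exceptions_per_area = 512;	/* disk_exceptions per metadata chunk */
	uint64_t current_area = 3;		/* metadata areas written so far      */
	uint64_t current_committed = 100;	/* exceptions left in that area       */

	/* persistent_usage(): one header chunk plus (current_area + 1) areas. */
	uint64_t metadata_sectors =
		(current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * chunk_size;

	/*
	 * persistent_commit_merge(): reconstruct next_free as if all the
	 * remaining exceptions had been committed strictly in order.
	 */
	uint64_t next_free = (area_location(exceptions_per_area, current_area) - 1) +
			     (current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;

	printf("metadata sectors reported by usage(): %llu\n",
	       (unsigned long long)metadata_sectors);
	printf("reconstructed next_free chunk:        %llu\n",
	       (unsigned long long)next_free);
	return 0;
}
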
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index cde5aa558e6d..a0898a66a2f8 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -36,10 +36,10 @@ static int transient_read_metadata(struct dm_exception_store *store,
36} 36}
37 37
38static int transient_prepare_exception(struct dm_exception_store *store, 38static int transient_prepare_exception(struct dm_exception_store *store,
39 struct dm_snap_exception *e) 39 struct dm_exception *e)
40{ 40{
41 struct transient_c *tc = store->context; 41 struct transient_c *tc = store->context;
42 sector_t size = get_dev_size(store->cow->bdev); 42 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
43 43
44 if (size < (tc->next_free + store->chunk_size)) 44 if (size < (tc->next_free + store->chunk_size))
45 return -1; 45 return -1;
@@ -51,7 +51,7 @@ static int transient_prepare_exception(struct dm_exception_store *store,
51} 51}
52 52
53static void transient_commit_exception(struct dm_exception_store *store, 53static void transient_commit_exception(struct dm_exception_store *store,
54 struct dm_snap_exception *e, 54 struct dm_exception *e,
55 void (*callback) (void *, int success), 55 void (*callback) (void *, int success),
56 void *callback_context) 56 void *callback_context)
57{ 57{
@@ -59,11 +59,14 @@ static void transient_commit_exception(struct dm_exception_store *store,
59 callback(callback_context, 1); 59 callback(callback_context, 1);
60} 60}
61 61
62static void transient_fraction_full(struct dm_exception_store *store, 62static void transient_usage(struct dm_exception_store *store,
63 sector_t *numerator, sector_t *denominator) 63 sector_t *total_sectors,
64 sector_t *sectors_allocated,
65 sector_t *metadata_sectors)
64{ 66{
65 *numerator = ((struct transient_c *) store->context)->next_free; 67 *sectors_allocated = ((struct transient_c *) store->context)->next_free;
66 *denominator = get_dev_size(store->cow->bdev); 68 *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
69 *metadata_sectors = 0;
67} 70}
68 71
69static int transient_ctr(struct dm_exception_store *store, 72static int transient_ctr(struct dm_exception_store *store,
@@ -91,8 +94,7 @@ static unsigned transient_status(struct dm_exception_store *store,
91 case STATUSTYPE_INFO: 94 case STATUSTYPE_INFO:
92 break; 95 break;
93 case STATUSTYPE_TABLE: 96 case STATUSTYPE_TABLE:
94 DMEMIT(" %s N %llu", store->cow->name, 97 DMEMIT(" N %llu", (unsigned long long)store->chunk_size);
95 (unsigned long long)store->chunk_size);
96 } 98 }
97 99
98 return sz; 100 return sz;
@@ -106,7 +108,7 @@ static struct dm_exception_store_type _transient_type = {
106 .read_metadata = transient_read_metadata, 108 .read_metadata = transient_read_metadata,
107 .prepare_exception = transient_prepare_exception, 109 .prepare_exception = transient_prepare_exception,
108 .commit_exception = transient_commit_exception, 110 .commit_exception = transient_commit_exception,
109 .fraction_full = transient_fraction_full, 111 .usage = transient_usage,
110 .status = transient_status, 112 .status = transient_status,
111}; 113};
112 114
@@ -118,7 +120,7 @@ static struct dm_exception_store_type _transient_compat_type = {
118 .read_metadata = transient_read_metadata, 120 .read_metadata = transient_read_metadata,
119 .prepare_exception = transient_prepare_exception, 121 .prepare_exception = transient_prepare_exception,
120 .commit_exception = transient_commit_exception, 122 .commit_exception = transient_commit_exception,
121 .fraction_full = transient_fraction_full, 123 .usage = transient_usage,
122 .status = transient_status, 124 .status = transient_status,
123}; 125};
124 126
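
The fraction_full() callback, which reported a single numerator/denominator pair, is replaced in both store types by usage(), which reports total, allocated and metadata sectors separately. The snippet below is a purely hypothetical consumer of that triple, included only to show how the three values relate; it is not how dm-snap formats its status line.

#include <stdio.h>

/* Hypothetical report built from the (total, allocated, metadata) triple
 * that the new usage() callback fills in. */
static void report(const char *type, unsigned long long total,
		   unsigned long long allocated, unsigned long long metadata)
{
	printf("%s store: %llu/%llu sectors used, %llu of those are metadata\n",
	       type, allocated, total, metadata);
}

int main(void)
{
	/* persistent store: allocated includes the header and metadata areas */
	report("P", 409600, 20512, 1024);
	/* transient store: no on-disk metadata at all */
	report("N", 409600, 19488, 0);
	return 0;
}
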
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 3a3ba46e6d4b..ee8eb283650d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -25,6 +25,11 @@
25 25
26#define DM_MSG_PREFIX "snapshots" 26#define DM_MSG_PREFIX "snapshots"
27 27
28static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
29
30#define dm_target_is_snapshot_merge(ti) \
31 ((ti)->type->name == dm_snapshot_merge_target_name)
32
28/* 33/*
29 * The percentage increment we will wake up users at 34 * The percentage increment we will wake up users at
30 */ 35 */
@@ -49,7 +54,7 @@
49#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ 54#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
50 (DM_TRACKED_CHUNK_HASH_SIZE - 1)) 55 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
51 56
52struct exception_table { 57struct dm_exception_table {
53 uint32_t hash_mask; 58 uint32_t hash_mask;
54 unsigned hash_shift; 59 unsigned hash_shift;
55 struct list_head *table; 60 struct list_head *table;
@@ -59,22 +64,31 @@ struct dm_snapshot {
59 struct rw_semaphore lock; 64 struct rw_semaphore lock;
60 65
61 struct dm_dev *origin; 66 struct dm_dev *origin;
67 struct dm_dev *cow;
68
69 struct dm_target *ti;
62 70
63 /* List of snapshots per Origin */ 71 /* List of snapshots per Origin */
64 struct list_head list; 72 struct list_head list;
65 73
66 /* You can't use a snapshot if this is 0 (e.g. if full) */ 74 /*
75 * You can't use a snapshot if this is 0 (e.g. if full).
76 * A snapshot-merge target never clears this.
77 */
67 int valid; 78 int valid;
68 79
69 /* Origin writes don't trigger exceptions until this is set */ 80 /* Origin writes don't trigger exceptions until this is set */
70 int active; 81 int active;
71 82
83 /* Whether or not owning mapped_device is suspended */
84 int suspended;
85
72 mempool_t *pending_pool; 86 mempool_t *pending_pool;
73 87
74 atomic_t pending_exceptions_count; 88 atomic_t pending_exceptions_count;
75 89
76 struct exception_table pending; 90 struct dm_exception_table pending;
77 struct exception_table complete; 91 struct dm_exception_table complete;
78 92
79 /* 93 /*
80 * pe_lock protects all pending_exception operations and access 94 * pe_lock protects all pending_exception operations and access
@@ -95,8 +109,51 @@ struct dm_snapshot {
95 mempool_t *tracked_chunk_pool; 109 mempool_t *tracked_chunk_pool;
96 spinlock_t tracked_chunk_lock; 110 spinlock_t tracked_chunk_lock;
97 struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; 111 struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
112
113 /*
114 * The merge operation failed if this flag is set.
115 * Failure modes are handled as follows:
116 * - I/O error reading the header
117 * => don't load the target; abort.
118 * - Header does not have "valid" flag set
119 * => use the origin; forget about the snapshot.
120 * - I/O error when reading exceptions
121 * => don't load the target; abort.
122 * (We can't use the intermediate origin state.)
123 * - I/O error while merging
124 * => stop merging; set merge_failed; process I/O normally.
125 */
126 int merge_failed;
127
128 /* Wait for events based on state_bits */
129 unsigned long state_bits;
130
131 /* Range of chunks currently being merged. */
132 chunk_t first_merging_chunk;
133 int num_merging_chunks;
134
135 /*
136 * Incoming bios that overlap with chunks being merged must wait
137 * for them to be committed.
138 */
139 struct bio_list bios_queued_during_merge;
98}; 140};
99 141
142/*
143 * state_bits:
144 * RUNNING_MERGE - Merge operation is in progress.
145 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
146 * cleared afterwards.
147 */
148#define RUNNING_MERGE 0
149#define SHUTDOWN_MERGE 1
150
151struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
152{
153 return s->cow;
154}
155EXPORT_SYMBOL(dm_snap_cow);
156
100static struct workqueue_struct *ksnapd; 157static struct workqueue_struct *ksnapd;
101static void flush_queued_bios(struct work_struct *work); 158static void flush_queued_bios(struct work_struct *work);
102 159
@@ -116,7 +173,7 @@ static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
116} 173}
117 174
118struct dm_snap_pending_exception { 175struct dm_snap_pending_exception {
119 struct dm_snap_exception e; 176 struct dm_exception e;
120 177
121 /* 178 /*
122 * Origin buffers waiting for this to complete are held 179 * Origin buffers waiting for this to complete are held
@@ -125,28 +182,6 @@ struct dm_snap_pending_exception {
125 struct bio_list origin_bios; 182 struct bio_list origin_bios;
126 struct bio_list snapshot_bios; 183 struct bio_list snapshot_bios;
127 184
128 /*
129 * Short-term queue of pending exceptions prior to submission.
130 */
131 struct list_head list;
132
133 /*
134 * The primary pending_exception is the one that holds
135 * the ref_count and the list of origin_bios for a
136 * group of pending_exceptions. It is always last to get freed.
137 * These fields get set up when writing to the origin.
138 */
139 struct dm_snap_pending_exception *primary_pe;
140
141 /*
142 * Number of pending_exceptions processing this chunk.
143 * When this drops to zero we must complete the origin bios.
144 * If incrementing or decrementing this, hold pe->snap->lock for
145 * the sibling concerned and not pe->primary_pe->snap->lock unless
146 * they are the same.
147 */
148 atomic_t ref_count;
149
150 /* Pointer back to snapshot context */ 185 /* Pointer back to snapshot context */
151 struct dm_snapshot *snap; 186 struct dm_snapshot *snap;
152 187
@@ -222,6 +257,16 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
222} 257}
223 258
224/* 259/*
260 * This conflicting I/O is extremely improbable in the caller,
261 * so msleep(1) is sufficient and there is no need for a wait queue.
262 */
263static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
264{
265 while (__chunk_is_tracked(s, chunk))
266 msleep(1);
267}
268
269/*
225 * One of these per registered origin, held in the snapshot_origins hash 270 * One of these per registered origin, held in the snapshot_origins hash
226 */ 271 */
227struct origin { 272struct origin {
@@ -243,6 +288,10 @@ struct origin {
243static struct list_head *_origins; 288static struct list_head *_origins;
244static struct rw_semaphore _origins_lock; 289static struct rw_semaphore _origins_lock;
245 290
291static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
292static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
293static uint64_t _pending_exceptions_done_count;
294
246static int init_origin_hash(void) 295static int init_origin_hash(void)
247{ 296{
248 int i; 297 int i;
@@ -291,22 +340,144 @@ static void __insert_origin(struct origin *o)
291} 340}
292 341
293/* 342/*
343 * _origins_lock must be held when calling this function.
344 * Returns number of snapshots registered using the supplied cow device, plus:
345 * snap_src - a snapshot suitable for use as a source of exception handover
346 * snap_dest - a snapshot capable of receiving exception handover.
347 * snap_merge - an existing snapshot-merge target linked to the same origin.
348 * There can be at most one snapshot-merge target. The parameter is optional.
349 *
350 * Possible return values and states of snap_src and snap_dest.
351 * 0: NULL, NULL - first new snapshot
352 * 1: snap_src, NULL - normal snapshot
353 * 2: snap_src, snap_dest - waiting for handover
354 * 2: snap_src, NULL - handed over, waiting for old to be deleted
355 * 1: NULL, snap_dest - source got destroyed without handover
356 */
357static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
358 struct dm_snapshot **snap_src,
359 struct dm_snapshot **snap_dest,
360 struct dm_snapshot **snap_merge)
361{
362 struct dm_snapshot *s;
363 struct origin *o;
364 int count = 0;
365 int active;
366
367 o = __lookup_origin(snap->origin->bdev);
368 if (!o)
369 goto out;
370
371 list_for_each_entry(s, &o->snapshots, list) {
372 if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
373 *snap_merge = s;
374 if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
375 continue;
376
377 down_read(&s->lock);
378 active = s->active;
379 up_read(&s->lock);
380
381 if (active) {
382 if (snap_src)
383 *snap_src = s;
384 } else if (snap_dest)
385 *snap_dest = s;
386
387 count++;
388 }
389
390out:
391 return count;
392}
393
394/*
395 * On success, returns 1 if this snapshot is a handover destination,
396 * otherwise returns 0.
397 */
398static int __validate_exception_handover(struct dm_snapshot *snap)
399{
400 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
401 struct dm_snapshot *snap_merge = NULL;
402
403 /* Does snapshot need exceptions handed over to it? */
404 if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
405 &snap_merge) == 2) ||
406 snap_dest) {
407 snap->ti->error = "Snapshot cow pairing for exception "
408 "table handover failed";
409 return -EINVAL;
410 }
411
412 /*
413 * If no snap_src was found, snap cannot become a handover
414 * destination.
415 */
416 if (!snap_src)
417 return 0;
418
419 /*
420 * Non-snapshot-merge handover?
421 */
422 if (!dm_target_is_snapshot_merge(snap->ti))
423 return 1;
424
425 /*
426 * Do not allow more than one merging snapshot.
427 */
428 if (snap_merge) {
429 snap->ti->error = "A snapshot is already merging.";
430 return -EINVAL;
431 }
432
433 if (!snap_src->store->type->prepare_merge ||
434 !snap_src->store->type->commit_merge) {
435 snap->ti->error = "Snapshot exception store does not "
436 "support snapshot-merge.";
437 return -EINVAL;
438 }
439
440 return 1;
441}
442
443static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
444{
445 struct dm_snapshot *l;
446
447 /* Sort the list according to chunk size, largest-first smallest-last */
448 list_for_each_entry(l, &o->snapshots, list)
449 if (l->store->chunk_size < s->store->chunk_size)
450 break;
451 list_add_tail(&s->list, &l->list);
452}
453
454/*
294 * Make a note of the snapshot and its origin so we can look it 455 * Make a note of the snapshot and its origin so we can look it
295 * up when the origin has a write on it. 456 * up when the origin has a write on it.
457 *
458 * Also validate snapshot exception store handovers.
459 * On success, returns 1 if this registration is a handover destination,
460 * otherwise returns 0.
296 */ 461 */
297static int register_snapshot(struct dm_snapshot *snap) 462static int register_snapshot(struct dm_snapshot *snap)
298{ 463{
299 struct dm_snapshot *l; 464 struct origin *o, *new_o = NULL;
300 struct origin *o, *new_o;
301 struct block_device *bdev = snap->origin->bdev; 465 struct block_device *bdev = snap->origin->bdev;
466 int r = 0;
302 467
303 new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); 468 new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
304 if (!new_o) 469 if (!new_o)
305 return -ENOMEM; 470 return -ENOMEM;
306 471
307 down_write(&_origins_lock); 472 down_write(&_origins_lock);
308 o = __lookup_origin(bdev);
309 473
474 r = __validate_exception_handover(snap);
475 if (r < 0) {
476 kfree(new_o);
477 goto out;
478 }
479
480 o = __lookup_origin(bdev);
310 if (o) 481 if (o)
311 kfree(new_o); 482 kfree(new_o);
312 else { 483 else {
@@ -320,14 +491,27 @@ static int register_snapshot(struct dm_snapshot *snap)
320 __insert_origin(o); 491 __insert_origin(o);
321 } 492 }
322 493
323 /* Sort the list according to chunk size, largest-first smallest-last */ 494 __insert_snapshot(o, snap);
324 list_for_each_entry(l, &o->snapshots, list) 495
325 if (l->store->chunk_size < snap->store->chunk_size) 496out:
326 break; 497 up_write(&_origins_lock);
327 list_add_tail(&snap->list, &l->list); 498
499 return r;
500}
501
502/*
503 * Move snapshot to correct place in list according to chunk size.
504 */
505static void reregister_snapshot(struct dm_snapshot *s)
506{
507 struct block_device *bdev = s->origin->bdev;
508
509 down_write(&_origins_lock);
510
511 list_del(&s->list);
512 __insert_snapshot(__lookup_origin(bdev), s);
328 513
329 up_write(&_origins_lock); 514 up_write(&_origins_lock);
330 return 0;
331} 515}
332 516
333static void unregister_snapshot(struct dm_snapshot *s) 517static void unregister_snapshot(struct dm_snapshot *s)
@@ -338,7 +522,7 @@ static void unregister_snapshot(struct dm_snapshot *s)
338 o = __lookup_origin(s->origin->bdev); 522 o = __lookup_origin(s->origin->bdev);
339 523
340 list_del(&s->list); 524 list_del(&s->list);
341 if (list_empty(&o->snapshots)) { 525 if (o && list_empty(&o->snapshots)) {
342 list_del(&o->hash_list); 526 list_del(&o->hash_list);
343 kfree(o); 527 kfree(o);
344 } 528 }
@@ -351,8 +535,8 @@ static void unregister_snapshot(struct dm_snapshot *s)
351 * The lowest hash_shift bits of the chunk number are ignored, allowing 535 * The lowest hash_shift bits of the chunk number are ignored, allowing
352 * some consecutive chunks to be grouped together. 536 * some consecutive chunks to be grouped together.
353 */ 537 */
354static int init_exception_table(struct exception_table *et, uint32_t size, 538static int dm_exception_table_init(struct dm_exception_table *et,
355 unsigned hash_shift) 539 uint32_t size, unsigned hash_shift)
356{ 540{
357 unsigned int i; 541 unsigned int i;
358 542
@@ -368,10 +552,11 @@ static int init_exception_table(struct exception_table *et, uint32_t size,
368 return 0; 552 return 0;
369} 553}
370 554
371static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) 555static void dm_exception_table_exit(struct dm_exception_table *et,
556 struct kmem_cache *mem)
372{ 557{
373 struct list_head *slot; 558 struct list_head *slot;
374 struct dm_snap_exception *ex, *next; 559 struct dm_exception *ex, *next;
375 int i, size; 560 int i, size;
376 561
377 size = et->hash_mask + 1; 562 size = et->hash_mask + 1;
@@ -385,19 +570,12 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
385 vfree(et->table); 570 vfree(et->table);
386} 571}
387 572
388static uint32_t exception_hash(struct exception_table *et, chunk_t chunk) 573static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
389{ 574{
390 return (chunk >> et->hash_shift) & et->hash_mask; 575 return (chunk >> et->hash_shift) & et->hash_mask;
391} 576}
392 577
393static void insert_exception(struct exception_table *eh, 578static void dm_remove_exception(struct dm_exception *e)
394 struct dm_snap_exception *e)
395{
396 struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
397 list_add(&e->hash_list, l);
398}
399
400static void remove_exception(struct dm_snap_exception *e)
401{ 579{
402 list_del(&e->hash_list); 580 list_del(&e->hash_list);
403} 581}
@@ -406,11 +584,11 @@ static void remove_exception(struct dm_snap_exception *e)
406 * Return the exception data for a sector, or NULL if not 584 * Return the exception data for a sector, or NULL if not
407 * remapped. 585 * remapped.
408 */ 586 */
409static struct dm_snap_exception *lookup_exception(struct exception_table *et, 587static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
410 chunk_t chunk) 588 chunk_t chunk)
411{ 589{
412 struct list_head *slot; 590 struct list_head *slot;
413 struct dm_snap_exception *e; 591 struct dm_exception *e;
414 592
415 slot = &et->table[exception_hash(et, chunk)]; 593 slot = &et->table[exception_hash(et, chunk)];
416 list_for_each_entry (e, slot, hash_list) 594 list_for_each_entry (e, slot, hash_list)
@@ -421,9 +599,9 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et,
421 return NULL; 599 return NULL;
422} 600}
423 601
424static struct dm_snap_exception *alloc_exception(void) 602static struct dm_exception *alloc_completed_exception(void)
425{ 603{
426 struct dm_snap_exception *e; 604 struct dm_exception *e;
427 605
428 e = kmem_cache_alloc(exception_cache, GFP_NOIO); 606 e = kmem_cache_alloc(exception_cache, GFP_NOIO);
429 if (!e) 607 if (!e)
@@ -432,7 +610,7 @@ static struct dm_snap_exception *alloc_exception(void)
432 return e; 610 return e;
433} 611}
434 612
435static void free_exception(struct dm_snap_exception *e) 613static void free_completed_exception(struct dm_exception *e)
436{ 614{
437 kmem_cache_free(exception_cache, e); 615 kmem_cache_free(exception_cache, e);
438} 616}
@@ -457,12 +635,11 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
457 atomic_dec(&s->pending_exceptions_count); 635 atomic_dec(&s->pending_exceptions_count);
458} 636}
459 637
460static void insert_completed_exception(struct dm_snapshot *s, 638static void dm_insert_exception(struct dm_exception_table *eh,
461 struct dm_snap_exception *new_e) 639 struct dm_exception *new_e)
462{ 640{
463 struct exception_table *eh = &s->complete;
464 struct list_head *l; 641 struct list_head *l;
465 struct dm_snap_exception *e = NULL; 642 struct dm_exception *e = NULL;
466 643
467 l = &eh->table[exception_hash(eh, new_e->old_chunk)]; 644 l = &eh->table[exception_hash(eh, new_e->old_chunk)];
468 645
@@ -478,7 +655,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
478 new_e->new_chunk == (dm_chunk_number(e->new_chunk) + 655 new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
479 dm_consecutive_chunk_count(e) + 1)) { 656 dm_consecutive_chunk_count(e) + 1)) {
480 dm_consecutive_chunk_count_inc(e); 657 dm_consecutive_chunk_count_inc(e);
481 free_exception(new_e); 658 free_completed_exception(new_e);
482 return; 659 return;
483 } 660 }
484 661
@@ -488,7 +665,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
488 dm_consecutive_chunk_count_inc(e); 665 dm_consecutive_chunk_count_inc(e);
489 e->old_chunk--; 666 e->old_chunk--;
490 e->new_chunk--; 667 e->new_chunk--;
491 free_exception(new_e); 668 free_completed_exception(new_e);
492 return; 669 return;
493 } 670 }
494 671
@@ -507,9 +684,9 @@ out:
507static int dm_add_exception(void *context, chunk_t old, chunk_t new) 684static int dm_add_exception(void *context, chunk_t old, chunk_t new)
508{ 685{
509 struct dm_snapshot *s = context; 686 struct dm_snapshot *s = context;
510 struct dm_snap_exception *e; 687 struct dm_exception *e;
511 688
512 e = alloc_exception(); 689 e = alloc_completed_exception();
513 if (!e) 690 if (!e)
514 return -ENOMEM; 691 return -ENOMEM;
515 692
@@ -518,11 +695,30 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
518 /* Consecutive_count is implicitly initialised to zero */ 695 /* Consecutive_count is implicitly initialised to zero */
519 e->new_chunk = new; 696 e->new_chunk = new;
520 697
521 insert_completed_exception(s, e); 698 dm_insert_exception(&s->complete, e);
522 699
523 return 0; 700 return 0;
524} 701}
525 702
703#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
704
705/*
706 * Return a minimum chunk size of all snapshots that have the specified origin.
707 * Return zero if the origin has no snapshots.
708 */
709static sector_t __minimum_chunk_size(struct origin *o)
710{
711 struct dm_snapshot *snap;
712 unsigned chunk_size = 0;
713
714 if (o)
715 list_for_each_entry(snap, &o->snapshots, list)
716 chunk_size = min_not_zero(chunk_size,
717 snap->store->chunk_size);
718
719 return chunk_size;
720}
721
526/* 722/*
527 * Hard coded magic. 723 * Hard coded magic.
528 */ 724 */
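
min_not_zero(), defined in the preceding hunk, treats zero as "unset" rather than as a candidate minimum. That is what lets __minimum_chunk_size() fold a list of snapshots down to the smallest configured chunk size while ignoring handover destinations whose chunk size is still zero (see the snapshot_ctr() hunk further down). A tiny stand-alone demonstration:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

int main(void)
{
	unsigned int chunk_size = 0;			/* 0 == no snapshot seen yet */
	unsigned int snaps[] = { 16, 0, 8, 32 };	/* 0 == chunk size not yet   */
							/* known (handover dest)     */

	for (unsigned int i = 0; i < sizeof(snaps) / sizeof(snaps[0]); i++)
		chunk_size = min_not_zero(chunk_size, snaps[i]);

	printf("minimum chunk size: %u\n", chunk_size);	/* prints 8 */
	return 0;
}
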
@@ -546,16 +742,18 @@ static int init_hash_tables(struct dm_snapshot *s)
546 * Calculate based on the size of the original volume or 742 * Calculate based on the size of the original volume or
547 * the COW volume... 743 * the COW volume...
548 */ 744 */
549 cow_dev_size = get_dev_size(s->store->cow->bdev); 745 cow_dev_size = get_dev_size(s->cow->bdev);
550 origin_dev_size = get_dev_size(s->origin->bdev); 746 origin_dev_size = get_dev_size(s->origin->bdev);
551 max_buckets = calc_max_buckets(); 747 max_buckets = calc_max_buckets();
552 748
553 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; 749 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
554 hash_size = min(hash_size, max_buckets); 750 hash_size = min(hash_size, max_buckets);
555 751
752 if (hash_size < 64)
753 hash_size = 64;
556 hash_size = rounddown_pow_of_two(hash_size); 754 hash_size = rounddown_pow_of_two(hash_size);
557 if (init_exception_table(&s->complete, hash_size, 755 if (dm_exception_table_init(&s->complete, hash_size,
558 DM_CHUNK_CONSECUTIVE_BITS)) 756 DM_CHUNK_CONSECUTIVE_BITS))
559 return -ENOMEM; 757 return -ENOMEM;
560 758
561 /* 759 /*
@@ -566,14 +764,284 @@ static int init_hash_tables(struct dm_snapshot *s)
566 if (hash_size < 64) 764 if (hash_size < 64)
567 hash_size = 64; 765 hash_size = 64;
568 766
569 if (init_exception_table(&s->pending, hash_size, 0)) { 767 if (dm_exception_table_init(&s->pending, hash_size, 0)) {
570 exit_exception_table(&s->complete, exception_cache); 768 dm_exception_table_exit(&s->complete, exception_cache);
571 return -ENOMEM; 769 return -ENOMEM;
572 } 770 }
573 771
574 return 0; 772 return 0;
575} 773}
576 774
775static void merge_shutdown(struct dm_snapshot *s)
776{
777 clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
778 smp_mb__after_clear_bit();
779 wake_up_bit(&s->state_bits, RUNNING_MERGE);
780}
781
782static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
783{
784 s->first_merging_chunk = 0;
785 s->num_merging_chunks = 0;
786
787 return bio_list_get(&s->bios_queued_during_merge);
788}
789
790/*
791 * Remove one chunk from the index of completed exceptions.
792 */
793static int __remove_single_exception_chunk(struct dm_snapshot *s,
794 chunk_t old_chunk)
795{
796 struct dm_exception *e;
797
798 e = dm_lookup_exception(&s->complete, old_chunk);
799 if (!e) {
800 DMERR("Corruption detected: exception for block %llu is "
801 "on disk but not in memory",
802 (unsigned long long)old_chunk);
803 return -EINVAL;
804 }
805
806 /*
807 * If this is the only chunk using this exception, remove exception.
808 */
809 if (!dm_consecutive_chunk_count(e)) {
810 dm_remove_exception(e);
811 free_completed_exception(e);
812 return 0;
813 }
814
815 /*
816 * The chunk may be either at the beginning or the end of a
817 * group of consecutive chunks - never in the middle. We are
818 * removing chunks in the opposite order to that in which they
819 * were added, so this should always be true.
820 * Decrement the consecutive chunk counter and adjust the
821 * starting point if necessary.
822 */
823 if (old_chunk == e->old_chunk) {
824 e->old_chunk++;
825 e->new_chunk++;
826 } else if (old_chunk != e->old_chunk +
827 dm_consecutive_chunk_count(e)) {
828 DMERR("Attempt to merge block %llu from the "
829 "middle of a chunk range [%llu - %llu]",
830 (unsigned long long)old_chunk,
831 (unsigned long long)e->old_chunk,
832 (unsigned long long)
833 e->old_chunk + dm_consecutive_chunk_count(e));
834 return -EINVAL;
835 }
836
837 dm_consecutive_chunk_count_dec(e);
838
839 return 0;
840}
841
842static void flush_bios(struct bio *bio);
843
844static int remove_single_exception_chunk(struct dm_snapshot *s)
845{
846 struct bio *b = NULL;
847 int r;
848 chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
849
850 down_write(&s->lock);
851
852 /*
853 * Process chunks (and associated exceptions) in reverse order
854 * so that dm_consecutive_chunk_count_dec() accounting works.
855 */
856 do {
857 r = __remove_single_exception_chunk(s, old_chunk);
858 if (r)
859 goto out;
860 } while (old_chunk-- > s->first_merging_chunk);
861
862 b = __release_queued_bios_after_merge(s);
863
864out:
865 up_write(&s->lock);
866 if (b)
867 flush_bios(b);
868
869 return r;
870}
871
872static int origin_write_extent(struct dm_snapshot *merging_snap,
873 sector_t sector, unsigned chunk_size);
874
875static void merge_callback(int read_err, unsigned long write_err,
876 void *context);
877
878static uint64_t read_pending_exceptions_done_count(void)
879{
880 uint64_t pending_exceptions_done;
881
882 spin_lock(&_pending_exceptions_done_spinlock);
883 pending_exceptions_done = _pending_exceptions_done_count;
884 spin_unlock(&_pending_exceptions_done_spinlock);
885
886 return pending_exceptions_done;
887}
888
889static void increment_pending_exceptions_done_count(void)
890{
891 spin_lock(&_pending_exceptions_done_spinlock);
892 _pending_exceptions_done_count++;
893 spin_unlock(&_pending_exceptions_done_spinlock);
894
895 wake_up_all(&_pending_exceptions_done);
896}
897
898static void snapshot_merge_next_chunks(struct dm_snapshot *s)
899{
900 int i, linear_chunks;
901 chunk_t old_chunk, new_chunk;
902 struct dm_io_region src, dest;
903 sector_t io_size;
904 uint64_t previous_count;
905
906 BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
907 if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
908 goto shut;
909
910 /*
911 * valid flag never changes during merge, so no lock required.
912 */
913 if (!s->valid) {
914 DMERR("Snapshot is invalid: can't merge");
915 goto shut;
916 }
917
918 linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
919 &new_chunk);
920 if (linear_chunks <= 0) {
921 if (linear_chunks < 0) {
922 DMERR("Read error in exception store: "
923 "shutting down merge");
924 down_write(&s->lock);
925 s->merge_failed = 1;
926 up_write(&s->lock);
927 }
928 goto shut;
929 }
930
931 /* Adjust old_chunk and new_chunk to reflect start of linear region */
932 old_chunk = old_chunk + 1 - linear_chunks;
933 new_chunk = new_chunk + 1 - linear_chunks;
934
935 /*
936 * Use one (potentially large) I/O to copy all 'linear_chunks'
937 * from the exception store to the origin
938 */
939 io_size = linear_chunks * s->store->chunk_size;
940
941 dest.bdev = s->origin->bdev;
942 dest.sector = chunk_to_sector(s->store, old_chunk);
943 dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
944
945 src.bdev = s->cow->bdev;
946 src.sector = chunk_to_sector(s->store, new_chunk);
947 src.count = dest.count;
948
949 /*
950 * Reallocate any exceptions needed in other snapshots then
951 * wait for the pending exceptions to complete.
952 * Each time any pending exception (globally on the system)
953 * completes we are woken and repeat the process to find out
954 * if we can proceed. While this may not seem a particularly
955 * efficient algorithm, it is not expected to have any
956 * significant impact on performance.
957 */
958 previous_count = read_pending_exceptions_done_count();
959 while (origin_write_extent(s, dest.sector, io_size)) {
960 wait_event(_pending_exceptions_done,
961 (read_pending_exceptions_done_count() !=
962 previous_count));
963 /* Retry after the wait, until all exceptions are done. */
964 previous_count = read_pending_exceptions_done_count();
965 }
966
967 down_write(&s->lock);
968 s->first_merging_chunk = old_chunk;
969 s->num_merging_chunks = linear_chunks;
970 up_write(&s->lock);
971
972 /* Wait until writes to all 'linear_chunks' drain */
973 for (i = 0; i < linear_chunks; i++)
974 __check_for_conflicting_io(s, old_chunk + i);
975
976 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
977 return;
978
979shut:
980 merge_shutdown(s);
981}
982
983static void error_bios(struct bio *bio);
984
985static void merge_callback(int read_err, unsigned long write_err, void *context)
986{
987 struct dm_snapshot *s = context;
988 struct bio *b = NULL;
989
990 if (read_err || write_err) {
991 if (read_err)
992 DMERR("Read error: shutting down merge.");
993 else
994 DMERR("Write error: shutting down merge.");
995 goto shut;
996 }
997
998 if (s->store->type->commit_merge(s->store,
999 s->num_merging_chunks) < 0) {
1000 DMERR("Write error in exception store: shutting down merge");
1001 goto shut;
1002 }
1003
1004 if (remove_single_exception_chunk(s) < 0)
1005 goto shut;
1006
1007 snapshot_merge_next_chunks(s);
1008
1009 return;
1010
1011shut:
1012 down_write(&s->lock);
1013 s->merge_failed = 1;
1014 b = __release_queued_bios_after_merge(s);
1015 up_write(&s->lock);
1016 error_bios(b);
1017
1018 merge_shutdown(s);
1019}
1020
1021static void start_merge(struct dm_snapshot *s)
1022{
1023 if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1024 snapshot_merge_next_chunks(s);
1025}
1026
1027static int wait_schedule(void *ptr)
1028{
1029 schedule();
1030
1031 return 0;
1032}
1033
1034/*
1035 * Stop the merging process and wait until it finishes.
1036 */
1037static void stop_merge(struct dm_snapshot *s)
1038{
1039 set_bit(SHUTDOWN_MERGE, &s->state_bits);
1040 wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
1041 TASK_UNINTERRUPTIBLE);
1042 clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1043}
1044
577/* 1045/*
578 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> 1046 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
579 */ 1047 */
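
snapshot_merge_next_chunks() above asks the exception store for the longest run of consecutive exceptions ending at the most recently committed one; persistent_prepare_merge() (in the dm-snap-persistent.c hunks earlier) returns the last old/new chunk of that run plus its length, and the caller recovers the start of the run with old_chunk + 1 - linear_chunks. The stand-alone sketch below reproduces that backwards scan over an in-memory array, used here as a stand-in for reading disk_exceptions out of the current area; it is an illustration, not the kernel code.

#include <stdio.h>

struct exception { unsigned long long old_chunk, new_chunk; };

/*
 * Scan backwards from the newest committed exception and count how many
 * immediately preceding ones are consecutive in both old and new chunk
 * numbers - the same test persistent_prepare_merge() applies.
 */
static int prepare_merge(const struct exception *e, int committed,
			 unsigned long long *last_old, unsigned long long *last_new)
{
	int run;

	if (!committed)
		return 0;			/* nothing left to merge */

	*last_old = e[committed - 1].old_chunk;
	*last_new = e[committed - 1].new_chunk;

	for (run = 1; run < committed; run++)
		if (e[committed - 1 - run].old_chunk != *last_old - run ||
		    e[committed - 1 - run].new_chunk != *last_new - run)
			break;

	return run;
}

int main(void)
{
	/* committed in order: old chunks 7,8,9 form a consecutive run, 20 breaks it */
	struct exception area[] = { {3, 11}, {20, 12}, {7, 13}, {8, 14}, {9, 15} };
	unsigned long long last_old, last_new;

	int n = prepare_merge(area, 5, &last_old, &last_new);
	printf("merge %d chunks ending at old=%llu new=%llu, starting at old=%llu\n",
	       n, last_old, last_new, last_old + 1 - n);
	return 0;
}
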
@@ -582,50 +1050,73 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
582 struct dm_snapshot *s; 1050 struct dm_snapshot *s;
583 int i; 1051 int i;
584 int r = -EINVAL; 1052 int r = -EINVAL;
585 char *origin_path; 1053 char *origin_path, *cow_path;
586 struct dm_exception_store *store; 1054 unsigned args_used, num_flush_requests = 1;
587 unsigned args_used; 1055 fmode_t origin_mode = FMODE_READ;
588 1056
589 if (argc != 4) { 1057 if (argc != 4) {
590 ti->error = "requires exactly 4 arguments"; 1058 ti->error = "requires exactly 4 arguments";
591 r = -EINVAL; 1059 r = -EINVAL;
592 goto bad_args; 1060 goto bad;
1061 }
1062
1063 if (dm_target_is_snapshot_merge(ti)) {
1064 num_flush_requests = 2;
1065 origin_mode = FMODE_WRITE;
593 } 1066 }
594 1067
595 origin_path = argv[0]; 1068 origin_path = argv[0];
596 argv++; 1069 argv++;
597 argc--; 1070 argc--;
598 1071
599 r = dm_exception_store_create(ti, argc, argv, &args_used, &store); 1072 s = kmalloc(sizeof(*s), GFP_KERNEL);
1073 if (!s) {
1074 ti->error = "Cannot allocate snapshot context private "
1075 "structure";
1076 r = -ENOMEM;
1077 goto bad;
1078 }
1079
1080 cow_path = argv[0];
1081 argv++;
1082 argc--;
1083
1084 r = dm_get_device(ti, cow_path, 0, 0,
1085 FMODE_READ | FMODE_WRITE, &s->cow);
1086 if (r) {
1087 ti->error = "Cannot get COW device";
1088 goto bad_cow;
1089 }
1090
1091 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
600 if (r) { 1092 if (r) {
601 ti->error = "Couldn't create exception store"; 1093 ti->error = "Couldn't create exception store";
602 r = -EINVAL; 1094 r = -EINVAL;
603 goto bad_args; 1095 goto bad_store;
604 } 1096 }
605 1097
606 argv += args_used; 1098 argv += args_used;
607 argc -= args_used; 1099 argc -= args_used;
608 1100
609 s = kmalloc(sizeof(*s), GFP_KERNEL); 1101 r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
610 if (!s) {
611 ti->error = "Cannot allocate snapshot context private "
612 "structure";
613 r = -ENOMEM;
614 goto bad_snap;
615 }
616
617 r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
618 if (r) { 1102 if (r) {
619 ti->error = "Cannot get origin device"; 1103 ti->error = "Cannot get origin device";
620 goto bad_origin; 1104 goto bad_origin;
621 } 1105 }
622 1106
623 s->store = store; 1107 s->ti = ti;
624 s->valid = 1; 1108 s->valid = 1;
625 s->active = 0; 1109 s->active = 0;
1110 s->suspended = 0;
626 atomic_set(&s->pending_exceptions_count, 0); 1111 atomic_set(&s->pending_exceptions_count, 0);
627 init_rwsem(&s->lock); 1112 init_rwsem(&s->lock);
1113 INIT_LIST_HEAD(&s->list);
628 spin_lock_init(&s->pe_lock); 1114 spin_lock_init(&s->pe_lock);
1115 s->state_bits = 0;
1116 s->merge_failed = 0;
1117 s->first_merging_chunk = 0;
1118 s->num_merging_chunks = 0;
1119 bio_list_init(&s->bios_queued_during_merge);
629 1120
630 /* Allocate hash table for COW data */ 1121 /* Allocate hash table for COW data */
631 if (init_hash_tables(s)) { 1122 if (init_hash_tables(s)) {
@@ -659,39 +1150,55 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
659 1150
660 spin_lock_init(&s->tracked_chunk_lock); 1151 spin_lock_init(&s->tracked_chunk_lock);
661 1152
662 /* Metadata must only be loaded into one table at once */ 1153 bio_list_init(&s->queued_bios);
1154 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
1155
1156 ti->private = s;
1157 ti->num_flush_requests = num_flush_requests;
1158
1159 /* Add snapshot to the list of snapshots for this origin */
1160 /* Exceptions aren't triggered till snapshot_resume() is called */
1161 r = register_snapshot(s);
1162 if (r == -ENOMEM) {
1163 ti->error = "Snapshot origin struct allocation failed";
1164 goto bad_load_and_register;
1165 } else if (r < 0) {
1166 /* invalid handover, register_snapshot has set ti->error */
1167 goto bad_load_and_register;
1168 }
1169
1170 /*
1171 * Metadata must only be loaded into one table at once, so skip this
1172 * if metadata will be handed over during resume.
1173 * Chunk size will be set during the handover - set it to zero to
1174 * ensure it's ignored.
1175 */
1176 if (r > 0) {
1177 s->store->chunk_size = 0;
1178 return 0;
1179 }
1180
663 r = s->store->type->read_metadata(s->store, dm_add_exception, 1181 r = s->store->type->read_metadata(s->store, dm_add_exception,
664 (void *)s); 1182 (void *)s);
665 if (r < 0) { 1183 if (r < 0) {
666 ti->error = "Failed to read snapshot metadata"; 1184 ti->error = "Failed to read snapshot metadata";
667 goto bad_load_and_register; 1185 goto bad_read_metadata;
668 } else if (r > 0) { 1186 } else if (r > 0) {
669 s->valid = 0; 1187 s->valid = 0;
670 DMWARN("Snapshot is marked invalid."); 1188 DMWARN("Snapshot is marked invalid.");
671 } 1189 }
672 1190
673 bio_list_init(&s->queued_bios);
674 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
675
676 if (!s->store->chunk_size) { 1191 if (!s->store->chunk_size) {
677 ti->error = "Chunk size not set"; 1192 ti->error = "Chunk size not set";
678 goto bad_load_and_register; 1193 goto bad_read_metadata;
679 }
680
681 /* Add snapshot to the list of snapshots for this origin */
682 /* Exceptions aren't triggered till snapshot_resume() is called */
683 if (register_snapshot(s)) {
684 r = -EINVAL;
685 ti->error = "Cannot register snapshot origin";
686 goto bad_load_and_register;
687 } 1194 }
688
689 ti->private = s;
690 ti->split_io = s->store->chunk_size; 1195 ti->split_io = s->store->chunk_size;
691 ti->num_flush_requests = 1;
692 1196
693 return 0; 1197 return 0;
694 1198
1199bad_read_metadata:
1200 unregister_snapshot(s);
1201
695bad_load_and_register: 1202bad_load_and_register:
696 mempool_destroy(s->tracked_chunk_pool); 1203 mempool_destroy(s->tracked_chunk_pool);
697 1204
@@ -702,19 +1209,22 @@ bad_pending_pool:
702 dm_kcopyd_client_destroy(s->kcopyd_client); 1209 dm_kcopyd_client_destroy(s->kcopyd_client);
703 1210
704bad_kcopyd: 1211bad_kcopyd:
705 exit_exception_table(&s->pending, pending_cache); 1212 dm_exception_table_exit(&s->pending, pending_cache);
706 exit_exception_table(&s->complete, exception_cache); 1213 dm_exception_table_exit(&s->complete, exception_cache);
707 1214
708bad_hash_tables: 1215bad_hash_tables:
709 dm_put_device(ti, s->origin); 1216 dm_put_device(ti, s->origin);
710 1217
711bad_origin: 1218bad_origin:
712 kfree(s); 1219 dm_exception_store_destroy(s->store);
713 1220
714bad_snap: 1221bad_store:
715 dm_exception_store_destroy(store); 1222 dm_put_device(ti, s->cow);
1223
1224bad_cow:
1225 kfree(s);
716 1226
717bad_args: 1227bad:
718 return r; 1228 return r;
719} 1229}
720 1230
@@ -723,8 +1233,39 @@ static void __free_exceptions(struct dm_snapshot *s)
723 dm_kcopyd_client_destroy(s->kcopyd_client); 1233 dm_kcopyd_client_destroy(s->kcopyd_client);
724 s->kcopyd_client = NULL; 1234 s->kcopyd_client = NULL;
725 1235
726 exit_exception_table(&s->pending, pending_cache); 1236 dm_exception_table_exit(&s->pending, pending_cache);
727 exit_exception_table(&s->complete, exception_cache); 1237 dm_exception_table_exit(&s->complete, exception_cache);
1238}
1239
1240static void __handover_exceptions(struct dm_snapshot *snap_src,
1241 struct dm_snapshot *snap_dest)
1242{
1243 union {
1244 struct dm_exception_table table_swap;
1245 struct dm_exception_store *store_swap;
1246 } u;
1247
1248 /*
1249 * Swap all snapshot context information between the two instances.
1250 */
1251 u.table_swap = snap_dest->complete;
1252 snap_dest->complete = snap_src->complete;
1253 snap_src->complete = u.table_swap;
1254
1255 u.store_swap = snap_dest->store;
1256 snap_dest->store = snap_src->store;
1257 snap_src->store = u.store_swap;
1258
1259 snap_dest->store->snap = snap_dest;
1260 snap_src->store->snap = snap_src;
1261
1262 snap_dest->ti->split_io = snap_dest->store->chunk_size;
1263 snap_dest->valid = snap_src->valid;
1264
1265 /*
1266 * Set source invalid to ensure it receives no further I/O.
1267 */
1268 snap_src->valid = 0;
728} 1269}
729 1270
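The __handover_exceptions() helper added above transfers a snapshot's completed-exception table and its exception store to a second snapshot target sharing the same COW device, then marks the source invalid so it takes no further I/O. The kernel code performs the swap through a small union; the sketch below is a stand-alone user-space model of the same field swap (the struct layouts and names are invented for illustration, not the kernel's):

#include <stdio.h>

struct exception_table { int nr_exceptions; };   /* stand-in for the real hash table */
struct exception_store { int chunk_size; };

struct snapshot {
	struct exception_table complete;   /* completed exceptions, swapped by value */
	struct exception_store *store;     /* exception store, swapped by pointer */
	int valid;
};

/* Hand all exception state from src to dest, then invalidate src. */
static void handover(struct snapshot *src, struct snapshot *dest)
{
	struct exception_table t = dest->complete;
	struct exception_store *s = dest->store;

	dest->complete = src->complete;
	src->complete = t;

	dest->store = src->store;
	src->store = s;

	dest->valid = src->valid;
	src->valid = 0;                    /* source must receive no further I/O */
}

int main(void)
{
	struct exception_store full = { 8 }, spare = { 0 };
	struct snapshot src  = { { 100 }, &full,  1 };
	struct snapshot dest = { {   0 }, &spare, 0 };

	handover(&src, &dest);
	printf("dest: %d exceptions, chunk %d, valid %d; src valid %d\n",
	       dest.complete.nr_exceptions, dest.store->chunk_size,
	       dest.valid, src.valid);
	return 0;
}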
730static void snapshot_dtr(struct dm_target *ti) 1271static void snapshot_dtr(struct dm_target *ti)
@@ -733,9 +1274,24 @@ static void snapshot_dtr(struct dm_target *ti)
733 int i; 1274 int i;
734#endif 1275#endif
735 struct dm_snapshot *s = ti->private; 1276 struct dm_snapshot *s = ti->private;
1277 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
736 1278
737 flush_workqueue(ksnapd); 1279 flush_workqueue(ksnapd);
738 1280
1281 down_read(&_origins_lock);
1282 /* Check whether exception handover must be cancelled */
1283 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1284 if (snap_src && snap_dest && (s == snap_src)) {
1285 down_write(&snap_dest->lock);
1286 snap_dest->valid = 0;
1287 up_write(&snap_dest->lock);
1288 DMERR("Cancelling snapshot handover.");
1289 }
1290 up_read(&_origins_lock);
1291
1292 if (dm_target_is_snapshot_merge(ti))
1293 stop_merge(s);
1294
739 /* Prevent further origin writes from using this snapshot. */ 1295 /* Prevent further origin writes from using this snapshot. */
740 /* After this returns there can be no new kcopyd jobs. */ 1296 /* After this returns there can be no new kcopyd jobs. */
741 unregister_snapshot(s); 1297 unregister_snapshot(s);
@@ -763,6 +1319,8 @@ static void snapshot_dtr(struct dm_target *ti)
763 1319
764 dm_exception_store_destroy(s->store); 1320 dm_exception_store_destroy(s->store);
765 1321
1322 dm_put_device(ti, s->cow);
1323
766 kfree(s); 1324 kfree(s);
767} 1325}
768 1326
@@ -795,6 +1353,26 @@ static void flush_queued_bios(struct work_struct *work)
795 flush_bios(queued_bios); 1353 flush_bios(queued_bios);
796} 1354}
797 1355
1356static int do_origin(struct dm_dev *origin, struct bio *bio);
1357
1358/*
1359 * Flush a list of buffers.
1360 */
1361static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1362{
1363 struct bio *n;
1364 int r;
1365
1366 while (bio) {
1367 n = bio->bi_next;
1368 bio->bi_next = NULL;
1369 r = do_origin(s->origin, bio);
1370 if (r == DM_MAPIO_REMAPPED)
1371 generic_make_request(bio);
1372 bio = n;
1373 }
1374}
1375
798/* 1376/*
799 * Error a list of buffers. 1377 * Error a list of buffers.
800 */ 1378 */
@@ -825,45 +1403,12 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
825 1403
826 s->valid = 0; 1404 s->valid = 0;
827 1405
828 dm_table_event(s->store->ti->table); 1406 dm_table_event(s->ti->table);
829}
830
831static void get_pending_exception(struct dm_snap_pending_exception *pe)
832{
833 atomic_inc(&pe->ref_count);
834}
835
836static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
837{
838 struct dm_snap_pending_exception *primary_pe;
839 struct bio *origin_bios = NULL;
840
841 primary_pe = pe->primary_pe;
842
843 /*
844 * If this pe is involved in a write to the origin and
845 * it is the last sibling to complete then release
846 * the bios for the original write to the origin.
847 */
848 if (primary_pe &&
849 atomic_dec_and_test(&primary_pe->ref_count)) {
850 origin_bios = bio_list_get(&primary_pe->origin_bios);
851 free_pending_exception(primary_pe);
852 }
853
854 /*
855 * Free the pe if it's not linked to an origin write or if
856 * it's not itself a primary pe.
857 */
858 if (!primary_pe || primary_pe != pe)
859 free_pending_exception(pe);
860
861 return origin_bios;
862} 1407}
863 1408
864static void pending_complete(struct dm_snap_pending_exception *pe, int success) 1409static void pending_complete(struct dm_snap_pending_exception *pe, int success)
865{ 1410{
866 struct dm_snap_exception *e; 1411 struct dm_exception *e;
867 struct dm_snapshot *s = pe->snap; 1412 struct dm_snapshot *s = pe->snap;
868 struct bio *origin_bios = NULL; 1413 struct bio *origin_bios = NULL;
869 struct bio *snapshot_bios = NULL; 1414 struct bio *snapshot_bios = NULL;
@@ -877,7 +1422,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
877 goto out; 1422 goto out;
878 } 1423 }
879 1424
880 e = alloc_exception(); 1425 e = alloc_completed_exception();
881 if (!e) { 1426 if (!e) {
882 down_write(&s->lock); 1427 down_write(&s->lock);
883 __invalidate_snapshot(s, -ENOMEM); 1428 __invalidate_snapshot(s, -ENOMEM);
@@ -888,28 +1433,27 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
888 1433
889 down_write(&s->lock); 1434 down_write(&s->lock);
890 if (!s->valid) { 1435 if (!s->valid) {
891 free_exception(e); 1436 free_completed_exception(e);
892 error = 1; 1437 error = 1;
893 goto out; 1438 goto out;
894 } 1439 }
895 1440
896 /* 1441 /* Check for conflicting reads */
897 * Check for conflicting reads. This is extremely improbable, 1442 __check_for_conflicting_io(s, pe->e.old_chunk);
898 * so msleep(1) is sufficient and there is no need for a wait queue.
899 */
900 while (__chunk_is_tracked(s, pe->e.old_chunk))
901 msleep(1);
902 1443
903 /* 1444 /*
904 * Add a proper exception, and remove the 1445 * Add a proper exception, and remove the
905 * in-flight exception from the list. 1446 * in-flight exception from the list.
906 */ 1447 */
907 insert_completed_exception(s, e); 1448 dm_insert_exception(&s->complete, e);
908 1449
909 out: 1450 out:
910 remove_exception(&pe->e); 1451 dm_remove_exception(&pe->e);
911 snapshot_bios = bio_list_get(&pe->snapshot_bios); 1452 snapshot_bios = bio_list_get(&pe->snapshot_bios);
912 origin_bios = put_pending_exception(pe); 1453 origin_bios = bio_list_get(&pe->origin_bios);
1454 free_pending_exception(pe);
1455
1456 increment_pending_exceptions_done_count();
913 1457
914 up_write(&s->lock); 1458 up_write(&s->lock);
915 1459
@@ -919,7 +1463,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
919 else 1463 else
920 flush_bios(snapshot_bios); 1464 flush_bios(snapshot_bios);
921 1465
922 flush_bios(origin_bios); 1466 retry_origin_bios(s, origin_bios);
923} 1467}
924 1468
925static void commit_callback(void *context, int success) 1469static void commit_callback(void *context, int success)
@@ -963,7 +1507,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
963 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); 1507 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
964 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); 1508 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
965 1509
966 dest.bdev = s->store->cow->bdev; 1510 dest.bdev = s->cow->bdev;
967 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); 1511 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
968 dest.count = src.count; 1512 dest.count = src.count;
969 1513
@@ -975,7 +1519,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
975static struct dm_snap_pending_exception * 1519static struct dm_snap_pending_exception *
976__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) 1520__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
977{ 1521{
978 struct dm_snap_exception *e = lookup_exception(&s->pending, chunk); 1522 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
979 1523
980 if (!e) 1524 if (!e)
981 return NULL; 1525 return NULL;
@@ -1006,8 +1550,6 @@ __find_pending_exception(struct dm_snapshot *s,
1006 pe->e.old_chunk = chunk; 1550 pe->e.old_chunk = chunk;
1007 bio_list_init(&pe->origin_bios); 1551 bio_list_init(&pe->origin_bios);
1008 bio_list_init(&pe->snapshot_bios); 1552 bio_list_init(&pe->snapshot_bios);
1009 pe->primary_pe = NULL;
1010 atomic_set(&pe->ref_count, 0);
1011 pe->started = 0; 1553 pe->started = 0;
1012 1554
1013 if (s->store->type->prepare_exception(s->store, &pe->e)) { 1555 if (s->store->type->prepare_exception(s->store, &pe->e)) {
@@ -1015,16 +1557,15 @@ __find_pending_exception(struct dm_snapshot *s,
1015 return NULL; 1557 return NULL;
1016 } 1558 }
1017 1559
1018 get_pending_exception(pe); 1560 dm_insert_exception(&s->pending, &pe->e);
1019 insert_exception(&s->pending, &pe->e);
1020 1561
1021 return pe; 1562 return pe;
1022} 1563}
1023 1564
1024static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e, 1565static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1025 struct bio *bio, chunk_t chunk) 1566 struct bio *bio, chunk_t chunk)
1026{ 1567{
1027 bio->bi_bdev = s->store->cow->bdev; 1568 bio->bi_bdev = s->cow->bdev;
1028 bio->bi_sector = chunk_to_sector(s->store, 1569 bio->bi_sector = chunk_to_sector(s->store,
1029 dm_chunk_number(e->new_chunk) + 1570 dm_chunk_number(e->new_chunk) +
1030 (chunk - e->old_chunk)) + 1571 (chunk - e->old_chunk)) +
@@ -1035,14 +1576,14 @@ static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
1035static int snapshot_map(struct dm_target *ti, struct bio *bio, 1576static int snapshot_map(struct dm_target *ti, struct bio *bio,
1036 union map_info *map_context) 1577 union map_info *map_context)
1037{ 1578{
1038 struct dm_snap_exception *e; 1579 struct dm_exception *e;
1039 struct dm_snapshot *s = ti->private; 1580 struct dm_snapshot *s = ti->private;
1040 int r = DM_MAPIO_REMAPPED; 1581 int r = DM_MAPIO_REMAPPED;
1041 chunk_t chunk; 1582 chunk_t chunk;
1042 struct dm_snap_pending_exception *pe = NULL; 1583 struct dm_snap_pending_exception *pe = NULL;
1043 1584
1044 if (unlikely(bio_empty_barrier(bio))) { 1585 if (unlikely(bio_empty_barrier(bio))) {
1045 bio->bi_bdev = s->store->cow->bdev; 1586 bio->bi_bdev = s->cow->bdev;
1046 return DM_MAPIO_REMAPPED; 1587 return DM_MAPIO_REMAPPED;
1047 } 1588 }
1048 1589
@@ -1063,7 +1604,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
1063 } 1604 }
1064 1605
1065 /* If the block is already remapped - use that, else remap it */ 1606 /* If the block is already remapped - use that, else remap it */
1066 e = lookup_exception(&s->complete, chunk); 1607 e = dm_lookup_exception(&s->complete, chunk);
1067 if (e) { 1608 if (e) {
1068 remap_exception(s, e, bio, chunk); 1609 remap_exception(s, e, bio, chunk);
1069 goto out_unlock; 1610 goto out_unlock;
@@ -1087,7 +1628,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
1087 goto out_unlock; 1628 goto out_unlock;
1088 } 1629 }
1089 1630
1090 e = lookup_exception(&s->complete, chunk); 1631 e = dm_lookup_exception(&s->complete, chunk);
1091 if (e) { 1632 if (e) {
1092 free_pending_exception(pe); 1633 free_pending_exception(pe);
1093 remap_exception(s, e, bio, chunk); 1634 remap_exception(s, e, bio, chunk);
@@ -1125,6 +1666,78 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
1125 return r; 1666 return r;
1126} 1667}
1127 1668
1669/*
1670 * A snapshot-merge target behaves like a combination of a snapshot
1671 * target and a snapshot-origin target. It only generates new
1672 * exceptions in other snapshots and not in the one that is being
1673 * merged.
1674 *
1675 * For each chunk, if there is an existing exception, it is used to
1676 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
1677 * which in turn might generate exceptions in other snapshots.
1678 * If merging is currently taking place on the chunk in question, the
1679 * I/O is deferred by adding it to s->bios_queued_during_merge.
1680 */
1681static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1682 union map_info *map_context)
1683{
1684 struct dm_exception *e;
1685 struct dm_snapshot *s = ti->private;
1686 int r = DM_MAPIO_REMAPPED;
1687 chunk_t chunk;
1688
1689 if (unlikely(bio_empty_barrier(bio))) {
1690 if (!map_context->flush_request)
1691 bio->bi_bdev = s->origin->bdev;
1692 else
1693 bio->bi_bdev = s->cow->bdev;
1694 map_context->ptr = NULL;
1695 return DM_MAPIO_REMAPPED;
1696 }
1697
1698 chunk = sector_to_chunk(s->store, bio->bi_sector);
1699
1700 down_write(&s->lock);
1701
1702 /* Full merging snapshots are redirected to the origin */
1703 if (!s->valid)
1704 goto redirect_to_origin;
1705
1706 /* If the block is already remapped - use that */
1707 e = dm_lookup_exception(&s->complete, chunk);
1708 if (e) {
1709 /* Queue writes overlapping with chunks being merged */
1710 if (bio_rw(bio) == WRITE &&
1711 chunk >= s->first_merging_chunk &&
1712 chunk < (s->first_merging_chunk +
1713 s->num_merging_chunks)) {
1714 bio->bi_bdev = s->origin->bdev;
1715 bio_list_add(&s->bios_queued_during_merge, bio);
1716 r = DM_MAPIO_SUBMITTED;
1717 goto out_unlock;
1718 }
1719
1720 remap_exception(s, e, bio, chunk);
1721
1722 if (bio_rw(bio) == WRITE)
1723 map_context->ptr = track_chunk(s, chunk);
1724 goto out_unlock;
1725 }
1726
1727redirect_to_origin:
1728 bio->bi_bdev = s->origin->bdev;
1729
1730 if (bio_rw(bio) == WRITE) {
1731 up_write(&s->lock);
1732 return do_origin(s->origin, bio);
1733 }
1734
1735out_unlock:
1736 up_write(&s->lock);
1737
1738 return r;
1739}
1740
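The comment and function added above describe a three-way routing decision made per chunk by the snapshot-merge target: a write to a chunk that is both remapped and inside the window currently being merged is queued until the merge of that area finishes, any other remapped chunk is redirected to the COW device, and everything else (including all I/O to an invalid, fully merged snapshot) goes to the origin. A self-contained sketch of that decision, with invented types standing in for the bio and snapshot state:

#include <stdbool.h>
#include <stdio.h>

enum route { ROUTE_QUEUE, ROUTE_COW, ROUTE_ORIGIN };

struct merge_state {
	bool valid;                        /* fully merged snapshots redirect to origin */
	unsigned long first_merging_chunk;
	unsigned num_merging_chunks;
};

static bool chunk_remapped(unsigned long chunk)
{
	/* stand-in for dm_lookup_exception(&s->complete, chunk) */
	return chunk % 2 == 0;
}

static enum route merge_route(const struct merge_state *s,
			      unsigned long chunk, bool is_write)
{
	if (!s->valid || !chunk_remapped(chunk))
		return ROUTE_ORIGIN;

	if (is_write &&
	    chunk >= s->first_merging_chunk &&
	    chunk < s->first_merging_chunk + s->num_merging_chunks)
		return ROUTE_QUEUE;        /* defer until the merge of this area finishes */

	return ROUTE_COW;                  /* already remapped: use the COW copy */
}

int main(void)
{
	struct merge_state s = { true, 10, 4 };
	printf("%d %d %d\n",
	       merge_route(&s, 12, true),   /* remapped, inside merge window: queue */
	       merge_route(&s, 20, true),   /* remapped, outside window: cow */
	       merge_route(&s, 7, false));  /* not remapped: origin */
	return 0;
}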
1128static int snapshot_end_io(struct dm_target *ti, struct bio *bio, 1741static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1129 int error, union map_info *map_context) 1742 int error, union map_info *map_context)
1130{ 1743{
@@ -1137,40 +1750,135 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1137 return 0; 1750 return 0;
1138} 1751}
1139 1752
1753static void snapshot_merge_presuspend(struct dm_target *ti)
1754{
1755 struct dm_snapshot *s = ti->private;
1756
1757 stop_merge(s);
1758}
1759
1760static void snapshot_postsuspend(struct dm_target *ti)
1761{
1762 struct dm_snapshot *s = ti->private;
1763
1764 down_write(&s->lock);
1765 s->suspended = 1;
1766 up_write(&s->lock);
1767}
1768
1769static int snapshot_preresume(struct dm_target *ti)
1770{
1771 int r = 0;
1772 struct dm_snapshot *s = ti->private;
1773 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1774
1775 down_read(&_origins_lock);
1776 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1777 if (snap_src && snap_dest) {
1778 down_read(&snap_src->lock);
1779 if (s == snap_src) {
1780 DMERR("Unable to resume snapshot source until "
1781 "handover completes.");
1782 r = -EINVAL;
1783 } else if (!snap_src->suspended) {
1784 DMERR("Unable to perform snapshot handover until "
1785 "source is suspended.");
1786 r = -EINVAL;
1787 }
1788 up_read(&snap_src->lock);
1789 }
1790 up_read(&_origins_lock);
1791
1792 return r;
1793}
1794
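snapshot_preresume() above rejects a resume in two situations: the target is itself the source of a pending handover (it must stay out of service until the destination takes over), or the handover source has not been suspended yet. A compact user-space sketch of the rule, with hypothetical parameter names and -1 standing in for -EINVAL:

#include <stdbool.h>

/* Returns 0 if resume may proceed, -1 otherwise. */
static int may_resume(bool handover_pending, bool i_am_source, bool source_suspended)
{
	if (!handover_pending)
		return 0;          /* nothing to hand over, resume normally */
	if (i_am_source)
		return -1;         /* source resumes only after handover completes */
	if (!source_suspended)
		return -1;         /* destination must wait for the source to suspend */
	return 0;
}

int main(void)
{
	/* handover pending, we are the destination, source already suspended: allowed */
	return may_resume(true, false, true);
}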
1140static void snapshot_resume(struct dm_target *ti) 1795static void snapshot_resume(struct dm_target *ti)
1141{ 1796{
1142 struct dm_snapshot *s = ti->private; 1797 struct dm_snapshot *s = ti->private;
1798 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1799
1800 down_read(&_origins_lock);
1801 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1802 if (snap_src && snap_dest) {
1803 down_write(&snap_src->lock);
1804 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1805 __handover_exceptions(snap_src, snap_dest);
1806 up_write(&snap_dest->lock);
1807 up_write(&snap_src->lock);
1808 }
1809 up_read(&_origins_lock);
1810
1811 /* Now we have correct chunk size, reregister */
1812 reregister_snapshot(s);
1143 1813
1144 down_write(&s->lock); 1814 down_write(&s->lock);
1145 s->active = 1; 1815 s->active = 1;
1816 s->suspended = 0;
1146 up_write(&s->lock); 1817 up_write(&s->lock);
1147} 1818}
1148 1819
1820static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1821{
1822 sector_t min_chunksize;
1823
1824 down_read(&_origins_lock);
1825 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1826 up_read(&_origins_lock);
1827
1828 return min_chunksize;
1829}
1830
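get_origin_minimum_chunksize() above replaces the min_not_zero() loop that origin_resume() used to carry (removed in a later hunk of this file), so the origin target and the new merge target derive split_io from the same helper. The underlying computation is simply the minimum of the non-zero chunk sizes of every snapshot registered against the origin; zero chunk sizes are skipped because a snapshot awaiting exception handover has not learned its chunk size yet. A user-space sketch:

#include <stdio.h>

/* Minimum of the non-zero chunk sizes; 0 if no snapshot has one set yet. */
static unsigned min_chunk_size(const unsigned *chunk_sizes, int n)
{
	unsigned min = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!chunk_sizes[i])
			continue;              /* chunk size not set yet (handover pending) */
		if (!min || chunk_sizes[i] < min)
			min = chunk_sizes[i];
	}
	return min;
}

int main(void)
{
	unsigned sizes[] = { 0, 16, 8, 32 };
	printf("%u\n", min_chunk_size(sizes, 4));   /* prints 8 */
	return 0;
}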
1831static void snapshot_merge_resume(struct dm_target *ti)
1832{
1833 struct dm_snapshot *s = ti->private;
1834
1835 /*
1836 * Handover exceptions from existing snapshot.
1837 */
1838 snapshot_resume(ti);
1839
1840 /*
1841 * snapshot-merge acts as an origin, so set ti->split_io
1842 */
1843 ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1844
1845 start_merge(s);
1846}
1847
1149static int snapshot_status(struct dm_target *ti, status_type_t type, 1848static int snapshot_status(struct dm_target *ti, status_type_t type,
1150 char *result, unsigned int maxlen) 1849 char *result, unsigned int maxlen)
1151{ 1850{
1152 unsigned sz = 0; 1851 unsigned sz = 0;
1153 struct dm_snapshot *snap = ti->private; 1852 struct dm_snapshot *snap = ti->private;
1154 1853
1155 down_write(&snap->lock);
1156
1157 switch (type) { 1854 switch (type) {
1158 case STATUSTYPE_INFO: 1855 case STATUSTYPE_INFO:
1856
1857 down_write(&snap->lock);
1858
1159 if (!snap->valid) 1859 if (!snap->valid)
1160 DMEMIT("Invalid"); 1860 DMEMIT("Invalid");
1861 else if (snap->merge_failed)
1862 DMEMIT("Merge failed");
1161 else { 1863 else {
1162 if (snap->store->type->fraction_full) { 1864 if (snap->store->type->usage) {
1163 sector_t numerator, denominator; 1865 sector_t total_sectors, sectors_allocated,
1164 snap->store->type->fraction_full(snap->store, 1866 metadata_sectors;
1165 &numerator, 1867 snap->store->type->usage(snap->store,
1166 &denominator); 1868 &total_sectors,
1167 DMEMIT("%llu/%llu", 1869 &sectors_allocated,
1168 (unsigned long long)numerator, 1870 &metadata_sectors);
1169 (unsigned long long)denominator); 1871 DMEMIT("%llu/%llu %llu",
1872 (unsigned long long)sectors_allocated,
1873 (unsigned long long)total_sectors,
1874 (unsigned long long)metadata_sectors);
1170 } 1875 }
1171 else 1876 else
1172 DMEMIT("Unknown"); 1877 DMEMIT("Unknown");
1173 } 1878 }
1879
1880 up_write(&snap->lock);
1881
1174 break; 1882 break;
1175 1883
1176 case STATUSTYPE_TABLE: 1884 case STATUSTYPE_TABLE:
@@ -1179,14 +1887,12 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
1179 * to make private copies if the output is to 1887 * to make private copies if the output is to
1180 * make sense. 1888 * make sense.
1181 */ 1889 */
1182 DMEMIT("%s", snap->origin->name); 1890 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1183 snap->store->type->status(snap->store, type, result + sz, 1891 snap->store->type->status(snap->store, type, result + sz,
1184 maxlen - sz); 1892 maxlen - sz);
1185 break; 1893 break;
1186 } 1894 }
1187 1895
1188 up_write(&snap->lock);
1189
1190 return 0; 1896 return 0;
1191} 1897}
1192 1898
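The STATUSTYPE_INFO branch above switches from the two-value fraction_full callback to the store's usage() method and reports three values, formatted as "<sectors_allocated>/<total_sectors> <metadata_sectors>". A sketch of the formatting with snprintf standing in for DMEMIT:

#include <stdio.h>

static void emit_usage(char *buf, size_t len,
		       unsigned long long total_sectors,
		       unsigned long long sectors_allocated,
		       unsigned long long metadata_sectors)
{
	/* "allocated/total metadata", e.g. "1024/8192 8" */
	snprintf(buf, len, "%llu/%llu %llu",
		 sectors_allocated, total_sectors, metadata_sectors);
}

int main(void)
{
	char buf[64];

	emit_usage(buf, sizeof(buf), 8192, 1024, 8);
	puts(buf);
	return 0;
}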
@@ -1202,17 +1908,36 @@ static int snapshot_iterate_devices(struct dm_target *ti,
1202/*----------------------------------------------------------------- 1908/*-----------------------------------------------------------------
1203 * Origin methods 1909 * Origin methods
1204 *---------------------------------------------------------------*/ 1910 *---------------------------------------------------------------*/
1205static int __origin_write(struct list_head *snapshots, struct bio *bio) 1911
1912/*
1913 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1914 * supplied bio was ignored. The caller may submit it immediately.
1915 * (No remapping actually occurs as the origin is always a direct linear
1916 * map.)
1917 *
1918 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1919 * and any supplied bio is added to a list to be submitted once all
1920 * the necessary exceptions exist.
1921 */
1922static int __origin_write(struct list_head *snapshots, sector_t sector,
1923 struct bio *bio)
1206{ 1924{
1207 int r = DM_MAPIO_REMAPPED, first = 0; 1925 int r = DM_MAPIO_REMAPPED;
1208 struct dm_snapshot *snap; 1926 struct dm_snapshot *snap;
1209 struct dm_snap_exception *e; 1927 struct dm_exception *e;
1210 struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; 1928 struct dm_snap_pending_exception *pe;
1929 struct dm_snap_pending_exception *pe_to_start_now = NULL;
1930 struct dm_snap_pending_exception *pe_to_start_last = NULL;
1211 chunk_t chunk; 1931 chunk_t chunk;
1212 LIST_HEAD(pe_queue);
1213 1932
1214 /* Do all the snapshots on this origin */ 1933 /* Do all the snapshots on this origin */
1215 list_for_each_entry (snap, snapshots, list) { 1934 list_for_each_entry (snap, snapshots, list) {
1935 /*
1936 * Don't make new exceptions in a merging snapshot
1937 * because it has effectively been deleted
1938 */
1939 if (dm_target_is_snapshot_merge(snap->ti))
1940 continue;
1216 1941
1217 down_write(&snap->lock); 1942 down_write(&snap->lock);
1218 1943
@@ -1221,24 +1946,21 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1221 goto next_snapshot; 1946 goto next_snapshot;
1222 1947
1223 /* Nothing to do if writing beyond end of snapshot */ 1948 /* Nothing to do if writing beyond end of snapshot */
1224 if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table)) 1949 if (sector >= dm_table_get_size(snap->ti->table))
1225 goto next_snapshot; 1950 goto next_snapshot;
1226 1951
1227 /* 1952 /*
1228 * Remember, different snapshots can have 1953 * Remember, different snapshots can have
1229 * different chunk sizes. 1954 * different chunk sizes.
1230 */ 1955 */
1231 chunk = sector_to_chunk(snap->store, bio->bi_sector); 1956 chunk = sector_to_chunk(snap->store, sector);
1232 1957
1233 /* 1958 /*
1234 * Check exception table to see if block 1959 * Check exception table to see if block
1235 * is already remapped in this snapshot 1960 * is already remapped in this snapshot
1236 * and trigger an exception if not. 1961 * and trigger an exception if not.
1237 *
1238 * ref_count is initialised to 1 so pending_complete()
1239 * won't destroy the primary_pe while we're inside this loop.
1240 */ 1962 */
1241 e = lookup_exception(&snap->complete, chunk); 1963 e = dm_lookup_exception(&snap->complete, chunk);
1242 if (e) 1964 if (e)
1243 goto next_snapshot; 1965 goto next_snapshot;
1244 1966
@@ -1253,7 +1975,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1253 goto next_snapshot; 1975 goto next_snapshot;
1254 } 1976 }
1255 1977
1256 e = lookup_exception(&snap->complete, chunk); 1978 e = dm_lookup_exception(&snap->complete, chunk);
1257 if (e) { 1979 if (e) {
1258 free_pending_exception(pe); 1980 free_pending_exception(pe);
1259 goto next_snapshot; 1981 goto next_snapshot;
@@ -1266,59 +1988,43 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1266 } 1988 }
1267 } 1989 }
1268 1990
1269 if (!primary_pe) { 1991 r = DM_MAPIO_SUBMITTED;
1270 /*
1271 * Either every pe here has same
1272 * primary_pe or none has one yet.
1273 */
1274 if (pe->primary_pe)
1275 primary_pe = pe->primary_pe;
1276 else {
1277 primary_pe = pe;
1278 first = 1;
1279 }
1280
1281 bio_list_add(&primary_pe->origin_bios, bio);
1282 1992
1283 r = DM_MAPIO_SUBMITTED; 1993 /*
1284 } 1994 * If an origin bio was supplied, queue it to wait for the
1995 * completion of this exception, and start this one last,
1996 * at the end of the function.
1997 */
1998 if (bio) {
1999 bio_list_add(&pe->origin_bios, bio);
2000 bio = NULL;
1285 2001
1286 if (!pe->primary_pe) { 2002 if (!pe->started) {
1287 pe->primary_pe = primary_pe; 2003 pe->started = 1;
1288 get_pending_exception(primary_pe); 2004 pe_to_start_last = pe;
2005 }
1289 } 2006 }
1290 2007
1291 if (!pe->started) { 2008 if (!pe->started) {
1292 pe->started = 1; 2009 pe->started = 1;
1293 list_add_tail(&pe->list, &pe_queue); 2010 pe_to_start_now = pe;
1294 } 2011 }
1295 2012
1296 next_snapshot: 2013 next_snapshot:
1297 up_write(&snap->lock); 2014 up_write(&snap->lock);
1298 }
1299 2015
1300 if (!primary_pe) 2016 if (pe_to_start_now) {
1301 return r; 2017 start_copy(pe_to_start_now);
1302 2018 pe_to_start_now = NULL;
1303 /* 2019 }
1304 * If this is the first time we're processing this chunk and
1305 * ref_count is now 1 it means all the pending exceptions
1306 * got completed while we were in the loop above, so it falls to
1307 * us here to remove the primary_pe and submit any origin_bios.
1308 */
1309
1310 if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
1311 flush_bios(bio_list_get(&primary_pe->origin_bios));
1312 free_pending_exception(primary_pe);
1313 /* If we got here, pe_queue is necessarily empty. */
1314 return r;
1315 } 2020 }
1316 2021
1317 /* 2022 /*
1318 * Now that we have a complete pe list we can start the copying. 2023 * Submit the exception against which the bio is queued last,
2024 * to give the other exceptions a head start.
1319 */ 2025 */
1320 list_for_each_entry_safe(pe, next_pe, &pe_queue, list) 2026 if (pe_to_start_last)
1321 start_copy(pe); 2027 start_copy(pe_to_start_last);
1322 2028
1323 return r; 2029 return r;
1324} 2030}
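The rewritten __origin_write() above drops the old primary_pe reference counting: each pending exception now carries its own origin_bios list, and the only ordering rule left is that the exception holding the queued origin bio (pe_to_start_last) is kicked off after all the others, so the other copies get a head start. A rough stand-alone sketch of that start ordering over a batch of newly created exceptions (the types and the choice of which exception holds the bio are invented here):

#include <stdbool.h>
#include <stdio.h>

struct pending { int id; bool holds_bio; bool started; };

static void start_copy(struct pending *pe)
{
	pe->started = true;
	printf("start copy for exception %d\n", pe->id);
}

/* Start every new exception now, except the one carrying the origin bio,
 * which is started last so its siblings get a head start. */
static void start_pending(struct pending *pes, int n)
{
	struct pending *last = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (pes[i].started)
			continue;
		if (pes[i].holds_bio && !last) {
			last = &pes[i];
			continue;
		}
		start_copy(&pes[i]);
	}
	if (last)
		start_copy(last);
}

int main(void)
{
	struct pending pes[] = { {1, false, false}, {2, true, false}, {3, false, false} };

	start_pending(pes, 3);   /* starts 1 and 3 first, then 2 */
	return 0;
}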
@@ -1334,13 +2040,48 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
1334 down_read(&_origins_lock); 2040 down_read(&_origins_lock);
1335 o = __lookup_origin(origin->bdev); 2041 o = __lookup_origin(origin->bdev);
1336 if (o) 2042 if (o)
1337 r = __origin_write(&o->snapshots, bio); 2043 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
1338 up_read(&_origins_lock); 2044 up_read(&_origins_lock);
1339 2045
1340 return r; 2046 return r;
1341} 2047}
1342 2048
1343/* 2049/*
2050 * Trigger exceptions in all non-merging snapshots.
2051 *
2052 * The chunk size of the merging snapshot may be larger than the chunk
2053 * size of some other snapshot so we may need to reallocate multiple
2054 * chunks in other snapshots.
2055 *
2056 * We scan all the overlapping exceptions in the other snapshots.
2057 * Returns 1 if anything was reallocated and must be waited for,
2058 * otherwise returns 0.
2059 *
2060 * size must be a multiple of merging_snap's chunk_size.
2061 */
2062static int origin_write_extent(struct dm_snapshot *merging_snap,
2063 sector_t sector, unsigned size)
2064{
2065 int must_wait = 0;
2066 sector_t n;
2067 struct origin *o;
2068
2069 /*
2070 * The origin's __minimum_chunk_size() got stored in split_io
2071 * by snapshot_merge_resume().
2072 */
2073 down_read(&_origins_lock);
2074 o = __lookup_origin(merging_snap->origin->bdev);
2075 for (n = 0; n < size; n += merging_snap->ti->split_io)
2076 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2077 DM_MAPIO_SUBMITTED)
2078 must_wait = 1;
2079 up_read(&_origins_lock);
2080
2081 return must_wait;
2082}
2083
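origin_write_extent() above walks the written extent in steps of the merging snapshot's split_io, which snapshot_merge_resume() set to the minimum chunk size of all snapshots on the origin, and calls __origin_write() once per step with a NULL bio; if any step returns DM_MAPIO_SUBMITTED, a copy was started and the caller must wait for it. A user-space sketch of the stepping and of the sector-to-chunk arithmetic it leans on (the chunk shift and the "needs an exception" test are made up for the example):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* chunk_shift = log2(chunk size in sectors); e.g. 16-sector chunks -> shift 4 */
static sector_t sector_to_chunk(sector_t sector, unsigned chunk_shift)
{
	return sector >> chunk_shift;
}

/* Returns true if any step started a copy that must be waited for. */
static bool write_extent(sector_t sector, unsigned size_sectors, unsigned step)
{
	bool must_wait = false;
	sector_t n;

	for (n = 0; n < size_sectors; n += step) {
		sector_t chunk = sector_to_chunk(sector + n, 4);

		printf("trigger exceptions for chunk %llu\n", chunk);
		/* pretend every other chunk needed a new exception */
		if (chunk % 2)
			must_wait = true;
	}
	return must_wait;
}

int main(void)
{
	/* size is a multiple of the step, as the kernel comment above requires */
	printf("must_wait=%d\n", write_extent(128, 64, 16));
	return 0;
}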
2084/*
1344 * Origin: maps a linear range of a device, with hooks for snapshotting. 2085 * Origin: maps a linear range of a device, with hooks for snapshotting.
1345 */ 2086 */
1346 2087
@@ -1391,8 +2132,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
1391 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; 2132 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
1392} 2133}
1393 2134
1394#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
1395
1396/* 2135/*
1397 * Set the target "split_io" field to the minimum of all the snapshots' 2136 * Set the target "split_io" field to the minimum of all the snapshots'
1398 * chunk sizes. 2137 * chunk sizes.
@@ -1400,19 +2139,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
1400static void origin_resume(struct dm_target *ti) 2139static void origin_resume(struct dm_target *ti)
1401{ 2140{
1402 struct dm_dev *dev = ti->private; 2141 struct dm_dev *dev = ti->private;
1403 struct dm_snapshot *snap;
1404 struct origin *o;
1405 unsigned chunk_size = 0;
1406
1407 down_read(&_origins_lock);
1408 o = __lookup_origin(dev->bdev);
1409 if (o)
1410 list_for_each_entry (snap, &o->snapshots, list)
1411 chunk_size = min_not_zero(chunk_size,
1412 snap->store->chunk_size);
1413 up_read(&_origins_lock);
1414 2142
1415 ti->split_io = chunk_size; 2143 ti->split_io = get_origin_minimum_chunksize(dev->bdev);
1416} 2144}
1417 2145
1418static int origin_status(struct dm_target *ti, status_type_t type, char *result, 2146static int origin_status(struct dm_target *ti, status_type_t type, char *result,
@@ -1455,17 +2183,35 @@ static struct target_type origin_target = {
1455 2183
1456static struct target_type snapshot_target = { 2184static struct target_type snapshot_target = {
1457 .name = "snapshot", 2185 .name = "snapshot",
1458 .version = {1, 7, 0}, 2186 .version = {1, 9, 0},
1459 .module = THIS_MODULE, 2187 .module = THIS_MODULE,
1460 .ctr = snapshot_ctr, 2188 .ctr = snapshot_ctr,
1461 .dtr = snapshot_dtr, 2189 .dtr = snapshot_dtr,
1462 .map = snapshot_map, 2190 .map = snapshot_map,
1463 .end_io = snapshot_end_io, 2191 .end_io = snapshot_end_io,
2192 .postsuspend = snapshot_postsuspend,
2193 .preresume = snapshot_preresume,
1464 .resume = snapshot_resume, 2194 .resume = snapshot_resume,
1465 .status = snapshot_status, 2195 .status = snapshot_status,
1466 .iterate_devices = snapshot_iterate_devices, 2196 .iterate_devices = snapshot_iterate_devices,
1467}; 2197};
1468 2198
2199static struct target_type merge_target = {
2200 .name = dm_snapshot_merge_target_name,
2201 .version = {1, 0, 0},
2202 .module = THIS_MODULE,
2203 .ctr = snapshot_ctr,
2204 .dtr = snapshot_dtr,
2205 .map = snapshot_merge_map,
2206 .end_io = snapshot_end_io,
2207 .presuspend = snapshot_merge_presuspend,
2208 .postsuspend = snapshot_postsuspend,
2209 .preresume = snapshot_preresume,
2210 .resume = snapshot_merge_resume,
2211 .status = snapshot_status,
2212 .iterate_devices = snapshot_iterate_devices,
2213};
2214
1469static int __init dm_snapshot_init(void) 2215static int __init dm_snapshot_init(void)
1470{ 2216{
1471 int r; 2217 int r;
@@ -1477,7 +2223,7 @@ static int __init dm_snapshot_init(void)
1477 } 2223 }
1478 2224
1479 r = dm_register_target(&snapshot_target); 2225 r = dm_register_target(&snapshot_target);
1480 if (r) { 2226 if (r < 0) {
1481 DMERR("snapshot target register failed %d", r); 2227 DMERR("snapshot target register failed %d", r);
1482 goto bad_register_snapshot_target; 2228 goto bad_register_snapshot_target;
1483 } 2229 }
@@ -1485,34 +2231,40 @@ static int __init dm_snapshot_init(void)
1485 r = dm_register_target(&origin_target); 2231 r = dm_register_target(&origin_target);
1486 if (r < 0) { 2232 if (r < 0) {
1487 DMERR("Origin target register failed %d", r); 2233 DMERR("Origin target register failed %d", r);
1488 goto bad1; 2234 goto bad_register_origin_target;
2235 }
2236
2237 r = dm_register_target(&merge_target);
2238 if (r < 0) {
2239 DMERR("Merge target register failed %d", r);
2240 goto bad_register_merge_target;
1489 } 2241 }
1490 2242
1491 r = init_origin_hash(); 2243 r = init_origin_hash();
1492 if (r) { 2244 if (r) {
1493 DMERR("init_origin_hash failed."); 2245 DMERR("init_origin_hash failed.");
1494 goto bad2; 2246 goto bad_origin_hash;
1495 } 2247 }
1496 2248
1497 exception_cache = KMEM_CACHE(dm_snap_exception, 0); 2249 exception_cache = KMEM_CACHE(dm_exception, 0);
1498 if (!exception_cache) { 2250 if (!exception_cache) {
1499 DMERR("Couldn't create exception cache."); 2251 DMERR("Couldn't create exception cache.");
1500 r = -ENOMEM; 2252 r = -ENOMEM;
1501 goto bad3; 2253 goto bad_exception_cache;
1502 } 2254 }
1503 2255
1504 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); 2256 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
1505 if (!pending_cache) { 2257 if (!pending_cache) {
1506 DMERR("Couldn't create pending cache."); 2258 DMERR("Couldn't create pending cache.");
1507 r = -ENOMEM; 2259 r = -ENOMEM;
1508 goto bad4; 2260 goto bad_pending_cache;
1509 } 2261 }
1510 2262
1511 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); 2263 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
1512 if (!tracked_chunk_cache) { 2264 if (!tracked_chunk_cache) {
1513 DMERR("Couldn't create cache to track chunks in use."); 2265 DMERR("Couldn't create cache to track chunks in use.");
1514 r = -ENOMEM; 2266 r = -ENOMEM;
1515 goto bad5; 2267 goto bad_tracked_chunk_cache;
1516 } 2268 }
1517 2269
1518 ksnapd = create_singlethread_workqueue("ksnapd"); 2270 ksnapd = create_singlethread_workqueue("ksnapd");
@@ -1526,19 +2278,21 @@ static int __init dm_snapshot_init(void)
1526 2278
1527bad_pending_pool: 2279bad_pending_pool:
1528 kmem_cache_destroy(tracked_chunk_cache); 2280 kmem_cache_destroy(tracked_chunk_cache);
1529bad5: 2281bad_tracked_chunk_cache:
1530 kmem_cache_destroy(pending_cache); 2282 kmem_cache_destroy(pending_cache);
1531bad4: 2283bad_pending_cache:
1532 kmem_cache_destroy(exception_cache); 2284 kmem_cache_destroy(exception_cache);
1533bad3: 2285bad_exception_cache:
1534 exit_origin_hash(); 2286 exit_origin_hash();
1535bad2: 2287bad_origin_hash:
2288 dm_unregister_target(&merge_target);
2289bad_register_merge_target:
1536 dm_unregister_target(&origin_target); 2290 dm_unregister_target(&origin_target);
1537bad1: 2291bad_register_origin_target:
1538 dm_unregister_target(&snapshot_target); 2292 dm_unregister_target(&snapshot_target);
1539
1540bad_register_snapshot_target: 2293bad_register_snapshot_target:
1541 dm_exception_store_exit(); 2294 dm_exception_store_exit();
2295
1542 return r; 2296 return r;
1543} 2297}
1544 2298
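The relabelling in dm_snapshot_init() above (bad1 through bad5 becoming bad_register_origin_target, bad_tracked_chunk_cache and so on, plus a new unwind step for the merge target) is the usual kernel error-unwinding idiom: each failure jumps to a label that undoes only what was set up before that point, in reverse order. A generic user-space sketch of the pattern with made-up registration calls:

#include <stdio.h>

static int reg(const char *what, int fail) { printf("register %s\n", what); return fail; }
static void unreg(const char *what)        { printf("unregister %s\n", what); }

/* Unwind in reverse order of setup; each label undoes the step before it. */
static int init_targets(int fail_at)
{
	int r = -1;

	if (reg("snapshot", fail_at == 1))
		goto bad_register_snapshot_target;
	if (reg("origin", fail_at == 2))
		goto bad_register_origin_target;
	if (reg("merge", fail_at == 3))
		goto bad_register_merge_target;
	return 0;

bad_register_merge_target:
	unreg("origin");
bad_register_origin_target:
	unreg("snapshot");
bad_register_snapshot_target:
	return r;
}

int main(void)
{
	/* fails registering "merge", then unwinds the first two registrations */
	return init_targets(3) ? 1 : 0;
}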
@@ -1548,6 +2302,7 @@ static void __exit dm_snapshot_exit(void)
1548 2302
1549 dm_unregister_target(&snapshot_target); 2303 dm_unregister_target(&snapshot_target);
1550 dm_unregister_target(&origin_target); 2304 dm_unregister_target(&origin_target);
2305 dm_unregister_target(&merge_target);
1551 2306
1552 exit_origin_hash(); 2307 exit_origin_hash();
1553 kmem_cache_destroy(pending_cache); 2308 kmem_cache_destroy(pending_cache);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 4b045903a4e2..f53392df7b97 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -59,7 +59,7 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
59 59
60static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) 60static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
61{ 61{
62 sprintf(buf, "%d\n", dm_suspended(md)); 62 sprintf(buf, "%d\n", dm_suspended_md(md));
63 63
64 return strlen(buf); 64 return strlen(buf);
65} 65}
@@ -80,12 +80,20 @@ static struct sysfs_ops dm_sysfs_ops = {
80}; 80};
81 81
82/* 82/*
83 * The sysfs structure is embedded in md struct, nothing to do here
84 */
85static void dm_sysfs_release(struct kobject *kobj)
86{
87}
88
89/*
83 * dm kobject is embedded in mapped_device structure 90 * dm kobject is embedded in mapped_device structure
84 * no need to define release function here 91 * no need to define release function here
85 */ 92 */
86static struct kobj_type dm_ktype = { 93static struct kobj_type dm_ktype = {
87 .sysfs_ops = &dm_sysfs_ops, 94 .sysfs_ops = &dm_sysfs_ops,
88 .default_attrs = dm_attrs, 95 .default_attrs = dm_attrs,
96 .release = dm_sysfs_release
89}; 97};
90 98
91/* 99/*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1a6cb3c7822e..be625475cf6d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -12,6 +12,7 @@
12#include <linux/blkdev.h> 12#include <linux/blkdev.h>
13#include <linux/namei.h> 13#include <linux/namei.h>
14#include <linux/ctype.h> 14#include <linux/ctype.h>
15#include <linux/string.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
17#include <linux/mutex.h> 18#include <linux/mutex.h>
@@ -237,6 +238,9 @@ void dm_table_destroy(struct dm_table *t)
237{ 238{
238 unsigned int i; 239 unsigned int i;
239 240
241 if (!t)
242 return;
243
240 while (atomic_read(&t->holders)) 244 while (atomic_read(&t->holders))
241 msleep(1); 245 msleep(1);
242 smp_mb(); 246 smp_mb();
@@ -600,11 +604,8 @@ int dm_split_args(int *argc, char ***argvp, char *input)
600 return -ENOMEM; 604 return -ENOMEM;
601 605
602 while (1) { 606 while (1) {
603 start = end;
604
605 /* Skip whitespace */ 607 /* Skip whitespace */
606 while (*start && isspace(*start)) 608 start = skip_spaces(end);
607 start++;
608 609
609 if (!*start) 610 if (!*start)
610 break; /* success, we hit the end */ 611 break; /* success, we hit the end */
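The dm_split_args() hunk above swaps an open-coded whitespace-skipping loop for the kernel's skip_spaces() helper, which is why linux/string.h is now included at the top of the file. A user-space equivalent of the helper, assuming the usual semantics of returning a pointer to the first non-space character:

#include <ctype.h>
#include <stdio.h>

static char *skip_spaces(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

int main(void)
{
	char line[] = "   0 4096 linear /dev/sda 0";

	printf("[%s]\n", skip_spaces(line));   /* prints "[0 4096 linear /dev/sda 0]" */
	return 0;
}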
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 6f65883aef12..c7c555a8c7b2 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -139,14 +139,13 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
139 list_del_init(&event->elist); 139 list_del_init(&event->elist);
140 140
141 /* 141 /*
142 * Need to call dm_copy_name_and_uuid from here for now. 142 * When a device is being removed this copy fails and we
143 * Context of previous var adds and locking used for 143 * discard these unsent events.
144 * hash_cell not compatable.
145 */ 144 */
146 if (dm_copy_name_and_uuid(event->md, event->name, 145 if (dm_copy_name_and_uuid(event->md, event->name,
147 event->uuid)) { 146 event->uuid)) {
148 DMERR("%s: dm_copy_name_and_uuid() failed", 147 DMINFO("%s: skipping sending uevent for lost device",
149 __func__); 148 __func__);
150 goto uevent_free; 149 goto uevent_free;
151 } 150 }
152 151
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 724efc63904d..3167480b532c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -143,9 +143,19 @@ struct mapped_device {
143 int barrier_error; 143 int barrier_error;
144 144
145 /* 145 /*
146 * Protect barrier_error from concurrent endio processing
147 * in request-based dm.
148 */
149 spinlock_t barrier_error_lock;
150
151 /*
146 * Processing queue (flush/barriers) 152 * Processing queue (flush/barriers)
147 */ 153 */
148 struct workqueue_struct *wq; 154 struct workqueue_struct *wq;
155 struct work_struct barrier_work;
156
157 /* A pointer to the currently processing pre/post flush request */
158 struct request *flush_request;
149 159
150 /* 160 /*
151 * The current mapping. 161 * The current mapping.
@@ -178,9 +188,6 @@ struct mapped_device {
178 /* forced geometry settings */ 188 /* forced geometry settings */
179 struct hd_geometry geometry; 189 struct hd_geometry geometry;
180 190
181 /* marker of flush suspend for request-based dm */
182 struct request suspend_rq;
183
184 /* For saving the address of __make_request for request based dm */ 191 /* For saving the address of __make_request for request based dm */
185 make_request_fn *saved_make_request_fn; 192 make_request_fn *saved_make_request_fn;
186 193
@@ -275,6 +282,7 @@ static int (*_inits[])(void) __initdata = {
275 dm_target_init, 282 dm_target_init,
276 dm_linear_init, 283 dm_linear_init,
277 dm_stripe_init, 284 dm_stripe_init,
285 dm_io_init,
278 dm_kcopyd_init, 286 dm_kcopyd_init,
279 dm_interface_init, 287 dm_interface_init,
280}; 288};
@@ -284,6 +292,7 @@ static void (*_exits[])(void) = {
284 dm_target_exit, 292 dm_target_exit,
285 dm_linear_exit, 293 dm_linear_exit,
286 dm_stripe_exit, 294 dm_stripe_exit,
295 dm_io_exit,
287 dm_kcopyd_exit, 296 dm_kcopyd_exit,
288 dm_interface_exit, 297 dm_interface_exit,
289}; 298};
@@ -320,6 +329,11 @@ static void __exit dm_exit(void)
320/* 329/*
321 * Block device functions 330 * Block device functions
322 */ 331 */
332int dm_deleting_md(struct mapped_device *md)
333{
334 return test_bit(DMF_DELETING, &md->flags);
335}
336
323static int dm_blk_open(struct block_device *bdev, fmode_t mode) 337static int dm_blk_open(struct block_device *bdev, fmode_t mode)
324{ 338{
325 struct mapped_device *md; 339 struct mapped_device *md;
@@ -331,7 +345,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
331 goto out; 345 goto out;
332 346
333 if (test_bit(DMF_FREEING, &md->flags) || 347 if (test_bit(DMF_FREEING, &md->flags) ||
334 test_bit(DMF_DELETING, &md->flags)) { 348 dm_deleting_md(md)) {
335 md = NULL; 349 md = NULL;
336 goto out; 350 goto out;
337 } 351 }
@@ -388,7 +402,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
388 unsigned int cmd, unsigned long arg) 402 unsigned int cmd, unsigned long arg)
389{ 403{
390 struct mapped_device *md = bdev->bd_disk->private_data; 404 struct mapped_device *md = bdev->bd_disk->private_data;
391 struct dm_table *map = dm_get_table(md); 405 struct dm_table *map = dm_get_live_table(md);
392 struct dm_target *tgt; 406 struct dm_target *tgt;
393 int r = -ENOTTY; 407 int r = -ENOTTY;
394 408
@@ -401,7 +415,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
401 415
402 tgt = dm_table_get_target(map, 0); 416 tgt = dm_table_get_target(map, 0);
403 417
404 if (dm_suspended(md)) { 418 if (dm_suspended_md(md)) {
405 r = -EAGAIN; 419 r = -EAGAIN;
406 goto out; 420 goto out;
407 } 421 }
@@ -430,9 +444,10 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
430 mempool_free(tio, md->tio_pool); 444 mempool_free(tio, md->tio_pool);
431} 445}
432 446
433static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md) 447static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
448 gfp_t gfp_mask)
434{ 449{
435 return mempool_alloc(md->tio_pool, GFP_ATOMIC); 450 return mempool_alloc(md->tio_pool, gfp_mask);
436} 451}
437 452
438static void free_rq_tio(struct dm_rq_target_io *tio) 453static void free_rq_tio(struct dm_rq_target_io *tio)
@@ -450,6 +465,12 @@ static void free_bio_info(struct dm_rq_clone_bio_info *info)
450 mempool_free(info, info->tio->md->io_pool); 465 mempool_free(info, info->tio->md->io_pool);
451} 466}
452 467
468static int md_in_flight(struct mapped_device *md)
469{
470 return atomic_read(&md->pending[READ]) +
471 atomic_read(&md->pending[WRITE]);
472}
473
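md_in_flight() above, together with the reworked rq_completed() in a later hunk, replaces the old queue_in_flight() test: requests are counted per direction in md->pending[READ] and md->pending[WRITE], the counter for the completed request's direction is dropped in rq_completed(), and anyone waiting in the suspend path is woken once the total reaches zero. A user-space sketch of that accounting using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { RD = 0, WR = 1 };

static atomic_int pending[2];

static void io_start(int rw)
{
	atomic_fetch_add(&pending[rw], 1);
}

static bool io_in_flight(void)
{
	return atomic_load(&pending[RD]) + atomic_load(&pending[WR]) > 0;
}

static void io_complete(int rw)
{
	atomic_fetch_sub(&pending[rw], 1);
	if (!io_in_flight())
		printf("idle: wake up anyone waiting for suspend\n");
}

int main(void)
{
	io_start(RD);
	io_start(WR);
	io_complete(RD);
	io_complete(WR);   /* last completion triggers the wakeup */
	return 0;
}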
453static void start_io_acct(struct dm_io *io) 474static void start_io_acct(struct dm_io *io)
454{ 475{
455 struct mapped_device *md = io->md; 476 struct mapped_device *md = io->md;
@@ -512,7 +533,7 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
512 * function to access the md->map field, and make sure they call 533 * function to access the md->map field, and make sure they call
513 * dm_table_put() when finished. 534 * dm_table_put() when finished.
514 */ 535 */
515struct dm_table *dm_get_table(struct mapped_device *md) 536struct dm_table *dm_get_live_table(struct mapped_device *md)
516{ 537{
517 struct dm_table *t; 538 struct dm_table *t;
518 unsigned long flags; 539 unsigned long flags;
@@ -716,28 +737,38 @@ static void end_clone_bio(struct bio *clone, int error)
716 blk_update_request(tio->orig, 0, nr_bytes); 737 blk_update_request(tio->orig, 0, nr_bytes);
717} 738}
718 739
740static void store_barrier_error(struct mapped_device *md, int error)
741{
742 unsigned long flags;
743
744 spin_lock_irqsave(&md->barrier_error_lock, flags);
745 /*
746 * Basically, the first error is taken, but:
747 * -EOPNOTSUPP supersedes any I/O error.
748 * Requeue request supersedes any I/O error but -EOPNOTSUPP.
749 */
750 if (!md->barrier_error || error == -EOPNOTSUPP ||
751 (md->barrier_error != -EOPNOTSUPP &&
752 error == DM_ENDIO_REQUEUE))
753 md->barrier_error = error;
754 spin_unlock_irqrestore(&md->barrier_error_lock, flags);
755}
756
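store_barrier_error() above folds the completion results of several barrier clones into a single error for the original request: the first error reported is kept, except that -EOPNOTSUPP supersedes any plain I/O error, and a requeue result supersedes any I/O error other than -EOPNOTSUPP. The rule is easier to see as a pure function; the constants below are stand-ins for the kernel's:

#include <stdio.h>

#define SK_EOPNOTSUPP    (-95)
#define SK_ENDIO_REQUEUE   1     /* stand-in for DM_ENDIO_REQUEUE */

/* Combine the error recorded so far with a newly reported one. */
static int combine_barrier_error(int recorded, int error)
{
	if (!recorded ||                         /* first error is taken... */
	    error == SK_EOPNOTSUPP ||            /* ...but EOPNOTSUPP beats everything */
	    (recorded != SK_EOPNOTSUPP &&
	     error == SK_ENDIO_REQUEUE))         /* requeue beats plain I/O errors */
		recorded = error;
	return recorded;
}

int main(void)
{
	int e = 0;

	e = combine_barrier_error(e, -5);               /* first error, kept */
	e = combine_barrier_error(e, SK_ENDIO_REQUEUE); /* requeue supersedes -5 */
	e = combine_barrier_error(e, SK_EOPNOTSUPP);    /* EOPNOTSUPP supersedes all */
	printf("%d\n", e);
	return 0;
}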
719/* 757/*
720 * Don't touch any member of the md after calling this function because 758 * Don't touch any member of the md after calling this function because
721 * the md may be freed in dm_put() at the end of this function. 759 * the md may be freed in dm_put() at the end of this function.
722 * Or do dm_get() before calling this function and dm_put() later. 760 * Or do dm_get() before calling this function and dm_put() later.
723 */ 761 */
724static void rq_completed(struct mapped_device *md, int run_queue) 762static void rq_completed(struct mapped_device *md, int rw, int run_queue)
725{ 763{
726 int wakeup_waiters = 0; 764 atomic_dec(&md->pending[rw]);
727 struct request_queue *q = md->queue;
728 unsigned long flags;
729
730 spin_lock_irqsave(q->queue_lock, flags);
731 if (!queue_in_flight(q))
732 wakeup_waiters = 1;
733 spin_unlock_irqrestore(q->queue_lock, flags);
734 765
735 /* nudge anyone waiting on suspend queue */ 766 /* nudge anyone waiting on suspend queue */
736 if (wakeup_waiters) 767 if (!md_in_flight(md))
737 wake_up(&md->wait); 768 wake_up(&md->wait);
738 769
739 if (run_queue) 770 if (run_queue)
740 blk_run_queue(q); 771 blk_run_queue(md->queue);
741 772
742 /* 773 /*
743 * dm_put() must be at the end of this function. See the comment above 774 * dm_put() must be at the end of this function. See the comment above
@@ -753,6 +784,44 @@ static void free_rq_clone(struct request *clone)
753 free_rq_tio(tio); 784 free_rq_tio(tio);
754} 785}
755 786
787/*
788 * Complete the clone and the original request.
789 * Must be called without queue lock.
790 */
791static void dm_end_request(struct request *clone, int error)
792{
793 int rw = rq_data_dir(clone);
794 int run_queue = 1;
795 bool is_barrier = blk_barrier_rq(clone);
796 struct dm_rq_target_io *tio = clone->end_io_data;
797 struct mapped_device *md = tio->md;
798 struct request *rq = tio->orig;
799
800 if (blk_pc_request(rq) && !is_barrier) {
801 rq->errors = clone->errors;
802 rq->resid_len = clone->resid_len;
803
804 if (rq->sense)
805 /*
806 * We are using the sense buffer of the original
807 * request.
808 * So setting the length of the sense data is enough.
809 */
810 rq->sense_len = clone->sense_len;
811 }
812
813 free_rq_clone(clone);
814
815 if (unlikely(is_barrier)) {
816 if (unlikely(error))
817 store_barrier_error(md, error);
818 run_queue = 0;
819 } else
820 blk_end_request_all(rq, error);
821
822 rq_completed(md, rw, run_queue);
823}
824
756static void dm_unprep_request(struct request *rq) 825static void dm_unprep_request(struct request *rq)
757{ 826{
758 struct request *clone = rq->special; 827 struct request *clone = rq->special;
@@ -768,12 +837,23 @@ static void dm_unprep_request(struct request *rq)
768 */ 837 */
769void dm_requeue_unmapped_request(struct request *clone) 838void dm_requeue_unmapped_request(struct request *clone)
770{ 839{
840 int rw = rq_data_dir(clone);
771 struct dm_rq_target_io *tio = clone->end_io_data; 841 struct dm_rq_target_io *tio = clone->end_io_data;
772 struct mapped_device *md = tio->md; 842 struct mapped_device *md = tio->md;
773 struct request *rq = tio->orig; 843 struct request *rq = tio->orig;
774 struct request_queue *q = rq->q; 844 struct request_queue *q = rq->q;
775 unsigned long flags; 845 unsigned long flags;
776 846
847 if (unlikely(blk_barrier_rq(clone))) {
848 /*
849 * Barrier clones share an original request.
850 * Leave it to dm_end_request(), which handles this special
851 * case.
852 */
853 dm_end_request(clone, DM_ENDIO_REQUEUE);
854 return;
855 }
856
777 dm_unprep_request(rq); 857 dm_unprep_request(rq);
778 858
779 spin_lock_irqsave(q->queue_lock, flags); 859 spin_lock_irqsave(q->queue_lock, flags);
@@ -782,7 +862,7 @@ void dm_requeue_unmapped_request(struct request *clone)
782 blk_requeue_request(q, rq); 862 blk_requeue_request(q, rq);
783 spin_unlock_irqrestore(q->queue_lock, flags); 863 spin_unlock_irqrestore(q->queue_lock, flags);
784 864
785 rq_completed(md, 0); 865 rq_completed(md, rw, 0);
786} 866}
787EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); 867EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
788 868
@@ -815,34 +895,28 @@ static void start_queue(struct request_queue *q)
815 spin_unlock_irqrestore(q->queue_lock, flags); 895 spin_unlock_irqrestore(q->queue_lock, flags);
816} 896}
817 897
818/* 898static void dm_done(struct request *clone, int error, bool mapped)
819 * Complete the clone and the original request.
820 * Must be called without queue lock.
821 */
822static void dm_end_request(struct request *clone, int error)
823{ 899{
900 int r = error;
824 struct dm_rq_target_io *tio = clone->end_io_data; 901 struct dm_rq_target_io *tio = clone->end_io_data;
825 struct mapped_device *md = tio->md; 902 dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
826 struct request *rq = tio->orig;
827 903
828 if (blk_pc_request(rq)) { 904 if (mapped && rq_end_io)
829 rq->errors = clone->errors; 905 r = rq_end_io(tio->ti, clone, error, &tio->info);
830 rq->resid_len = clone->resid_len;
831 906
832 if (rq->sense) 907 if (r <= 0)
833 /* 908 /* The target wants to complete the I/O */
834 * We are using the sense buffer of the original 909 dm_end_request(clone, r);
835 * request. 910 else if (r == DM_ENDIO_INCOMPLETE)
836 * So setting the length of the sense data is enough. 911 /* The target will handle the I/O */
837 */ 912 return;
838 rq->sense_len = clone->sense_len; 913 else if (r == DM_ENDIO_REQUEUE)
914 /* The target wants to requeue the I/O */
915 dm_requeue_unmapped_request(clone);
916 else {
917 DMWARN("unimplemented target endio return value: %d", r);
918 BUG();
839 } 919 }
840
841 free_rq_clone(clone);
842
843 blk_end_request_all(rq, error);
844
845 rq_completed(md, 1);
846} 920}
847 921
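dm_done() above is the per-clone completion dispatcher factored out of the old dm_end_request()/dm_softirq_done() pair: when the request was actually mapped and the target provides an rq_end_io hook, that hook gets the first say, and its return value is interpreted as before: a value of zero or below completes the original request with that error, DM_ENDIO_INCOMPLETE means the target will finish the I/O itself, DM_ENDIO_REQUEUE sends the clone back for another try, and anything else is a bug. A sketch of the dispatch with stand-in constants:

#include <assert.h>
#include <stdio.h>

enum { SK_ENDIO_INCOMPLETE = 1, SK_ENDIO_REQUEUE = 2 };   /* stand-ins */

static void end_request(int error)  { printf("complete original, error %d\n", error); }
static void requeue(void)           { printf("requeue clone\n"); }

static void done(int target_rv)
{
	if (target_rv <= 0)
		end_request(target_rv);            /* target wants the I/O completed */
	else if (target_rv == SK_ENDIO_INCOMPLETE)
		return;                            /* target will complete it itself */
	else if (target_rv == SK_ENDIO_REQUEUE)
		requeue();                         /* target wants the I/O retried */
	else
		assert(0 && "unimplemented endio return value");
}

int main(void)
{
	done(0);
	done(-5);
	done(SK_ENDIO_REQUEUE);
	return 0;
}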
848/* 922/*
@@ -850,27 +924,14 @@ static void dm_end_request(struct request *clone, int error)
850 */ 924 */
851static void dm_softirq_done(struct request *rq) 925static void dm_softirq_done(struct request *rq)
852{ 926{
927 bool mapped = true;
853 struct request *clone = rq->completion_data; 928 struct request *clone = rq->completion_data;
854 struct dm_rq_target_io *tio = clone->end_io_data; 929 struct dm_rq_target_io *tio = clone->end_io_data;
855 dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
856 int error = tio->error;
857 930
858 if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io) 931 if (rq->cmd_flags & REQ_FAILED)
859 error = rq_end_io(tio->ti, clone, error, &tio->info); 932 mapped = false;
860 933
861 if (error <= 0) 934 dm_done(clone, tio->error, mapped);
862 /* The target wants to complete the I/O */
863 dm_end_request(clone, error);
864 else if (error == DM_ENDIO_INCOMPLETE)
865 /* The target will handle the I/O */
866 return;
867 else if (error == DM_ENDIO_REQUEUE)
868 /* The target wants to requeue the I/O */
869 dm_requeue_unmapped_request(clone);
870 else {
871 DMWARN("unimplemented target endio return value: %d", error);
872 BUG();
873 }
874} 935}
875 936
876/* 937/*
@@ -882,6 +943,19 @@ static void dm_complete_request(struct request *clone, int error)
882 struct dm_rq_target_io *tio = clone->end_io_data; 943 struct dm_rq_target_io *tio = clone->end_io_data;
883 struct request *rq = tio->orig; 944 struct request *rq = tio->orig;
884 945
946 if (unlikely(blk_barrier_rq(clone))) {
947 /*
948 * Barrier clones share an original request. So can't use
949 * softirq_done with the original.
950 * Pass the clone to dm_done() directly in this special case.
951 * It is safe (even if clone->q->queue_lock is held here)
952 * because there is no I/O dispatching during the completion
953 * of barrier clone.
954 */
955 dm_done(clone, error, true);
956 return;
957 }
958
885 tio->error = error; 959 tio->error = error;
886 rq->completion_data = clone; 960 rq->completion_data = clone;
887 blk_complete_request(rq); 961 blk_complete_request(rq);
@@ -898,6 +972,17 @@ void dm_kill_unmapped_request(struct request *clone, int error)
898 struct dm_rq_target_io *tio = clone->end_io_data; 972 struct dm_rq_target_io *tio = clone->end_io_data;
899 struct request *rq = tio->orig; 973 struct request *rq = tio->orig;
900 974
975 if (unlikely(blk_barrier_rq(clone))) {
976 /*
977 * Barrier clones share an original request.
978 * Leave it to dm_end_request(), which handles this special
979 * case.
980 */
981 BUG_ON(error > 0);
982 dm_end_request(clone, error);
983 return;
984 }
985
901 rq->cmd_flags |= REQ_FAILED; 986 rq->cmd_flags |= REQ_FAILED;
902 dm_complete_request(clone, error); 987 dm_complete_request(clone, error);
903} 988}
@@ -1214,7 +1299,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1214 struct clone_info ci; 1299 struct clone_info ci;
1215 int error = 0; 1300 int error = 0;
1216 1301
1217 ci.map = dm_get_table(md); 1302 ci.map = dm_get_live_table(md);
1218 if (unlikely(!ci.map)) { 1303 if (unlikely(!ci.map)) {
1219 if (!bio_rw_flagged(bio, BIO_RW_BARRIER)) 1304 if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
1220 bio_io_error(bio); 1305 bio_io_error(bio);
@@ -1255,7 +1340,7 @@ static int dm_merge_bvec(struct request_queue *q,
1255 struct bio_vec *biovec) 1340 struct bio_vec *biovec)
1256{ 1341{
1257 struct mapped_device *md = q->queuedata; 1342 struct mapped_device *md = q->queuedata;
1258 struct dm_table *map = dm_get_table(md); 1343 struct dm_table *map = dm_get_live_table(md);
1259 struct dm_target *ti; 1344 struct dm_target *ti;
1260 sector_t max_sectors; 1345 sector_t max_sectors;
1261 int max_size = 0; 1346 int max_size = 0;
@@ -1352,11 +1437,6 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
1352{ 1437{
1353 struct mapped_device *md = q->queuedata; 1438 struct mapped_device *md = q->queuedata;
1354 1439
1355 if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
1356 bio_endio(bio, -EOPNOTSUPP);
1357 return 0;
1358 }
1359
1360 return md->saved_make_request_fn(q, bio); /* call __make_request() */ 1440 return md->saved_make_request_fn(q, bio); /* call __make_request() */
1361} 1441}
1362 1442
@@ -1375,6 +1455,25 @@ static int dm_request(struct request_queue *q, struct bio *bio)
1375 return _dm_request(q, bio); 1455 return _dm_request(q, bio);
1376} 1456}
1377 1457
1458/*
1459 * Mark this request as flush request, so that dm_request_fn() can
1460 * recognize.
1461 */
1462static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
1463{
1464 rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
1465 rq->cmd[0] = REQ_LB_OP_FLUSH;
1466}
1467
1468static bool dm_rq_is_flush_request(struct request *rq)
1469{
1470 if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
1471 rq->cmd[0] == REQ_LB_OP_FLUSH)
1472 return true;
1473 else
1474 return false;
1475}
1476
1378void dm_dispatch_request(struct request *rq) 1477void dm_dispatch_request(struct request *rq)
1379{ 1478{
1380 int r; 1479 int r;
@@ -1420,25 +1519,54 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1420static int setup_clone(struct request *clone, struct request *rq, 1519static int setup_clone(struct request *clone, struct request *rq,
1421 struct dm_rq_target_io *tio) 1520 struct dm_rq_target_io *tio)
1422{ 1521{
1423 int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, 1522 int r;
1424 dm_rq_bio_constructor, tio);
1425 1523
1426 if (r) 1524 if (dm_rq_is_flush_request(rq)) {
1427 return r; 1525 blk_rq_init(NULL, clone);
1526 clone->cmd_type = REQ_TYPE_FS;
1527 clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
1528 } else {
1529 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1530 dm_rq_bio_constructor, tio);
1531 if (r)
1532 return r;
1533
1534 clone->cmd = rq->cmd;
1535 clone->cmd_len = rq->cmd_len;
1536 clone->sense = rq->sense;
1537 clone->buffer = rq->buffer;
1538 }
1428 1539
1429 clone->cmd = rq->cmd;
1430 clone->cmd_len = rq->cmd_len;
1431 clone->sense = rq->sense;
1432 clone->buffer = rq->buffer;
1433 clone->end_io = end_clone_request; 1540 clone->end_io = end_clone_request;
1434 clone->end_io_data = tio; 1541 clone->end_io_data = tio;
1435 1542
1436 return 0; 1543 return 0;
1437} 1544}
1438 1545
1439static int dm_rq_flush_suspending(struct mapped_device *md) 1546static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1547 gfp_t gfp_mask)
1440{ 1548{
1441 return !md->suspend_rq.special; 1549 struct request *clone;
1550 struct dm_rq_target_io *tio;
1551
1552 tio = alloc_rq_tio(md, gfp_mask);
1553 if (!tio)
1554 return NULL;
1555
1556 tio->md = md;
1557 tio->ti = NULL;
1558 tio->orig = rq;
1559 tio->error = 0;
1560 memset(&tio->info, 0, sizeof(tio->info));
1561
1562 clone = &tio->clone;
1563 if (setup_clone(clone, rq, tio)) {
1564 /* -ENOMEM */
1565 free_rq_tio(tio);
1566 return NULL;
1567 }
1568
1569 return clone;
1442} 1570}
1443 1571
1444/* 1572/*
@@ -1447,39 +1575,19 @@ static int dm_rq_flush_suspending(struct mapped_device *md)
1447static int dm_prep_fn(struct request_queue *q, struct request *rq) 1575static int dm_prep_fn(struct request_queue *q, struct request *rq)
1448{ 1576{
1449 struct mapped_device *md = q->queuedata; 1577 struct mapped_device *md = q->queuedata;
1450 struct dm_rq_target_io *tio;
1451 struct request *clone; 1578 struct request *clone;
1452 1579
1453 if (unlikely(rq == &md->suspend_rq)) { 1580 if (unlikely(dm_rq_is_flush_request(rq)))
1454 if (dm_rq_flush_suspending(md)) 1581 return BLKPREP_OK;
1455 return BLKPREP_OK;
1456 else
1457 /* The flush suspend was interrupted */
1458 return BLKPREP_KILL;
1459 }
1460 1582
1461 if (unlikely(rq->special)) { 1583 if (unlikely(rq->special)) {
1462 DMWARN("Already has something in rq->special."); 1584 DMWARN("Already has something in rq->special.");
1463 return BLKPREP_KILL; 1585 return BLKPREP_KILL;
1464 } 1586 }
1465 1587
1466 tio = alloc_rq_tio(md); /* Only one for each original request */ 1588 clone = clone_rq(rq, md, GFP_ATOMIC);
1467 if (!tio) 1589 if (!clone)
1468 /* -ENOMEM */
1469 return BLKPREP_DEFER;
1470
1471 tio->md = md;
1472 tio->ti = NULL;
1473 tio->orig = rq;
1474 tio->error = 0;
1475 memset(&tio->info, 0, sizeof(tio->info));
1476
1477 clone = &tio->clone;
1478 if (setup_clone(clone, rq, tio)) {
1479 /* -ENOMEM */
1480 free_rq_tio(tio);
1481 return BLKPREP_DEFER; 1590 return BLKPREP_DEFER;
1482 }
1483 1591
1484 rq->special = clone; 1592 rq->special = clone;
1485 rq->cmd_flags |= REQ_DONTPREP; 1593 rq->cmd_flags |= REQ_DONTPREP;
@@ -1487,11 +1595,10 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
1487 return BLKPREP_OK; 1595 return BLKPREP_OK;
1488} 1596}
1489 1597
1490static void map_request(struct dm_target *ti, struct request *rq, 1598static void map_request(struct dm_target *ti, struct request *clone,
1491 struct mapped_device *md) 1599 struct mapped_device *md)
1492{ 1600{
1493 int r; 1601 int r;
1494 struct request *clone = rq->special;
1495 struct dm_rq_target_io *tio = clone->end_io_data; 1602 struct dm_rq_target_io *tio = clone->end_io_data;
1496 1603
1497 /* 1604 /*
@@ -1511,6 +1618,8 @@ static void map_request(struct dm_target *ti, struct request *rq,
1511 break; 1618 break;
1512 case DM_MAPIO_REMAPPED: 1619 case DM_MAPIO_REMAPPED:
1513 /* The target has remapped the I/O so dispatch it */ 1620 /* The target has remapped the I/O so dispatch it */
1621 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1622 blk_rq_pos(tio->orig));
1514 dm_dispatch_request(clone); 1623 dm_dispatch_request(clone);
1515 break; 1624 break;
1516 case DM_MAPIO_REQUEUE: 1625 case DM_MAPIO_REQUEUE:
@@ -1536,29 +1645,26 @@ static void map_request(struct dm_target *ti, struct request *rq,
1536static void dm_request_fn(struct request_queue *q) 1645static void dm_request_fn(struct request_queue *q)
1537{ 1646{
1538 struct mapped_device *md = q->queuedata; 1647 struct mapped_device *md = q->queuedata;
1539 struct dm_table *map = dm_get_table(md); 1648 struct dm_table *map = dm_get_live_table(md);
1540 struct dm_target *ti; 1649 struct dm_target *ti;
1541 struct request *rq; 1650 struct request *rq, *clone;
1542 1651
1543 /* 1652 /*
1544 * For noflush suspend, check blk_queue_stopped() to immediately 1653 * For suspend, check blk_queue_stopped() and increment
1545 * quit I/O dispatching. 1654 * ->pending within a single queue_lock not to increment the
1655 * number of in-flight I/Os after the queue is stopped in
1656 * dm_suspend().
1546 */ 1657 */
1547 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { 1658 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1548 rq = blk_peek_request(q); 1659 rq = blk_peek_request(q);
1549 if (!rq) 1660 if (!rq)
1550 goto plug_and_out; 1661 goto plug_and_out;
1551 1662
1552 if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend maker */ 1663 if (unlikely(dm_rq_is_flush_request(rq))) {
1553 if (queue_in_flight(q)) 1664 BUG_ON(md->flush_request);
1554 /* Not quiet yet. Wait more */ 1665 md->flush_request = rq;
1555 goto plug_and_out;
1556
1557 /* This device should be quiet now */
1558 __stop_queue(q);
1559 blk_start_request(rq); 1666 blk_start_request(rq);
1560 __blk_end_request_all(rq, 0); 1667 queue_work(md->wq, &md->barrier_work);
1561 wake_up(&md->wait);
1562 goto out; 1668 goto out;
1563 } 1669 }
1564 1670
@@ -1567,8 +1673,11 @@ static void dm_request_fn(struct request_queue *q)
1567 goto plug_and_out; 1673 goto plug_and_out;
1568 1674
1569 blk_start_request(rq); 1675 blk_start_request(rq);
1676 clone = rq->special;
1677 atomic_inc(&md->pending[rq_data_dir(clone)]);
1678
1570 spin_unlock(q->queue_lock); 1679 spin_unlock(q->queue_lock);
1571 map_request(ti, rq, md); 1680 map_request(ti, clone, md);
1572 spin_lock_irq(q->queue_lock); 1681 spin_lock_irq(q->queue_lock);
1573 } 1682 }
1574 1683
@@ -1595,7 +1704,7 @@ static int dm_lld_busy(struct request_queue *q)
1595{ 1704{
1596 int r; 1705 int r;
1597 struct mapped_device *md = q->queuedata; 1706 struct mapped_device *md = q->queuedata;
1598 struct dm_table *map = dm_get_table(md); 1707 struct dm_table *map = dm_get_live_table(md);
1599 1708
1600 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) 1709 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1601 r = 1; 1710 r = 1;
@@ -1610,7 +1719,7 @@ static int dm_lld_busy(struct request_queue *q)
1610static void dm_unplug_all(struct request_queue *q) 1719static void dm_unplug_all(struct request_queue *q)
1611{ 1720{
1612 struct mapped_device *md = q->queuedata; 1721 struct mapped_device *md = q->queuedata;
1613 struct dm_table *map = dm_get_table(md); 1722 struct dm_table *map = dm_get_live_table(md);
1614 1723
1615 if (map) { 1724 if (map) {
1616 if (dm_request_based(md)) 1725 if (dm_request_based(md))
@@ -1628,7 +1737,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
1628 struct dm_table *map; 1737 struct dm_table *map;
1629 1738
1630 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { 1739 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1631 map = dm_get_table(md); 1740 map = dm_get_live_table(md);
1632 if (map) { 1741 if (map) {
1633 /* 1742 /*
1634 * Request-based dm cares about only own queue for 1743 * Request-based dm cares about only own queue for
@@ -1725,6 +1834,7 @@ out:
1725static const struct block_device_operations dm_blk_dops; 1834static const struct block_device_operations dm_blk_dops;
1726 1835
1727static void dm_wq_work(struct work_struct *work); 1836static void dm_wq_work(struct work_struct *work);
1837static void dm_rq_barrier_work(struct work_struct *work);
1728 1838
1729/* 1839/*
1730 * Allocate and initialise a blank device with a given minor. 1840 * Allocate and initialise a blank device with a given minor.
@@ -1754,6 +1864,7 @@ static struct mapped_device *alloc_dev(int minor)
1754 init_rwsem(&md->io_lock); 1864 init_rwsem(&md->io_lock);
1755 mutex_init(&md->suspend_lock); 1865 mutex_init(&md->suspend_lock);
1756 spin_lock_init(&md->deferred_lock); 1866 spin_lock_init(&md->deferred_lock);
1867 spin_lock_init(&md->barrier_error_lock);
1757 rwlock_init(&md->map_lock); 1868 rwlock_init(&md->map_lock);
1758 atomic_set(&md->holders, 1); 1869 atomic_set(&md->holders, 1);
1759 atomic_set(&md->open_count, 0); 1870 atomic_set(&md->open_count, 0);
@@ -1788,6 +1899,8 @@ static struct mapped_device *alloc_dev(int minor)
1788 blk_queue_softirq_done(md->queue, dm_softirq_done); 1899 blk_queue_softirq_done(md->queue, dm_softirq_done);
1789 blk_queue_prep_rq(md->queue, dm_prep_fn); 1900 blk_queue_prep_rq(md->queue, dm_prep_fn);
1790 blk_queue_lld_busy(md->queue, dm_lld_busy); 1901 blk_queue_lld_busy(md->queue, dm_lld_busy);
1902 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
1903 dm_rq_prepare_flush);
1791 1904
1792 md->disk = alloc_disk(1); 1905 md->disk = alloc_disk(1);
1793 if (!md->disk) 1906 if (!md->disk)
@@ -1797,6 +1910,7 @@ static struct mapped_device *alloc_dev(int minor)
1797 atomic_set(&md->pending[1], 0); 1910 atomic_set(&md->pending[1], 0);
1798 init_waitqueue_head(&md->wait); 1911 init_waitqueue_head(&md->wait);
1799 INIT_WORK(&md->work, dm_wq_work); 1912 INIT_WORK(&md->work, dm_wq_work);
1913 INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
1800 init_waitqueue_head(&md->eventq); 1914 init_waitqueue_head(&md->eventq);
1801 1915
1802 md->disk->major = _major; 1916 md->disk->major = _major;
@@ -1921,9 +2035,13 @@ static void __set_size(struct mapped_device *md, sector_t size)
1921 mutex_unlock(&md->bdev->bd_inode->i_mutex); 2035 mutex_unlock(&md->bdev->bd_inode->i_mutex);
1922} 2036}
1923 2037
1924static int __bind(struct mapped_device *md, struct dm_table *t, 2038/*
1925 struct queue_limits *limits) 2039 * Returns old map, which caller must destroy.
2040 */
2041static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2042 struct queue_limits *limits)
1926{ 2043{
2044 struct dm_table *old_map;
1927 struct request_queue *q = md->queue; 2045 struct request_queue *q = md->queue;
1928 sector_t size; 2046 sector_t size;
1929 unsigned long flags; 2047 unsigned long flags;
@@ -1938,11 +2056,6 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
1938 2056
1939 __set_size(md, size); 2057 __set_size(md, size);
1940 2058
1941 if (!size) {
1942 dm_table_destroy(t);
1943 return 0;
1944 }
1945
1946 dm_table_event_callback(t, event_callback, md); 2059 dm_table_event_callback(t, event_callback, md);
1947 2060
1948 /* 2061 /*
@@ -1958,26 +2071,31 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
1958 __bind_mempools(md, t); 2071 __bind_mempools(md, t);
1959 2072
1960 write_lock_irqsave(&md->map_lock, flags); 2073 write_lock_irqsave(&md->map_lock, flags);
2074 old_map = md->map;
1961 md->map = t; 2075 md->map = t;
1962 dm_table_set_restrictions(t, q, limits); 2076 dm_table_set_restrictions(t, q, limits);
1963 write_unlock_irqrestore(&md->map_lock, flags); 2077 write_unlock_irqrestore(&md->map_lock, flags);
1964 2078
1965 return 0; 2079 return old_map;
1966} 2080}
1967 2081
1968static void __unbind(struct mapped_device *md) 2082/*
2083 * Returns unbound table for the caller to free.
2084 */
2085static struct dm_table *__unbind(struct mapped_device *md)
1969{ 2086{
1970 struct dm_table *map = md->map; 2087 struct dm_table *map = md->map;
1971 unsigned long flags; 2088 unsigned long flags;
1972 2089
1973 if (!map) 2090 if (!map)
1974 return; 2091 return NULL;
1975 2092
1976 dm_table_event_callback(map, NULL, NULL); 2093 dm_table_event_callback(map, NULL, NULL);
1977 write_lock_irqsave(&md->map_lock, flags); 2094 write_lock_irqsave(&md->map_lock, flags);
1978 md->map = NULL; 2095 md->map = NULL;
1979 write_unlock_irqrestore(&md->map_lock, flags); 2096 write_unlock_irqrestore(&md->map_lock, flags);
1980 dm_table_destroy(map); 2097
2098 return map;
1981} 2099}
1982 2100
1983/* 2101/*
@@ -2059,18 +2177,18 @@ void dm_put(struct mapped_device *md)
2059 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2177 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2060 2178
2061 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { 2179 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
2062 map = dm_get_table(md); 2180 map = dm_get_live_table(md);
2063 idr_replace(&_minor_idr, MINOR_ALLOCED, 2181 idr_replace(&_minor_idr, MINOR_ALLOCED,
2064 MINOR(disk_devt(dm_disk(md)))); 2182 MINOR(disk_devt(dm_disk(md))));
2065 set_bit(DMF_FREEING, &md->flags); 2183 set_bit(DMF_FREEING, &md->flags);
2066 spin_unlock(&_minor_lock); 2184 spin_unlock(&_minor_lock);
2067 if (!dm_suspended(md)) { 2185 if (!dm_suspended_md(md)) {
2068 dm_table_presuspend_targets(map); 2186 dm_table_presuspend_targets(map);
2069 dm_table_postsuspend_targets(map); 2187 dm_table_postsuspend_targets(map);
2070 } 2188 }
2071 dm_sysfs_exit(md); 2189 dm_sysfs_exit(md);
2072 dm_table_put(map); 2190 dm_table_put(map);
2073 __unbind(md); 2191 dm_table_destroy(__unbind(md));
2074 free_dev(md); 2192 free_dev(md);
2075 } 2193 }
2076} 2194}
@@ -2080,8 +2198,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2080{ 2198{
2081 int r = 0; 2199 int r = 0;
2082 DECLARE_WAITQUEUE(wait, current); 2200 DECLARE_WAITQUEUE(wait, current);
2083 struct request_queue *q = md->queue;
2084 unsigned long flags;
2085 2201
2086 dm_unplug_all(md->queue); 2202 dm_unplug_all(md->queue);
2087 2203
@@ -2091,15 +2207,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2091 set_current_state(interruptible); 2207 set_current_state(interruptible);
2092 2208
2093 smp_mb(); 2209 smp_mb();
2094 if (dm_request_based(md)) { 2210 if (!md_in_flight(md))
2095 spin_lock_irqsave(q->queue_lock, flags);
2096 if (!queue_in_flight(q) && blk_queue_stopped(q)) {
2097 spin_unlock_irqrestore(q->queue_lock, flags);
2098 break;
2099 }
2100 spin_unlock_irqrestore(q->queue_lock, flags);
2101 } else if (!atomic_read(&md->pending[0]) &&
2102 !atomic_read(&md->pending[1]))
2103 break; 2211 break;
2104 2212
2105 if (interruptible == TASK_INTERRUPTIBLE && 2213 if (interruptible == TASK_INTERRUPTIBLE &&
@@ -2194,98 +2302,106 @@ static void dm_queue_flush(struct mapped_device *md)
2194 queue_work(md->wq, &md->work); 2302 queue_work(md->wq, &md->work);
2195} 2303}
2196 2304
2197/* 2305static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
2198 * Swap in a new table (destroying old one).
2199 */
2200int dm_swap_table(struct mapped_device *md, struct dm_table *table)
2201{ 2306{
2202 struct queue_limits limits; 2307 struct dm_rq_target_io *tio = clone->end_io_data;
2203 int r = -EINVAL;
2204 2308
2205 mutex_lock(&md->suspend_lock); 2309 tio->info.flush_request = flush_nr;
2310}
2206 2311
2207 /* device must be suspended */ 2312/* Issue barrier requests to targets and wait for their completion. */
2208 if (!dm_suspended(md)) 2313static int dm_rq_barrier(struct mapped_device *md)
2209 goto out; 2314{
2315 int i, j;
2316 struct dm_table *map = dm_get_live_table(md);
2317 unsigned num_targets = dm_table_get_num_targets(map);
2318 struct dm_target *ti;
2319 struct request *clone;
2210 2320
2211 r = dm_calculate_queue_limits(table, &limits); 2321 md->barrier_error = 0;
2212 if (r)
2213 goto out;
2214 2322
2215 /* cannot change the device type, once a table is bound */ 2323 for (i = 0; i < num_targets; i++) {
2216 if (md->map && 2324 ti = dm_table_get_target(map, i);
2217 (dm_table_get_type(md->map) != dm_table_get_type(table))) { 2325 for (j = 0; j < ti->num_flush_requests; j++) {
2218 DMWARN("can't change the device type after a table is bound"); 2326 clone = clone_rq(md->flush_request, md, GFP_NOIO);
2219 goto out; 2327 dm_rq_set_flush_nr(clone, j);
2328 atomic_inc(&md->pending[rq_data_dir(clone)]);
2329 map_request(ti, clone, md);
2330 }
2220 } 2331 }
2221 2332
2222 __unbind(md); 2333 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2223 r = __bind(md, table, &limits); 2334 dm_table_put(map);
2224
2225out:
2226 mutex_unlock(&md->suspend_lock);
2227 return r;
2228}
2229 2335
2230static void dm_rq_invalidate_suspend_marker(struct mapped_device *md) 2336 return md->barrier_error;
2231{
2232 md->suspend_rq.special = (void *)0x1;
2233} 2337}
2234 2338
2235static void dm_rq_abort_suspend(struct mapped_device *md, int noflush) 2339static void dm_rq_barrier_work(struct work_struct *work)
2236{ 2340{
2341 int error;
2342 struct mapped_device *md = container_of(work, struct mapped_device,
2343 barrier_work);
2237 struct request_queue *q = md->queue; 2344 struct request_queue *q = md->queue;
2345 struct request *rq;
2238 unsigned long flags; 2346 unsigned long flags;
2239 2347
2240 spin_lock_irqsave(q->queue_lock, flags); 2348 /*
2241 if (!noflush) 2349 * Hold the md reference here and leave it at the last part so that
2242 dm_rq_invalidate_suspend_marker(md); 2350 * the md can't be deleted by device opener when the barrier request
2243 __start_queue(q); 2351 * completes.
2244 spin_unlock_irqrestore(q->queue_lock, flags); 2352 */
2245} 2353 dm_get(md);
2246 2354
2247static void dm_rq_start_suspend(struct mapped_device *md, int noflush) 2355 error = dm_rq_barrier(md);
2248{
2249 struct request *rq = &md->suspend_rq;
2250 struct request_queue *q = md->queue;
2251 2356
2252 if (noflush) 2357 rq = md->flush_request;
2253 stop_queue(q); 2358 md->flush_request = NULL;
2254 else { 2359
2255 blk_rq_init(q, rq); 2360 if (error == DM_ENDIO_REQUEUE) {
2256 blk_insert_request(q, rq, 0, NULL); 2361 spin_lock_irqsave(q->queue_lock, flags);
2257 } 2362 blk_requeue_request(q, rq);
2363 spin_unlock_irqrestore(q->queue_lock, flags);
2364 } else
2365 blk_end_request_all(rq, error);
2366
2367 blk_run_queue(q);
2368
2369 dm_put(md);
2258} 2370}
2259 2371
2260static int dm_rq_suspend_available(struct mapped_device *md, int noflush) 2372/*
2373 * Swap in a new table, returning the old one for the caller to destroy.
2374 */
2375struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2261{ 2376{
2262 int r = 1; 2377 struct dm_table *map = ERR_PTR(-EINVAL);
2263 struct request *rq = &md->suspend_rq; 2378 struct queue_limits limits;
2264 struct request_queue *q = md->queue; 2379 int r;
2265 unsigned long flags;
2266 2380
2267 if (noflush) 2381 mutex_lock(&md->suspend_lock);
2268 return r;
2269 2382
2270 /* The marker must be protected by queue lock if it is in use */ 2383 /* device must be suspended */
2271 spin_lock_irqsave(q->queue_lock, flags); 2384 if (!dm_suspended_md(md))
2272 if (unlikely(rq->ref_count)) { 2385 goto out;
2273 /* 2386
2274 * This can happen, when the previous flush suspend was 2387 r = dm_calculate_queue_limits(table, &limits);
2275 * interrupted, the marker is still in the queue and 2388 if (r) {
2276 * this flush suspend has been invoked, because we don't 2389 map = ERR_PTR(r);
2277 * remove the marker at the time of suspend interruption. 2390 goto out;
2278 * We have only one marker per mapped_device, so we can't
2279 * start another flush suspend while it is in use.
2280 */
2281 BUG_ON(!rq->special); /* The marker should be invalidated */
2282 DMWARN("Invalidating the previous flush suspend is still in"
2283 " progress. Please retry later.");
2284 r = 0;
2285 } 2391 }
2286 spin_unlock_irqrestore(q->queue_lock, flags);
2287 2392
2288 return r; 2393 /* cannot change the device type, once a table is bound */
2394 if (md->map &&
2395 (dm_table_get_type(md->map) != dm_table_get_type(table))) {
2396 DMWARN("can't change the device type after a table is bound");
2397 goto out;
2398 }
2399
2400 map = __bind(md, table, &limits);
2401
2402out:
2403 mutex_unlock(&md->suspend_lock);
2404 return map;
2289} 2405}
2290 2406
2291/* 2407/*
@@ -2330,49 +2446,11 @@ static void unlock_fs(struct mapped_device *md)
2330/* 2446/*
2331 * Suspend mechanism in request-based dm. 2447 * Suspend mechanism in request-based dm.
2332 * 2448 *
2333 * After the suspend starts, further incoming requests are kept in 2449 * 1. Flush all I/Os by lock_fs() if needed.
2334 * the request_queue and deferred. 2450 * 2. Stop dispatching any I/O by stopping the request_queue.
2335 * Remaining requests in the request_queue at the start of suspend are flushed 2451 * 3. Wait for all in-flight I/Os to be completed or requeued.
2336 * if it is flush suspend.
2337 * The suspend completes when the following conditions have been satisfied,
2338 * so wait for it:
2339 * 1. q->in_flight is 0 (which means no in_flight request)
2340 * 2. queue has been stopped (which means no request dispatching)
2341 *
2342 * 2452 *
2343 * Noflush suspend 2453 * To abort suspend, start the request_queue.
2344 * ---------------
2345 * Noflush suspend doesn't need to dispatch remaining requests.
2346 * So stop the queue immediately. Then, wait for all in_flight requests
2347 * to be completed or requeued.
2348 *
2349 * To abort noflush suspend, start the queue.
2350 *
2351 *
2352 * Flush suspend
2353 * -------------
2354 * Flush suspend needs to dispatch remaining requests. So stop the queue
2355 * after the remaining requests are completed. (Requeued request must be also
2356 * re-dispatched and completed. Until then, we can't stop the queue.)
2357 *
2358 * During flushing the remaining requests, further incoming requests are also
2359 * inserted to the same queue. To distinguish which requests are to be
2360 * flushed, we insert a marker request to the queue at the time of starting
2361 * flush suspend, like a barrier.
2362 * The dispatching is blocked when the marker is found on the top of the queue.
2363 * And the queue is stopped when all in_flight requests are completed, since
2364 * that means the remaining requests are completely flushed.
2365 * Then, the marker is removed from the queue.
2366 *
2367 * To abort flush suspend, we also need to take care of the marker, not only
2368 * starting the queue.
2369 * We don't remove the marker forcibly from the queue since it's against
2370 * the block-layer manner. Instead, we put a invalidated mark on the marker.
2371 * When the invalidated marker is found on the top of the queue, it is
2372 * immediately removed from the queue, so it doesn't block dispatching.
2373 * Because we have only one marker per mapped_device, we can't start another
2374 * flush suspend until the invalidated marker is removed from the queue.
2375 * So fail and return with -EBUSY in such a case.
2376 */ 2454 */
2377int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2455int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2378{ 2456{
@@ -2383,17 +2461,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2383 2461
2384 mutex_lock(&md->suspend_lock); 2462 mutex_lock(&md->suspend_lock);
2385 2463
2386 if (dm_suspended(md)) { 2464 if (dm_suspended_md(md)) {
2387 r = -EINVAL; 2465 r = -EINVAL;
2388 goto out_unlock; 2466 goto out_unlock;
2389 } 2467 }
2390 2468
2391 if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) { 2469 map = dm_get_live_table(md);
2392 r = -EBUSY;
2393 goto out_unlock;
2394 }
2395
2396 map = dm_get_table(md);
2397 2470
2398 /* 2471 /*
2399 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2472 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
@@ -2406,8 +2479,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2406 dm_table_presuspend_targets(map); 2479 dm_table_presuspend_targets(map);
2407 2480
2408 /* 2481 /*
2409 * Flush I/O to the device. noflush supersedes do_lockfs, 2482 * Flush I/O to the device.
2410 * because lock_fs() needs to flush I/Os. 2483 * Any I/O submitted after lock_fs() may not be flushed.
2484 * noflush takes precedence over do_lockfs.
2485 * (lock_fs() flushes I/Os and waits for them to complete.)
2411 */ 2486 */
2412 if (!noflush && do_lockfs) { 2487 if (!noflush && do_lockfs) {
2413 r = lock_fs(md); 2488 r = lock_fs(md);
@@ -2436,10 +2511,15 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2436 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); 2511 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
2437 up_write(&md->io_lock); 2512 up_write(&md->io_lock);
2438 2513
2439 flush_workqueue(md->wq); 2514 /*
2440 2515 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
2516 * can be kicked until md->queue is stopped. So stop md->queue before
2517 * flushing md->wq.
2518 */
2441 if (dm_request_based(md)) 2519 if (dm_request_based(md))
2442 dm_rq_start_suspend(md, noflush); 2520 stop_queue(md->queue);
2521
2522 flush_workqueue(md->wq);
2443 2523
2444 /* 2524 /*
2445 * At this point no more requests are entering target request routines. 2525 * At this point no more requests are entering target request routines.
@@ -2458,7 +2538,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2458 dm_queue_flush(md); 2538 dm_queue_flush(md);
2459 2539
2460 if (dm_request_based(md)) 2540 if (dm_request_based(md))
2461 dm_rq_abort_suspend(md, noflush); 2541 start_queue(md->queue);
2462 2542
2463 unlock_fs(md); 2543 unlock_fs(md);
2464 goto out; /* pushback list is already flushed, so skip flush */ 2544 goto out; /* pushback list is already flushed, so skip flush */
@@ -2470,10 +2550,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2470 * requests are being added to md->deferred list. 2550 * requests are being added to md->deferred list.
2471 */ 2551 */
2472 2552
2473 dm_table_postsuspend_targets(map);
2474
2475 set_bit(DMF_SUSPENDED, &md->flags); 2553 set_bit(DMF_SUSPENDED, &md->flags);
2476 2554
2555 dm_table_postsuspend_targets(map);
2556
2477out: 2557out:
2478 dm_table_put(map); 2558 dm_table_put(map);
2479 2559
@@ -2488,10 +2568,10 @@ int dm_resume(struct mapped_device *md)
2488 struct dm_table *map = NULL; 2568 struct dm_table *map = NULL;
2489 2569
2490 mutex_lock(&md->suspend_lock); 2570 mutex_lock(&md->suspend_lock);
2491 if (!dm_suspended(md)) 2571 if (!dm_suspended_md(md))
2492 goto out; 2572 goto out;
2493 2573
2494 map = dm_get_table(md); 2574 map = dm_get_live_table(md);
2495 if (!map || !dm_table_get_size(map)) 2575 if (!map || !dm_table_get_size(map))
2496 goto out; 2576 goto out;
2497 2577
@@ -2592,18 +2672,29 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2592 return NULL; 2672 return NULL;
2593 2673
2594 if (test_bit(DMF_FREEING, &md->flags) || 2674 if (test_bit(DMF_FREEING, &md->flags) ||
2595 test_bit(DMF_DELETING, &md->flags)) 2675 dm_deleting_md(md))
2596 return NULL; 2676 return NULL;
2597 2677
2598 dm_get(md); 2678 dm_get(md);
2599 return md; 2679 return md;
2600} 2680}
2601 2681
2602int dm_suspended(struct mapped_device *md) 2682int dm_suspended_md(struct mapped_device *md)
2603{ 2683{
2604 return test_bit(DMF_SUSPENDED, &md->flags); 2684 return test_bit(DMF_SUSPENDED, &md->flags);
2605} 2685}
2606 2686
2687int dm_suspended(struct dm_target *ti)
2688{
2689 struct mapped_device *md = dm_table_get_md(ti->table);
2690 int r = dm_suspended_md(md);
2691
2692 dm_put(md);
2693
2694 return r;
2695}
2696EXPORT_SYMBOL_GPL(dm_suspended);
2697
2607int dm_noflush_suspending(struct dm_target *ti) 2698int dm_noflush_suspending(struct dm_target *ti)
2608{ 2699{
2609 struct mapped_device *md = dm_table_get_md(ti->table); 2700 struct mapped_device *md = dm_table_get_md(ti->table);
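An illustrative sketch, not part of the patch: dm_swap_table() now hands the previously bound table back to its caller instead of destroying it internally (dm_put() above does the equivalent via dm_table_destroy(__unbind(md))). The caller below, load_new_table(), is hypothetical; only dm_swap_table(), dm_table_destroy() and the ERR_PTR()/IS_ERR() error convention come from the code above.

/* Hypothetical caller: swap in a new table, then free the one it replaces. */
static int load_new_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map;

	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);	/* e.g. -EINVAL if the device is not suspended */

	if (old_map)
		dm_table_destroy(old_map);	/* the caller now owns the unbound table */

	return 0;
}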
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a7663eba17e2..8dadaa5bc396 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -89,6 +89,16 @@ int dm_target_iterate(void (*iter_func)(struct target_type *tt,
89int dm_split_args(int *argc, char ***argvp, char *input); 89int dm_split_args(int *argc, char ***argvp, char *input);
90 90
91/* 91/*
92 * Is this mapped_device being deleted?
93 */
94int dm_deleting_md(struct mapped_device *md);
95
96/*
97 * Is this mapped_device suspended?
98 */
99int dm_suspended_md(struct mapped_device *md);
100
101/*
92 * The device-mapper can be driven through one of two interfaces; 102 * The device-mapper can be driven through one of two interfaces;
93 * ioctl or filesystem, depending which patch you have applied. 103 * ioctl or filesystem, depending which patch you have applied.
94 */ 104 */
@@ -118,6 +128,9 @@ int dm_lock_for_deletion(struct mapped_device *md);
118void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 128void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
119 unsigned cookie); 129 unsigned cookie);
120 130
131int dm_io_init(void);
132void dm_io_exit(void);
133
121int dm_kcopyd_init(void); 134int dm_kcopyd_init(void);
122void dm_kcopyd_exit(void); 135void dm_kcopyd_exit(void);
123 136
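Purely for illustration (the caller below is hypothetical): the two helpers declared here let internal device-mapper code query a mapped_device's state instead of testing DMF_* flags by hand.

/* Hypothetical internal check built on the new dm.h helpers. */
static int example_check_device_state(struct mapped_device *md)
{
	if (dm_deleting_md(md))		/* device is being torn down */
		return -ENXIO;

	if (!dm_suspended_md(md))	/* e.g. a table swap requires a suspended device */
		return -EINVAL;

	return 0;
}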
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e1f3c1715cca..f4f5f82f9f53 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,6 +39,7 @@
39#include <linux/buffer_head.h> /* for invalidate_bdev */ 39#include <linux/buffer_head.h> /* for invalidate_bdev */
40#include <linux/poll.h> 40#include <linux/poll.h>
41#include <linux/ctype.h> 41#include <linux/ctype.h>
42#include <linux/string.h>
42#include <linux/hdreg.h> 43#include <linux/hdreg.h>
43#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
44#include <linux/random.h> 45#include <linux/random.h>
@@ -1935,15 +1936,11 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1935 1936
1936 uuid = sb->set_uuid; 1937 uuid = sb->set_uuid;
1937 printk(KERN_INFO 1938 printk(KERN_INFO
1938 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" 1939 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
1939 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
1940 "md: Name: \"%s\" CT:%llu\n", 1940 "md: Name: \"%s\" CT:%llu\n",
1941 le32_to_cpu(sb->major_version), 1941 le32_to_cpu(sb->major_version),
1942 le32_to_cpu(sb->feature_map), 1942 le32_to_cpu(sb->feature_map),
1943 uuid[0], uuid[1], uuid[2], uuid[3], 1943 uuid,
1944 uuid[4], uuid[5], uuid[6], uuid[7],
1945 uuid[8], uuid[9], uuid[10], uuid[11],
1946 uuid[12], uuid[13], uuid[14], uuid[15],
1947 sb->set_name, 1944 sb->set_name,
1948 (unsigned long long)le64_to_cpu(sb->ctime) 1945 (unsigned long long)le64_to_cpu(sb->ctime)
1949 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 1946 & MD_SUPERBLOCK_1_TIME_SEC_MASK);
@@ -1952,8 +1949,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1952 printk(KERN_INFO 1949 printk(KERN_INFO
1953 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 1950 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
1954 " RO:%llu\n" 1951 " RO:%llu\n"
1955 "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" 1952 "md: Dev:%08x UUID: %pU\n"
1956 ":%02x%02x%02x%02x%02x%02x\n"
1957 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 1953 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
1958 "md: (MaxDev:%u) \n", 1954 "md: (MaxDev:%u) \n",
1959 le32_to_cpu(sb->level), 1955 le32_to_cpu(sb->level),
@@ -1966,10 +1962,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
1966 (unsigned long long)le64_to_cpu(sb->super_offset), 1962 (unsigned long long)le64_to_cpu(sb->super_offset),
1967 (unsigned long long)le64_to_cpu(sb->recovery_offset), 1963 (unsigned long long)le64_to_cpu(sb->recovery_offset),
1968 le32_to_cpu(sb->dev_number), 1964 le32_to_cpu(sb->dev_number),
1969 uuid[0], uuid[1], uuid[2], uuid[3], 1965 uuid,
1970 uuid[4], uuid[5], uuid[6], uuid[7],
1971 uuid[8], uuid[9], uuid[10], uuid[11],
1972 uuid[12], uuid[13], uuid[14], uuid[15],
1973 sb->devflags, 1966 sb->devflags,
1974 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, 1967 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
1975 (unsigned long long)le64_to_cpu(sb->events), 1968 (unsigned long long)le64_to_cpu(sb->events),
@@ -3439,8 +3432,7 @@ bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3439 } 3432 }
3440 if (*end && !isspace(*end)) break; 3433 if (*end && !isspace(*end)) break;
3441 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3434 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3442 buf = end; 3435 buf = skip_spaces(end);
3443 while (isspace(*buf)) buf++;
3444 } 3436 }
3445 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3437 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3446out: 3438out:
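Two small conversions in the md.c hunks above are worth a note; the demo function below is hypothetical and only shows the generic kernel facilities involved. The %pU printk extension takes a pointer to 16 UUID bytes and prints them in the usual 8-4-4-4-12 form, and skip_spaces() from <linux/string.h> returns a pointer past leading whitespace, replacing the open-coded isspace() loop.

#include <linux/kernel.h>
#include <linux/string.h>

/* Hypothetical demo of %pU and skip_spaces(), as adopted by the hunks above. */
static void print_uuid_and_skip(const u8 *uuid, const char *buf)
{
	printk(KERN_INFO "md: Array-ID:<%pU>\n", uuid);	/* uuid points at 16 bytes */

	buf = skip_spaces(buf);				/* advance past spaces and tabs */
	printk(KERN_INFO "md: next field: %s\n", buf);
}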
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
new file mode 100644
index 000000000000..4dde7d180a32
--- /dev/null
+++ b/drivers/media/IR/Kconfig
@@ -0,0 +1,9 @@
1config IR_CORE
2 tristate
3 depends on INPUT
4 default INPUT
5
6config VIDEO_IR
7 tristate
8 depends on IR_CORE
9 default IR_CORE
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
new file mode 100644
index 000000000000..df5ddb4bbbf7
--- /dev/null
+++ b/drivers/media/IR/Makefile
@@ -0,0 +1,5 @@
1ir-common-objs := ir-functions.o ir-keymaps.o
2ir-core-objs := ir-keytable.o
3
4obj-$(CONFIG_IR_CORE) += ir-core.o
5obj-$(CONFIG_VIDEO_IR) += ir-common.o
diff --git a/drivers/media/common/ir-functions.c b/drivers/media/IR/ir-functions.c
index e616f624ceaa..776a136616d6 100644
--- a/drivers/media/common/ir-functions.c
+++ b/drivers/media/IR/ir-functions.c
@@ -34,9 +34,6 @@ static int repeat = 1;
34module_param(repeat, int, 0444); 34module_param(repeat, int, 0444);
35MODULE_PARM_DESC(repeat,"auto-repeat for IR keys (default: on)"); 35MODULE_PARM_DESC(repeat,"auto-repeat for IR keys (default: on)");
36 36
37int media_ir_debug; /* media_ir_debug level (0,1,2) */
38module_param_named(debug, media_ir_debug, int, 0644);
39
40/* -------------------------------------------------------------------------- */ 37/* -------------------------------------------------------------------------- */
41 38
42static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir) 39static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir)
@@ -55,25 +52,10 @@ static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir)
55/* -------------------------------------------------------------------------- */ 52/* -------------------------------------------------------------------------- */
56 53
57int ir_input_init(struct input_dev *dev, struct ir_input_state *ir, 54int ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
58 int ir_type, struct ir_scancode_table *ir_codes) 55 int ir_type)
59{ 56{
60 ir->ir_type = ir_type; 57 ir->ir_type = ir_type;
61 58
62 ir->keytable.size = ir_roundup_tablesize(ir_codes->size);
63 ir->keytable.scan = kzalloc(ir->keytable.size *
64 sizeof(struct ir_scancode), GFP_KERNEL);
65 if (!ir->keytable.scan)
66 return -ENOMEM;
67
68 IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n",
69 ir->keytable.size,
70 ir->keytable.size * sizeof(ir->keytable.scan));
71
72 ir_copy_table(&ir->keytable, ir_codes);
73 ir_set_keycode_table(dev, &ir->keytable);
74
75 clear_bit(0, dev->keybit);
76 set_bit(EV_KEY, dev->evbit);
77 if (repeat) 59 if (repeat)
78 set_bit(EV_REP, dev->evbit); 60 set_bit(EV_REP, dev->evbit);
79 61
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/IR/ir-keymaps.c
index 328c973a0838..9bbe6b1e9871 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/IR/ir-keymaps.c
@@ -1847,76 +1847,6 @@ struct ir_scancode_table ir_codes_hauppauge_new_table = {
1847}; 1847};
1848EXPORT_SYMBOL_GPL(ir_codes_hauppauge_new_table); 1848EXPORT_SYMBOL_GPL(ir_codes_hauppauge_new_table);
1849 1849
1850/*
1851 * Hauppauge:the newer, gray remotes (seems there are multiple
1852 * slightly different versions), shipped with cx88+ivtv cards.
1853 *
1854 * This table contains the complete RC5 code, instead of just the data part
1855 */
1856static struct ir_scancode ir_codes_rc5_hauppauge_new[] = {
1857 /* Keys 0 to 9 */
1858 { 0x1e00, KEY_0 },
1859 { 0x1e01, KEY_1 },
1860 { 0x1e02, KEY_2 },
1861 { 0x1e03, KEY_3 },
1862 { 0x1e04, KEY_4 },
1863 { 0x1e05, KEY_5 },
1864 { 0x1e06, KEY_6 },
1865 { 0x1e07, KEY_7 },
1866 { 0x1e08, KEY_8 },
1867 { 0x1e09, KEY_9 },
1868
1869 { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
1870 { 0x1e0b, KEY_RED }, /* red button */
1871 { 0x1e0c, KEY_RADIO },
1872 { 0x1e0d, KEY_MENU },
1873 { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
1874 { 0x1e0f, KEY_MUTE },
1875 { 0x1e10, KEY_VOLUMEUP },
1876 { 0x1e11, KEY_VOLUMEDOWN },
1877 { 0x1e12, KEY_PREVIOUS }, /* previous channel */
1878 { 0x1e14, KEY_UP },
1879 { 0x1e15, KEY_DOWN },
1880 { 0x1e16, KEY_LEFT },
1881 { 0x1e17, KEY_RIGHT },
1882 { 0x1e18, KEY_VIDEO }, /* Videos */
1883 { 0x1e19, KEY_AUDIO }, /* Music */
1884 /* 0x1e1a: Pictures - presume this means
1885 "Multimedia Home Platform" -
1886 no "PICTURES" key in input.h
1887 */
1888 { 0x1e1a, KEY_MHP },
1889
1890 { 0x1e1b, KEY_EPG }, /* Guide */
1891 { 0x1e1c, KEY_TV },
1892 { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
1893 { 0x1e1f, KEY_EXIT }, /* back/exit */
1894 { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
1895 { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
1896 { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
1897 { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
1898 { 0x1e25, KEY_ENTER }, /* OK */
1899 { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
1900 { 0x1e29, KEY_BLUE }, /* blue key */
1901 { 0x1e2e, KEY_GREEN }, /* green button */
1902 { 0x1e30, KEY_PAUSE }, /* pause */
1903 { 0x1e32, KEY_REWIND }, /* backward << */
1904 { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
1905 { 0x1e35, KEY_PLAY },
1906 { 0x1e36, KEY_STOP },
1907 { 0x1e37, KEY_RECORD }, /* recording */
1908 { 0x1e38, KEY_YELLOW }, /* yellow key */
1909 { 0x1e3b, KEY_SELECT }, /* top right button */
1910 { 0x1e3c, KEY_ZOOM }, /* full */
1911 { 0x1e3d, KEY_POWER }, /* system power (green button) */
1912};
1913
1914struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = {
1915 .scan = ir_codes_rc5_hauppauge_new,
1916 .size = ARRAY_SIZE(ir_codes_rc5_hauppauge_new),
1917};
1918EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table);
1919
1920static struct ir_scancode ir_codes_npgtech[] = { 1850static struct ir_scancode ir_codes_npgtech[] = {
1921 { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */ 1851 { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
1922 { 0x2a, KEY_FRONT }, 1852 { 0x2a, KEY_FRONT },
@@ -3314,3 +3244,152 @@ struct ir_scancode_table ir_codes_gadmei_rm008z_table = {
3314}; 3244};
3315EXPORT_SYMBOL_GPL(ir_codes_gadmei_rm008z_table); 3245EXPORT_SYMBOL_GPL(ir_codes_gadmei_rm008z_table);
3316 3246
3247/*************************************************************
3248 * COMPLETE SCANCODE TABLES
 3249 * Instead of just a partial scancode, the tables below
 3250 * contain the complete scancode and the receiver protocol
3251 *************************************************************/
3252
3253/*
3254 * Hauppauge:the newer, gray remotes (seems there are multiple
3255 * slightly different versions), shipped with cx88+ivtv cards.
3256 *
3257 * This table contains the complete RC5 code, instead of just the data part
3258 */
3259static struct ir_scancode ir_codes_rc5_hauppauge_new[] = {
3260 /* Keys 0 to 9 */
3261 { 0x1e00, KEY_0 },
3262 { 0x1e01, KEY_1 },
3263 { 0x1e02, KEY_2 },
3264 { 0x1e03, KEY_3 },
3265 { 0x1e04, KEY_4 },
3266 { 0x1e05, KEY_5 },
3267 { 0x1e06, KEY_6 },
3268 { 0x1e07, KEY_7 },
3269 { 0x1e08, KEY_8 },
3270 { 0x1e09, KEY_9 },
3271
3272 { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
3273 { 0x1e0b, KEY_RED }, /* red button */
3274 { 0x1e0c, KEY_RADIO },
3275 { 0x1e0d, KEY_MENU },
3276 { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
3277 { 0x1e0f, KEY_MUTE },
3278 { 0x1e10, KEY_VOLUMEUP },
3279 { 0x1e11, KEY_VOLUMEDOWN },
3280 { 0x1e12, KEY_PREVIOUS }, /* previous channel */
3281 { 0x1e14, KEY_UP },
3282 { 0x1e15, KEY_DOWN },
3283 { 0x1e16, KEY_LEFT },
3284 { 0x1e17, KEY_RIGHT },
3285 { 0x1e18, KEY_VIDEO }, /* Videos */
3286 { 0x1e19, KEY_AUDIO }, /* Music */
3287 /* 0x1e1a: Pictures - presume this means
3288 "Multimedia Home Platform" -
3289 no "PICTURES" key in input.h
3290 */
3291 { 0x1e1a, KEY_MHP },
3292
3293 { 0x1e1b, KEY_EPG }, /* Guide */
3294 { 0x1e1c, KEY_TV },
3295 { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
3296 { 0x1e1f, KEY_EXIT }, /* back/exit */
3297 { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
3298 { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
3299 { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
3300 { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
3301 { 0x1e25, KEY_ENTER }, /* OK */
3302 { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
3303 { 0x1e29, KEY_BLUE }, /* blue key */
3304 { 0x1e2e, KEY_GREEN }, /* green button */
3305 { 0x1e30, KEY_PAUSE }, /* pause */
3306 { 0x1e32, KEY_REWIND }, /* backward << */
3307 { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
3308 { 0x1e35, KEY_PLAY },
3309 { 0x1e36, KEY_STOP },
3310 { 0x1e37, KEY_RECORD }, /* recording */
3311 { 0x1e38, KEY_YELLOW }, /* yellow key */
3312 { 0x1e3b, KEY_SELECT }, /* top right button */
3313 { 0x1e3c, KEY_ZOOM }, /* full */
3314 { 0x1e3d, KEY_POWER }, /* system power (green button) */
3315};
3316
3317struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = {
3318 .scan = ir_codes_rc5_hauppauge_new,
3319 .size = ARRAY_SIZE(ir_codes_rc5_hauppauge_new),
3320 .ir_type = IR_TYPE_RC5,
3321};
3322EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table);
3323
3324/* Terratec Cinergy Hybrid T USB XS FM
3325 Mauro Carvalho Chehab <mchehab@redhat.com>
3326 */
3327static struct ir_scancode ir_codes_nec_terratec_cinergy_xs[] = {
3328 { 0x1441, KEY_HOME},
3329 { 0x1401, KEY_POWER2},
3330
3331 { 0x1442, KEY_MENU}, /* DVD menu */
3332 { 0x1443, KEY_SUBTITLE},
3333 { 0x1444, KEY_TEXT}, /* Teletext */
3334 { 0x1445, KEY_DELETE},
3335
3336 { 0x1402, KEY_1},
3337 { 0x1403, KEY_2},
3338 { 0x1404, KEY_3},
3339 { 0x1405, KEY_4},
3340 { 0x1406, KEY_5},
3341 { 0x1407, KEY_6},
3342 { 0x1408, KEY_7},
3343 { 0x1409, KEY_8},
3344 { 0x140a, KEY_9},
3345 { 0x140c, KEY_0},
3346
3347 { 0x140b, KEY_TUNER}, /* AV */
3348 { 0x140d, KEY_MODE}, /* A.B */
3349
3350 { 0x1446, KEY_TV},
3351 { 0x1447, KEY_DVD},
3352 { 0x1449, KEY_VIDEO},
3353 { 0x144a, KEY_RADIO}, /* Music */
3354 { 0x144b, KEY_CAMERA}, /* PIC */
3355
3356 { 0x1410, KEY_UP},
3357 { 0x1411, KEY_LEFT},
3358 { 0x1412, KEY_OK},
3359 { 0x1413, KEY_RIGHT},
3360 { 0x1414, KEY_DOWN},
3361
3362 { 0x140f, KEY_EPG},
3363 { 0x1416, KEY_INFO},
3364 { 0x144d, KEY_BACKSPACE},
3365
3366 { 0x141c, KEY_VOLUMEUP},
3367 { 0x141e, KEY_VOLUMEDOWN},
3368
3369 { 0x144c, KEY_PLAY},
3370 { 0x141d, KEY_MUTE},
3371
3372 { 0x141b, KEY_CHANNELUP},
3373 { 0x141f, KEY_CHANNELDOWN},
3374
3375 { 0x1417, KEY_RED},
3376 { 0x1418, KEY_GREEN},
3377 { 0x1419, KEY_YELLOW},
3378 { 0x141a, KEY_BLUE},
3379
3380 { 0x1458, KEY_RECORD},
3381 { 0x1448, KEY_STOP},
3382 { 0x1440, KEY_PAUSE},
3383
3384 { 0x1454, KEY_LAST},
3385 { 0x144e, KEY_REWIND},
3386 { 0x144f, KEY_FASTFORWARD},
3387 { 0x145c, KEY_NEXT},
3388};
3389struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table = {
3390 .scan = ir_codes_nec_terratec_cinergy_xs,
3391 .size = ARRAY_SIZE(ir_codes_nec_terratec_cinergy_xs),
3392 .ir_type = IR_TYPE_NEC,
3393};
3394EXPORT_SYMBOL_GPL(ir_codes_nec_terratec_cinergy_xs_table);
3395
diff --git a/drivers/media/common/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 26ce5bc2fdd5..bff7a5356037 100644
--- a/drivers/media/common/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -1,10 +1,19 @@
1/* ir-register.c - handle IR scancode->keycode tables 1/* ir-register.c - handle IR scancode->keycode tables
2 * 2 *
3 * Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com> 3 * Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
4 */ 13 */
5 14
6#include <linux/usb/input.h>
7 15
16#include <linux/usb/input.h>
8#include <media/ir-common.h> 17#include <media/ir-common.h>
9 18
10#define IR_TAB_MIN_SIZE 32 19#define IR_TAB_MIN_SIZE 32
@@ -72,6 +81,7 @@ int ir_roundup_tablesize(int n_elems)
72 81
73 return n_elems; 82 return n_elems;
74} 83}
84EXPORT_SYMBOL_GPL(ir_roundup_tablesize);
75 85
76/** 86/**
77 * ir_copy_table() - copies a keytable, discarding the unused entries 87 * ir_copy_table() - copies a keytable, discarding the unused entries
@@ -100,6 +110,7 @@ int ir_copy_table(struct ir_scancode_table *destin,
100 110
101 return 0; 111 return 0;
102} 112}
113EXPORT_SYMBOL_GPL(ir_copy_table);
103 114
104/** 115/**
105 * ir_getkeycode() - get a keycode at the evdev scancode ->keycode table 116 * ir_getkeycode() - get a keycode at the evdev scancode ->keycode table
@@ -114,7 +125,8 @@ static int ir_getkeycode(struct input_dev *dev,
114 int scancode, int *keycode) 125 int scancode, int *keycode)
115{ 126{
116 int elem; 127 int elem;
117 struct ir_scancode_table *rc_tab = input_get_drvdata(dev); 128 struct ir_input_dev *ir_dev = input_get_drvdata(dev);
129 struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
118 130
119 elem = ir_seek_table(rc_tab, scancode); 131 elem = ir_seek_table(rc_tab, scancode);
120 if (elem >= 0) { 132 if (elem >= 0) {
@@ -136,7 +148,6 @@ static int ir_getkeycode(struct input_dev *dev,
136 return 0; 148 return 0;
137} 149}
138 150
139
140/** 151/**
141 * ir_is_resize_needed() - Check if the table needs rezise 152 * ir_is_resize_needed() - Check if the table needs rezise
142 * @table: keycode table that may need to resize 153 * @table: keycode table that may need to resize
@@ -286,7 +297,8 @@ static int ir_setkeycode(struct input_dev *dev,
286 int scancode, int keycode) 297 int scancode, int keycode)
287{ 298{
288 int rc = 0; 299 int rc = 0;
289 struct ir_scancode_table *rc_tab = input_get_drvdata(dev); 300 struct ir_input_dev *ir_dev = input_get_drvdata(dev);
301 struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
290 struct ir_scancode *keymap = rc_tab->scan; 302 struct ir_scancode *keymap = rc_tab->scan;
291 unsigned long flags; 303 unsigned long flags;
292 304
@@ -360,7 +372,8 @@ static int ir_setkeycode(struct input_dev *dev,
360 */ 372 */
361u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode) 373u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
362{ 374{
363 struct ir_scancode_table *rc_tab = input_get_drvdata(dev); 375 struct ir_input_dev *ir_dev = input_get_drvdata(dev);
376 struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
364 struct ir_scancode *keymap = rc_tab->scan; 377 struct ir_scancode *keymap = rc_tab->scan;
365 int elem; 378 int elem;
366 379
@@ -378,9 +391,10 @@ u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
378 /* Reports userspace that an unknown keycode were got */ 391 /* Reports userspace that an unknown keycode were got */
379 return KEY_RESERVED; 392 return KEY_RESERVED;
380} 393}
394EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
381 395
382/** 396/**
383 * ir_set_keycode_table() - sets the IR keycode table and add the handlers 397 * ir_input_register() - sets the IR keycode table and add the handlers
384 * for keymap table get/set 398 * for keymap table get/set
385 * @input_dev: the struct input_dev descriptor of the device 399 * @input_dev: the struct input_dev descriptor of the device
386 * @rc_tab: the struct ir_scancode_table table of scancode/keymap 400 * @rc_tab: the struct ir_scancode_table table of scancode/keymap
@@ -389,17 +403,34 @@ u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
389 * an IR. 403 * an IR.
390 * It should be called before registering the IR device. 404 * It should be called before registering the IR device.
391 */ 405 */
392int ir_set_keycode_table(struct input_dev *input_dev, 406int ir_input_register(struct input_dev *input_dev,
393 struct ir_scancode_table *rc_tab) 407 struct ir_scancode_table *rc_tab)
394{ 408{
395 struct ir_scancode *keymap = rc_tab->scan; 409 struct ir_input_dev *ir_dev;
396 int i; 410 struct ir_scancode *keymap = rc_tab->scan;
397 411 int i, rc;
398 spin_lock_init(&rc_tab->lock);
399 412
400 if (rc_tab->scan == NULL || !rc_tab->size) 413 if (rc_tab->scan == NULL || !rc_tab->size)
401 return -EINVAL; 414 return -EINVAL;
402 415
416 ir_dev = kzalloc(sizeof(*ir_dev), GFP_KERNEL);
417 if (!ir_dev)
418 return -ENOMEM;
419
420 spin_lock_init(&rc_tab->lock);
421
422 ir_dev->rc_tab.size = ir_roundup_tablesize(rc_tab->size);
423 ir_dev->rc_tab.scan = kzalloc(ir_dev->rc_tab.size *
424 sizeof(struct ir_scancode), GFP_KERNEL);
425 if (!ir_dev->rc_tab.scan)
426 return -ENOMEM;
427
428 IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n",
429 ir_dev->rc_tab.size,
430 ir_dev->rc_tab.size * sizeof(ir_dev->rc_tab.scan));
431
432 ir_copy_table(&ir_dev->rc_tab, rc_tab);
433
403 /* set the bits for the keys */ 434 /* set the bits for the keys */
404 IR_dprintk(1, "key map size: %d\n", rc_tab->size); 435 IR_dprintk(1, "key map size: %d\n", rc_tab->size);
405 for (i = 0; i < rc_tab->size; i++) { 436 for (i = 0; i < rc_tab->size; i++) {
@@ -407,23 +438,48 @@ int ir_set_keycode_table(struct input_dev *input_dev,
407 i, keymap[i].keycode); 438 i, keymap[i].keycode);
408 set_bit(keymap[i].keycode, input_dev->keybit); 439 set_bit(keymap[i].keycode, input_dev->keybit);
409 } 440 }
441 clear_bit(0, input_dev->keybit);
442
443 set_bit(EV_KEY, input_dev->evbit);
410 444
411 input_dev->getkeycode = ir_getkeycode; 445 input_dev->getkeycode = ir_getkeycode;
412 input_dev->setkeycode = ir_setkeycode; 446 input_dev->setkeycode = ir_setkeycode;
413 input_set_drvdata(input_dev, rc_tab); 447 input_set_drvdata(input_dev, ir_dev);
414 448
415 return 0; 449 rc = input_register_device(input_dev);
450 if (rc < 0) {
451 kfree(rc_tab->scan);
452 kfree(ir_dev);
453 input_set_drvdata(input_dev, NULL);
454 }
455
456 return rc;
416} 457}
458EXPORT_SYMBOL_GPL(ir_input_register);
417 459
418void ir_input_free(struct input_dev *dev) 460void ir_input_unregister(struct input_dev *dev)
419{ 461{
420 struct ir_scancode_table *rc_tab = input_get_drvdata(dev); 462 struct ir_input_dev *ir_dev = input_get_drvdata(dev);
463 struct ir_scancode_table *rc_tab;
464
465 if (!ir_dev)
466 return;
421 467
422 IR_dprintk(1, "Freed keycode table\n"); 468 IR_dprintk(1, "Freed keycode table\n");
423 469
470 rc_tab = &ir_dev->rc_tab;
424 rc_tab->size = 0; 471 rc_tab->size = 0;
425 kfree(rc_tab->scan); 472 kfree(rc_tab->scan);
426 rc_tab->scan = NULL; 473 rc_tab->scan = NULL;
474
475 kfree(ir_dev);
476 input_unregister_device(dev);
427} 477}
428EXPORT_SYMBOL_GPL(ir_input_free); 478EXPORT_SYMBOL_GPL(ir_input_unregister);
479
480int ir_core_debug; /* ir_debug level (0,1,2) */
481EXPORT_SYMBOL_GPL(ir_core_debug);
482module_param_named(debug, ir_core_debug, int, 0644);
429 483
484MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
485MODULE_LICENSE("GPL");
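A condensed, hypothetical driver-side sketch of the registration flow implied by the reworked API above (the helper name and the choice of keymap are invented for illustration): drivers now pass only the protocol type to ir_input_init() and hand their scancode table to ir_input_register(), which copies the table, sets the key bits and registers the input device; teardown is a single ir_input_unregister() call, which frees the copied table before unregistering.

#include <media/ir-common.h>

/* Hypothetical probe-time sequence using the reworked IR core API. */
static int example_register_remote(struct input_dev *input_dev,
				   struct ir_input_state *ir_state)
{
	int err;

	err = ir_input_init(input_dev, ir_state, IR_TYPE_RC5);
	if (err < 0)
		return err;

	/* Copies the scancode table into the IR core and registers the device. */
	return ir_input_register(input_dev, &ir_codes_rc5_hauppauge_new_table);
}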
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index ba69beeb0e21..a28541b2b1a2 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -99,6 +99,7 @@ config VIDEO_MEDIA
99comment "Multimedia drivers" 99comment "Multimedia drivers"
100 100
101source "drivers/media/common/Kconfig" 101source "drivers/media/common/Kconfig"
102source "drivers/media/IR/Kconfig"
102 103
103# 104#
104# Tuner drivers for DVB and V4L 105# Tuner drivers for DVB and V4L
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 09a829d8a7e7..499b0810d019 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the kernel multimedia device drivers. 2# Makefile for the kernel multimedia device drivers.
3# 3#
4 4
5obj-y += common/ video/ 5obj-y += common/ IR/ video/
6 6
7obj-$(CONFIG_VIDEO_DEV) += radio/ 7obj-$(CONFIG_VIDEO_DEV) += radio/
8obj-$(CONFIG_DVB_CORE) += dvb/ 8obj-$(CONFIG_DVB_CORE) += dvb/
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index 169b337b7c9d..e3ec9639321b 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,8 +1,6 @@
1saa7146-objs := saa7146_i2c.o saa7146_core.o 1saa7146-objs := saa7146_i2c.o saa7146_core.o
2saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o 2saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o
3ir-common-objs := ir-functions.o ir-keymaps.o ir-keytable.o
4 3
5obj-y += tuners/ 4obj-y += tuners/
6obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o 5obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o
7obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o 6obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o
8obj-$(CONFIG_VIDEO_IR) += ir-common.o
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 620f655fa9c5..7364b9642d00 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -1,7 +1,5 @@
1#include <media/saa7146_vv.h> 1#include <media/saa7146_vv.h>
2 2
3#define BOARD_CAN_DO_VBI(dev) (dev->revision != 0 && dev->vv_data->vbi_minor != -1)
4
5/****************************************************************************/ 3/****************************************************************************/
6/* resource management functions, shamelessly stolen from saa7134 driver */ 4/* resource management functions, shamelessly stolen from saa7134 driver */
7 5
@@ -194,43 +192,24 @@ void saa7146_buffer_timeout(unsigned long data)
194 192
195static int fops_open(struct file *file) 193static int fops_open(struct file *file)
196{ 194{
197 unsigned int minor = video_devdata(file)->minor; 195 struct video_device *vdev = video_devdata(file);
198 struct saa7146_dev *h = NULL, *dev = NULL; 196 struct saa7146_dev *dev = video_drvdata(file);
199 struct list_head *list;
200 struct saa7146_fh *fh = NULL; 197 struct saa7146_fh *fh = NULL;
201 int result = 0; 198 int result = 0;
202 199
203 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 200 enum v4l2_buf_type type;
204 201
205 DEB_EE(("file:%p, minor:%d\n", file, minor)); 202 DEB_EE(("file:%p, dev:%s\n", file, video_device_node_name(vdev)));
206 203
207 if (mutex_lock_interruptible(&saa7146_devices_lock)) 204 if (mutex_lock_interruptible(&saa7146_devices_lock))
208 return -ERESTARTSYS; 205 return -ERESTARTSYS;
209 206
210 list_for_each(list,&saa7146_devices) {
211 h = list_entry(list, struct saa7146_dev, item);
212 if( NULL == h->vv_data ) {
213 DEB_D(("device %p has not registered video devices.\n",h));
214 continue;
215 }
216 DEB_D(("trying: %p @ major %d,%d\n",h,h->vv_data->video_minor,h->vv_data->vbi_minor));
217
218 if (h->vv_data->video_minor == minor) {
219 dev = h;
220 }
221 if (h->vv_data->vbi_minor == minor) {
222 type = V4L2_BUF_TYPE_VBI_CAPTURE;
223 dev = h;
224 }
225 }
226 if (NULL == dev) {
227 DEB_S(("no such video device.\n"));
228 result = -ENODEV;
229 goto out;
230 }
231
232 DEB_D(("using: %p\n",dev)); 207 DEB_D(("using: %p\n",dev));
233 208
209 type = vdev->vfl_type == VFL_TYPE_GRABBER
210 ? V4L2_BUF_TYPE_VIDEO_CAPTURE
211 : V4L2_BUF_TYPE_VBI_CAPTURE;
212
234 /* check if an extension is registered */ 213 /* check if an extension is registered */
235 if( NULL == dev->ext ) { 214 if( NULL == dev->ext ) {
236 DEB_S(("no extension registered for this device.\n")); 215 DEB_S(("no extension registered for this device.\n"));
@@ -474,9 +453,6 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
474 configuration data) */ 453 configuration data) */
475 dev->ext_vv_data = ext_vv; 454 dev->ext_vv_data = ext_vv;
476 455
477 vv->video_minor = -1;
478 vv->vbi_minor = -1;
479
480 vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle); 456 vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle);
481 if( NULL == vv->d_clipping.cpu_addr ) { 457 if( NULL == vv->d_clipping.cpu_addr ) {
482 ERR(("out of memory. aborting.\n")); 458 ERR(("out of memory. aborting.\n"));
@@ -515,7 +491,6 @@ EXPORT_SYMBOL_GPL(saa7146_vv_release);
515int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev, 491int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
516 char *name, int type) 492 char *name, int type)
517{ 493{
518 struct saa7146_vv *vv = dev->vv_data;
519 struct video_device *vfd; 494 struct video_device *vfd;
520 int err; 495 int err;
521 int i; 496 int i;
@@ -543,15 +518,8 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
543 return err; 518 return err;
544 } 519 }
545 520
546 if( VFL_TYPE_GRABBER == type ) { 521 INFO(("%s: registered device %s [v4l2]\n",
547 vv->video_minor = vfd->minor; 522 dev->name, video_device_node_name(vfd)));
548 INFO(("%s: registered device video%d [v4l2]\n",
549 dev->name, vfd->num));
550 } else {
551 vv->vbi_minor = vfd->minor;
552 INFO(("%s: registered device vbi%d [v4l2]\n",
553 dev->name, vfd->num));
554 }
555 523
556 *vid = vfd; 524 *vid = vfd;
557 return 0; 525 return 0;
@@ -560,16 +528,8 @@ EXPORT_SYMBOL_GPL(saa7146_register_device);
560 528
561int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev) 529int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev)
562{ 530{
563 struct saa7146_vv *vv = dev->vv_data;
564
565 DEB_EE(("dev:%p\n",dev)); 531 DEB_EE(("dev:%p\n",dev));
566 532
567 if ((*vid)->vfl_type == VFL_TYPE_GRABBER) {
568 vv->video_minor = -1;
569 } else {
570 vv->vbi_minor = -1;
571 }
572
573 video_unregister_device(*vid); 533 video_unregister_device(*vid);
574 *vid = NULL; 534 *vid = NULL;
575 535
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 53e3f2a7d31a..f0f483ac8b89 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -589,7 +589,7 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105)
589 snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys), 589 snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys),
590 "pci-%s/ir0", pci_name(dm1105->pdev)); 590 "pci-%s/ir0", pci_name(dm1105->pdev));
591 591
592 err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type, ir_codes); 592 err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type);
593 if (err < 0) { 593 if (err < 0) {
594 input_free_device(input_dev); 594 input_free_device(input_dev);
595 return err; 595 return err;
@@ -611,20 +611,14 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105)
611 611
612 INIT_WORK(&dm1105->ir.work, dm1105_emit_key); 612 INIT_WORK(&dm1105->ir.work, dm1105_emit_key);
613 613
614 err = input_register_device(input_dev); 614 err = ir_input_register(input_dev, ir_codes);
615 if (err) {
616 ir_input_free(input_dev);
617 input_free_device(input_dev);
618 return err;
619 }
620 615
621 return 0; 616 return err;
622} 617}
623 618
624void __devexit dm1105_ir_exit(struct dm1105dvb *dm1105) 619void __devexit dm1105_ir_exit(struct dm1105dvb *dm1105)
625{ 620{
626 ir_input_free(dm1105->ir.input_dev); 621 ir_input_unregister(dm1105->ir.input_dev);
627 input_unregister_device(dm1105->ir.input_dev);
628} 622}
629 623
630static int __devinit dm1105dvb_hw_init(struct dm1105dvb *dm1105dvb) 624static int __devinit dm1105dvb_hw_init(struct dm1105dvb *dm1105dvb)
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 2dee1bf73577..1b249897c9fb 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -265,9 +265,13 @@ config DVB_USB_DW2102
265 select DVB_TDA10021 if !DVB_FE_CUSTOMISE 265 select DVB_TDA10021 if !DVB_FE_CUSTOMISE
266 select DVB_MT312 if !DVB_FE_CUSTOMISE 266 select DVB_MT312 if !DVB_FE_CUSTOMISE
267 select DVB_ZL10039 if !DVB_FE_CUSTOMISE 267 select DVB_ZL10039 if !DVB_FE_CUSTOMISE
268 select DVB_DS3000 if !DVB_FE_CUSTOMISE
269 select DVB_STB6100 if !DVB_FE_CUSTOMISE
270 select DVB_STV6110 if !DVB_FE_CUSTOMISE
271 select DVB_STV0900 if !DVB_FE_CUSTOMISE
268 help 272 help
269 Say Y here to support the DvbWorld DVB-S/S2 USB2.0 receivers 273 Say Y here to support the DvbWorld, TeVii, Prof DVB-S/S2 USB2.0
270 and the TeVii S650, S630. 274 receivers.
271 275
272config DVB_USB_CINERGY_T2 276config DVB_USB_CINERGY_T2
273 tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver" 277 tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver"
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 8b544fe79b0d..495a90577c5f 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -20,20 +20,22 @@ extern int dvb_usb_dib0700_debug;
20#define deb_fwdata(args...) dprintk(dvb_usb_dib0700_debug,0x04,args) 20#define deb_fwdata(args...) dprintk(dvb_usb_dib0700_debug,0x04,args)
21#define deb_data(args...) dprintk(dvb_usb_dib0700_debug,0x08,args) 21#define deb_data(args...) dprintk(dvb_usb_dib0700_debug,0x08,args)
22 22
23#define REQUEST_I2C_READ 0x2 23#define REQUEST_SET_USB_XFER_LEN 0x0 /* valid only for firmware version */
24#define REQUEST_I2C_WRITE 0x3 24 /* higher than 1.21 */
25#define REQUEST_POLL_RC 0x4 /* deprecated in firmware v1.20 */ 25#define REQUEST_I2C_READ 0x2
26#define REQUEST_JUMPRAM 0x8 26#define REQUEST_I2C_WRITE 0x3
27#define REQUEST_SET_CLOCK 0xB 27#define REQUEST_POLL_RC 0x4 /* deprecated in firmware v1.20 */
28#define REQUEST_SET_GPIO 0xC 28#define REQUEST_JUMPRAM 0x8
29#define REQUEST_ENABLE_VIDEO 0xF 29#define REQUEST_SET_CLOCK 0xB
30#define REQUEST_SET_GPIO 0xC
31#define REQUEST_ENABLE_VIDEO 0xF
30 // 1 Byte: 4MSB(1 = enable streaming, 0 = disable streaming) 4LSB(Video Mode: 0 = MPEG2 188Bytes, 1 = Analog) 32 // 1 Byte: 4MSB(1 = enable streaming, 0 = disable streaming) 4LSB(Video Mode: 0 = MPEG2 188Bytes, 1 = Analog)
31 // 2 Byte: MPEG2 mode: 4MSB(1 = Master Mode, 0 = Slave Mode) 4LSB(Channel 1 = bit0, Channel 2 = bit1) 33 // 2 Byte: MPEG2 mode: 4MSB(1 = Master Mode, 0 = Slave Mode) 4LSB(Channel 1 = bit0, Channel 2 = bit1)
32 // 2 Byte: Analog mode: 4MSB(0 = 625 lines, 1 = 525 lines) 4LSB( " " ) 34 // 2 Byte: Analog mode: 4MSB(0 = 625 lines, 1 = 525 lines) 4LSB( " " )
33#define REQUEST_SET_RC 0x11 35#define REQUEST_SET_RC 0x11
34#define REQUEST_NEW_I2C_READ 0x12 36#define REQUEST_NEW_I2C_READ 0x12
35#define REQUEST_NEW_I2C_WRITE 0x13 37#define REQUEST_NEW_I2C_WRITE 0x13
36#define REQUEST_GET_VERSION 0x15 38#define REQUEST_GET_VERSION 0x15
37 39
38struct dib0700_state { 40struct dib0700_state {
39 u8 channel_state; 41 u8 channel_state;
@@ -44,6 +46,8 @@ struct dib0700_state {
44 u8 is_dib7000pc; 46 u8 is_dib7000pc;
45 u8 fw_use_new_i2c_api; 47 u8 fw_use_new_i2c_api;
46 u8 disable_streaming_master_mode; 48 u8 disable_streaming_master_mode;
49 u32 fw_version;
50 u32 nb_packet_buffer_size;
47}; 51};
48 52
49extern int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion, 53extern int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index db7f7f79a66c..0d3c9a9a33be 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -17,6 +17,14 @@ int dvb_usb_dib0700_ir_proto = 1;
17module_param(dvb_usb_dib0700_ir_proto, int, 0644); 17module_param(dvb_usb_dib0700_ir_proto, int, 0644);
18MODULE_PARM_DESC(dvb_usb_dib0700_ir_proto, "set ir protocol (0=NEC, 1=RC5 (default), 2=RC6)."); 18MODULE_PARM_DESC(dvb_usb_dib0700_ir_proto, "set ir protocol (0=NEC, 1=RC5 (default), 2=RC6).");
19 19
20static int nb_packet_buffer_size = 21;
21module_param(nb_packet_buffer_size, int, 0644);
22MODULE_PARM_DESC(nb_packet_buffer_size,
23 "Set the dib0700 driver data buffer size. This parameter "
24 "corresponds to the number of TS packets. The actual size of "
25 "the data buffer corresponds to this parameter "
26 "multiplied by 188 (default: 21)");
27
20DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 28DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
21 29
22 30
@@ -28,10 +36,14 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
28 REQUEST_GET_VERSION, 36 REQUEST_GET_VERSION,
29 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, 37 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
30 b, sizeof(b), USB_CTRL_GET_TIMEOUT); 38 b, sizeof(b), USB_CTRL_GET_TIMEOUT);
31 *hwversion = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]; 39 if (hwversion != NULL)
32 *romversion = (b[4] << 24) | (b[5] << 16) | (b[6] << 8) | b[7]; 40 *hwversion = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
33 *ramversion = (b[8] << 24) | (b[9] << 16) | (b[10] << 8) | b[11]; 41 if (romversion != NULL)
34 *fwtype = (b[12] << 24) | (b[13] << 16) | (b[14] << 8) | b[15]; 42 *romversion = (b[4] << 24) | (b[5] << 16) | (b[6] << 8) | b[7];
43 if (ramversion != NULL)
44 *ramversion = (b[8] << 24) | (b[9] << 16) | (b[10] << 8) | b[11];
45 if (fwtype != NULL)
46 *fwtype = (b[12] << 24) | (b[13] << 16) | (b[14] << 8) | b[15];
35 return ret; 47 return ret;
36} 48}
37 49
@@ -97,6 +109,27 @@ int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_
97 return dib0700_ctrl_wr(d,buf,3); 109 return dib0700_ctrl_wr(d,buf,3);
98} 110}
99 111
112static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
113{
114 struct dib0700_state *st = d->priv;
115 u8 b[3];
116 int ret;
117
118 if (st->fw_version >= 0x10201) {
119 b[0] = REQUEST_SET_USB_XFER_LEN;
120 b[1] = (nb_ts_packets >> 8)&0xff;
121 b[2] = nb_ts_packets & 0xff;
122
 123 deb_info("set the USB xfer len to %i TS packets\n", nb_ts_packets);
124
125 ret = dib0700_ctrl_wr(d, b, 3);
126 } else {
 127 deb_info("this firmware does not allow changing the USB xfer len\n");
128 ret = -EIO;
129 }
130 return ret;
131}
132
100/* 133/*
101 * I2C master xfer function (supported in 1.20 firmware) 134 * I2C master xfer function (supported in 1.20 firmware)
102 */ 135 */
@@ -328,7 +361,9 @@ static int dib0700_jumpram(struct usb_device *udev, u32 address)
328int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw) 361int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw)
329{ 362{
330 struct hexline hx; 363 struct hexline hx;
331 int pos = 0, ret, act_len; 364 int pos = 0, ret, act_len, i, adap_num;
365 u8 b[16];
366 u32 fw_version;
332 367
333 u8 buf[260]; 368 u8 buf[260];
334 369
@@ -364,6 +399,34 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
364 } else 399 } else
365 ret = -EIO; 400 ret = -EIO;
366 401
 402 /* the number of TS packets has to be at least 1 */
403 if (nb_packet_buffer_size < 1)
404 nb_packet_buffer_size = 1;
405
 406 /* get the firmware version */
407 usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
408 REQUEST_GET_VERSION,
409 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
410 b, sizeof(b), USB_CTRL_GET_TIMEOUT);
411 fw_version = (b[8] << 24) | (b[9] << 16) | (b[10] << 8) | b[11];
412
413 /* set the buffer size - DVB-USB is allocating URB buffers
 414 * only after the firmware download was successful */
415 for (i = 0; i < dib0700_device_count; i++) {
416 for (adap_num = 0; adap_num < dib0700_devices[i].num_adapters;
417 adap_num++) {
418 if (fw_version >= 0x10201)
419 dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = 188*nb_packet_buffer_size;
420 else {
 421 /* for firmware versions older than 1.20.1,
 422 * the buffer size has to be a multiple of 512 */
423 dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = ((188*nb_packet_buffer_size+188/2)/512)*512;
424 if (dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize < 512)
425 dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = 512;
426 }
427 }
428 }
429
367 return ret; 430 return ret;
368} 431}
369 432
@@ -371,6 +434,18 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
371{ 434{
372 struct dib0700_state *st = adap->dev->priv; 435 struct dib0700_state *st = adap->dev->priv;
373 u8 b[4]; 436 u8 b[4];
437 int ret;
438
439 if ((onoff != 0) && (st->fw_version >= 0x10201)) {
440 /* for firmware later than 1.20.1,
441 * the USB xfer length can be set */
442 ret = dib0700_set_usb_xfer_len(adap->dev,
443 st->nb_packet_buffer_size);
444 if (ret < 0) {
 445 deb_info("cannot set the USB xfer len\n");
446 return ret;
447 }
448 }
374 449
375 b[0] = REQUEST_ENABLE_VIDEO; 450 b[0] = REQUEST_ENABLE_VIDEO;
376 b[1] = (onoff << 4) | 0x00; /* this bit gives a kind of command, rather than enabling something or not */ 451 b[1] = (onoff << 4) | 0x00; /* this bit gives a kind of command, rather than enabling something or not */
@@ -415,9 +490,21 @@ static int dib0700_probe(struct usb_interface *intf,
415 490
416 for (i = 0; i < dib0700_device_count; i++) 491 for (i = 0; i < dib0700_device_count; i++)
417 if (dvb_usb_device_init(intf, &dib0700_devices[i], THIS_MODULE, 492 if (dvb_usb_device_init(intf, &dib0700_devices[i], THIS_MODULE,
418 &dev, adapter_nr) == 0) 493 &dev, adapter_nr) == 0) {
419 { 494 struct dib0700_state *st = dev->priv;
495 u32 hwversion, romversion, fw_version, fwtype;
496
497 dib0700_get_version(dev, &hwversion, &romversion,
498 &fw_version, &fwtype);
499
500 deb_info("Firmware version: %x, %d, 0x%x, %d\n",
501 hwversion, romversion, fw_version, fwtype);
502
503 st->fw_version = fw_version;
504 st->nb_packet_buffer_size = (u32)nb_packet_buffer_size;
505
420 dib0700_rc_setup(dev); 506 dib0700_rc_setup(dev);
507
421 return 0; 508 return 0;
422 } 509 }
423 510
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 684146f98eb7..44972d01bbd0 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -18,6 +18,7 @@
18#include "xc5000.h" 18#include "xc5000.h"
19#include "s5h1411.h" 19#include "s5h1411.h"
20#include "dib0070.h" 20#include "dib0070.h"
21#include "dib0090.h"
21#include "lgdt3305.h" 22#include "lgdt3305.h"
22#include "mxl5007t.h" 23#include "mxl5007t.h"
23 24
@@ -130,93 +131,95 @@ static int bristol_tuner_attach(struct dvb_usb_adapter *adap)
130/* MT226x */ 131/* MT226x */
131static struct dibx000_agc_config stk7700d_7000p_mt2266_agc_config[2] = { 132static struct dibx000_agc_config stk7700d_7000p_mt2266_agc_config[2] = {
132 { 133 {
133 BAND_UHF, // band_caps 134 BAND_UHF,
134 135
135 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1, 136 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1,
136 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ 137 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
137 (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), // setup 138 (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8)
138 139 | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
139 1130, // inv_gain 140
140 21, // time_stabiliz 141 1130,
141 142 21,
142 0, // alpha_level 143
143 118, // thlock 144 0,
144 145 118,
145 0, // wbd_inv 146
146 3530, // wbd_ref 147 0,
147 1, // wbd_sel 148 3530,
148 0, // wbd_alpha 149 1,
149 150 0,
150 65535, // agc1_max 151
151 33770, // agc1_min 152 65535,
152 65535, // agc2_max 153 33770,
153 23592, // agc2_min 154 65535,
154 155 23592,
155 0, // agc1_pt1 156
156 62, // agc1_pt2 157 0,
157 255, // agc1_pt3 158 62,
158 64, // agc1_slope1 159 255,
159 64, // agc1_slope2 160 64,
160 132, // agc2_pt1 161 64,
161 192, // agc2_pt2 162 132,
162 80, // agc2_slope1 163 192,
163 80, // agc2_slope2 164 80,
164 165 80,
165 17, // alpha_mant 166
166 27, // alpha_exp 167 17,
167 23, // beta_mant 168 27,
168 51, // beta_exp 169 23,
169 170 51,
170 1, // perform_agc_softsplit 171
172 1,
171 }, { 173 }, {
172 BAND_VHF | BAND_LBAND, // band_caps 174 BAND_VHF | BAND_LBAND,
173 175
174 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1, 176 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1,
175 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ 177 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
176 (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup 178 (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8)
177 179 | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
178 2372, // inv_gain 180
179 21, // time_stabiliz 181 2372,
180 182 21,
181 0, // alpha_level 183
182 118, // thlock 184 0,
183 185 118,
184 0, // wbd_inv 186
185 3530, // wbd_ref 187 0,
186 1, // wbd_sel 188 3530,
187 0, // wbd_alpha 189 1,
188 190 0,
189 65535, // agc1_max 191
190 0, // agc1_min 192 65535,
191 65535, // agc2_max 193 0,
192 23592, // agc2_min 194 65535,
193 195 23592,
194 0, // agc1_pt1 196
195 128, // agc1_pt2 197 0,
196 128, // agc1_pt3 198 128,
197 128, // agc1_slope1 199 128,
198 0, // agc1_slope2 200 128,
199 128, // agc2_pt1 201 0,
200 253, // agc2_pt2 202 128,
201 81, // agc2_slope1 203 253,
202 0, // agc2_slope2 204 81,
203 205 0,
204 17, // alpha_mant 206
205 27, // alpha_exp 207 17,
206 23, // beta_mant 208 27,
207 51, // beta_exp 209 23,
208 210 51,
209 1, // perform_agc_softsplit 211
212 1,
210 } 213 }
211}; 214};
212 215
213static struct dibx000_bandwidth_config stk7700d_mt2266_pll_config = { 216static struct dibx000_bandwidth_config stk7700d_mt2266_pll_config = {
214 60000, 30000, // internal, sampling 217 60000, 30000,
215 1, 8, 3, 1, 0, // pll_cfg: prediv, ratio, range, reset, bypass 218 1, 8, 3, 1, 0,
216 0, 0, 1, 1, 2, // misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, modulo 219 0, 0, 1, 1, 2,
217 (3 << 14) | (1 << 12) | (524 << 0), // sad_cfg: refsel, sel, freq_15k 220 (3 << 14) | (1 << 12) | (524 << 0),
218 0, // ifreq 221 0,
219 20452225, // timf 222 20452225,
220}; 223};
221 224
222static struct dib7000p_config stk7700d_dib7000p_mt2266_config[] = { 225static struct dib7000p_config stk7700d_dib7000p_mt2266_config[] = {
@@ -605,17 +608,17 @@ static int dib0700_rc_query_v1_20(struct dvb_usb_device *d, u32 *event,
605 } 608 }
606 break; 609 break;
607 default: 610 default:
608 if (actlen != sizeof(buf)) { 611 if (actlen != sizeof(buf)) {
609 /* We didn't get back the 6 byte message we expected */ 612 /* We didn't get back the 6 byte message we expected */
610 err("Unexpected RC response size [%d]", actlen); 613 err("Unexpected RC response size [%d]", actlen);
611 return -1; 614 return -1;
612 } 615 }
613 616
614 poll_reply.report_id = buf[0]; 617 poll_reply.report_id = buf[0];
615 poll_reply.data_state = buf[1]; 618 poll_reply.data_state = buf[1];
616 poll_reply.system = (buf[2] << 8) | buf[3]; 619 poll_reply.system = (buf[2] << 8) | buf[3];
617 poll_reply.data = buf[4]; 620 poll_reply.data = buf[4];
618 poll_reply.not_data = buf[5]; 621 poll_reply.not_data = buf[5];
619 622
620 break; 623 break;
621 } 624 }
@@ -632,7 +635,7 @@ static int dib0700_rc_query_v1_20(struct dvb_usb_device *d, u32 *event,
632 /* Find the key in the map */ 635 /* Find the key in the map */
633 for (i = 0; i < d->props.rc_key_map_size; i++) { 636 for (i = 0; i < d->props.rc_key_map_size; i++) {
634 if (rc5_custom(&keymap[i]) == (poll_reply.system & 0xff) && 637 if (rc5_custom(&keymap[i]) == (poll_reply.system & 0xff) &&
635 rc5_data(&keymap[i]) == poll_reply.data) { 638 rc5_data(&keymap[i]) == poll_reply.data) {
636 *event = keymap[i].event; 639 *event = keymap[i].event;
637 found = 1; 640 found = 1;
638 break; 641 break;
@@ -641,8 +644,8 @@ static int dib0700_rc_query_v1_20(struct dvb_usb_device *d, u32 *event,
641 644
642 if (found == 0) { 645 if (found == 0) {
643 err("Unknown remote controller key: %04x %02x %02x", 646 err("Unknown remote controller key: %04x %02x %02x",
644 poll_reply.system, 647 poll_reply.system,
645 poll_reply.data, poll_reply.not_data); 648 poll_reply.data, poll_reply.not_data);
646 d->last_event = 0; 649 d->last_event = 0;
647 return 0; 650 return 0;
648 } 651 }
@@ -933,47 +936,48 @@ static struct dvb_usb_rc_key dib0700_rc_keys[] = {
933 936
934/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */ 937/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */
935static struct dibx000_agc_config stk7700p_7000m_mt2060_agc_config = { 938static struct dibx000_agc_config stk7700p_7000m_mt2060_agc_config = {
936 BAND_UHF | BAND_VHF, // band_caps 939 BAND_UHF | BAND_VHF,
937 940
938 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, 941 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
939 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ 942 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
940 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup 943 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
941 944 | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
942 712, // inv_gain 945
943 41, // time_stabiliz 946 712,
944 947 41,
945 0, // alpha_level 948
946 118, // thlock 949 0,
947 950 118,
948 0, // wbd_inv 951
949 4095, // wbd_ref 952 0,
950 0, // wbd_sel 953 4095,
951 0, // wbd_alpha 954 0,
952 955 0,
953 42598, // agc1_max 956
954 17694, // agc1_min 957 42598,
955 45875, // agc2_max 958 17694,
956 2621, // agc2_min 959 45875,
957 0, // agc1_pt1 960 2621,
958 76, // agc1_pt2 961 0,
959 139, // agc1_pt3 962 76,
960 52, // agc1_slope1 963 139,
961 59, // agc1_slope2 964 52,
962 107, // agc2_pt1 965 59,
963 172, // agc2_pt2 966 107,
964 57, // agc2_slope1 967 172,
965 70, // agc2_slope2 968 57,
966 969 70,
967 21, // alpha_mant 970
968 25, // alpha_exp 971 21,
969 28, // beta_mant 972 25,
970 48, // beta_exp 973 28,
971 974 48,
972 1, // perform_agc_softsplit 975
973 { 0, // split_min 976 1,
974 107, // split_max 977 { 0,
975 51800, // global_split_min 978 107,
976 24700 // global_split_max 979 51800,
980 24700
977 }, 981 },
978}; 982};
979 983
@@ -982,54 +986,55 @@ static struct dibx000_agc_config stk7700p_7000p_mt2060_agc_config = {
982 986
983 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, 987 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
984 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ 988 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
985 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup 989 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
990 | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
986 991
987 712, // inv_gain 992 712,
988 41, // time_stabiliz 993 41,
989 994
990 0, // alpha_level 995 0,
991 118, // thlock 996 118,
992 997
993 0, // wbd_inv 998 0,
994 4095, // wbd_ref 999 4095,
995 0, // wbd_sel 1000 0,
996 0, // wbd_alpha 1001 0,
997 1002
998 42598, // agc1_max 1003 42598,
999 16384, // agc1_min 1004 16384,
1000 42598, // agc2_max 1005 42598,
1001 0, // agc2_min 1006 0,
1002 1007
1003 0, // agc1_pt1 1008 0,
1004 137, // agc1_pt2 1009 137,
1005 255, // agc1_pt3 1010 255,
1006 1011
1007 0, // agc1_slope1 1012 0,
1008 255, // agc1_slope2 1013 255,
1009 1014
1010 0, // agc2_pt1 1015 0,
1011 0, // agc2_pt2 1016 0,
1012 1017
1013 0, // agc2_slope1 1018 0,
1014 41, // agc2_slope2 1019 41,
1015 1020
1016 15, // alpha_mant 1021 15,
1017 25, // alpha_exp 1022 25,
1018 1023
1019 28, // beta_mant 1024 28,
1020 48, // beta_exp 1025 48,
1021 1026
1022 0, // perform_agc_softsplit 1027 0,
1023}; 1028};
1024 1029
1025static struct dibx000_bandwidth_config stk7700p_pll_config = { 1030static struct dibx000_bandwidth_config stk7700p_pll_config = {
1026 60000, 30000, // internal, sampling 1031 60000, 30000,
1027 1, 8, 3, 1, 0, // pll_cfg: prediv, ratio, range, reset, bypass 1032 1, 8, 3, 1, 0,
1028 0, 0, 1, 1, 0, // misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, modulo 1033 0, 0, 1, 1, 0,
1029 (3 << 14) | (1 << 12) | (524 << 0), // sad_cfg: refsel, sel, freq_15k 1034 (3 << 14) | (1 << 12) | (524 << 0),
1030 60258167, // ifreq 1035 60258167,
1031 20452225, // timf 1036 20452225,
1032 30000000, // xtal 1037 30000000,
1033}; 1038};
1034 1039
1035static struct dib7000m_config stk7700p_dib7000m_config = { 1040static struct dib7000m_config stk7700p_dib7000m_config = {
@@ -1115,41 +1120,42 @@ static struct dibx000_agc_config dib7070_agc_config = {
1115 BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND, 1120 BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
1116 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, 1121 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
1117 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ 1122 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
1118 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), // setup 1123 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
1119 1124 | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
1120 600, // inv_gain 1125
1121 10, // time_stabiliz 1126 600,
1122 1127 10,
1123 0, // alpha_level 1128
1124 118, // thlock 1129 0,
1125 1130 118,
1126 0, // wbd_inv 1131
1127 3530, // wbd_ref 1132 0,
1128 1, // wbd_sel 1133 3530,
1129 5, // wbd_alpha 1134 1,
1130 1135 5,
1131 65535, // agc1_max 1136
1132 0, // agc1_min 1137 65535,
1133 1138 0,
1134 65535, // agc2_max 1139
1135 0, // agc2_min 1140 65535,
1136 1141 0,
1137 0, // agc1_pt1 1142
1138 40, // agc1_pt2 1143 0,
1139 183, // agc1_pt3 1144 40,
1140 206, // agc1_slope1 1145 183,
1141 255, // agc1_slope2 1146 206,
1142 72, // agc2_pt1 1147 255,
1143 152, // agc2_pt2 1148 72,
1144 88, // agc2_slope1 1149 152,
1145 90, // agc2_slope2 1150 88,
1146 1151 90,
1147 17, // alpha_mant 1152
1148 27, // alpha_exp 1153 17,
1149 23, // beta_mant 1154 27,
1150 51, // beta_exp 1155 23,
1151 1156 51,
1152 0, // perform_agc_softsplit 1157
1158 0,
1153}; 1159};
1154 1160
1155static int dib7070_tuner_reset(struct dvb_frontend *fe, int onoff) 1161static int dib7070_tuner_reset(struct dvb_frontend *fe, int onoff)
@@ -1276,13 +1282,13 @@ static int stk70x0p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
1276} 1282}
1277 1283
1278static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = { 1284static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = {
1279 60000, 15000, // internal, sampling 1285 60000, 15000,
1280 1, 20, 3, 1, 0, // pll_cfg: prediv, ratio, range, reset, bypass 1286 1, 20, 3, 1, 0,
1281 0, 0, 1, 1, 2, // misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, modulo 1287 0, 0, 1, 1, 2,
1282 (3 << 14) | (1 << 12) | (524 << 0), // sad_cfg: refsel, sel, freq_15k 1288 (3 << 14) | (1 << 12) | (524 << 0),
1283 (0 << 25) | 0, // ifreq = 0.000000 MHz 1289 (0 << 25) | 0,
1284 20452225, // timf 1290 20452225,
1285 12000000, // xtal_hz 1291 12000000,
1286}; 1292};
1287 1293
1288static struct dib7000p_config dib7070p_dib7000p_config = { 1294static struct dib7000p_config dib7070p_dib7000p_config = {
@@ -1476,12 +1482,12 @@ static struct dib8000_config dib807x_dib8000_config[2] = {
1476 } 1482 }
1477}; 1483};
1478 1484
1479static int dib807x_tuner_reset(struct dvb_frontend *fe, int onoff) 1485static int dib80xx_tuner_reset(struct dvb_frontend *fe, int onoff)
1480{ 1486{
1481 return dib8000_set_gpio(fe, 5, 0, !onoff); 1487 return dib8000_set_gpio(fe, 5, 0, !onoff);
1482} 1488}
1483 1489
1484static int dib807x_tuner_sleep(struct dvb_frontend *fe, int onoff) 1490static int dib80xx_tuner_sleep(struct dvb_frontend *fe, int onoff)
1485{ 1491{
1486 return dib8000_set_gpio(fe, 0, 0, onoff); 1492 return dib8000_set_gpio(fe, 0, 0, onoff);
1487} 1493}
@@ -1494,8 +1500,8 @@ static const struct dib0070_wbd_gain_cfg dib8070_wbd_gain_cfg[] = {
1494static struct dib0070_config dib807x_dib0070_config[2] = { 1500static struct dib0070_config dib807x_dib0070_config[2] = {
1495 { 1501 {
1496 .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, 1502 .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS,
1497 .reset = dib807x_tuner_reset, 1503 .reset = dib80xx_tuner_reset,
1498 .sleep = dib807x_tuner_sleep, 1504 .sleep = dib80xx_tuner_sleep,
1499 .clock_khz = 12000, 1505 .clock_khz = 12000,
1500 .clock_pad_drive = 4, 1506 .clock_pad_drive = 4,
1501 .vga_filter = 1, 1507 .vga_filter = 1,
@@ -1508,8 +1514,8 @@ static struct dib0070_config dib807x_dib0070_config[2] = {
1508 .freq_offset_khz_vhf = -100, 1514 .freq_offset_khz_vhf = -100,
1509 }, { 1515 }, {
1510 .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, 1516 .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS,
1511 .reset = dib807x_tuner_reset, 1517 .reset = dib80xx_tuner_reset,
1512 .sleep = dib807x_tuner_sleep, 1518 .sleep = dib80xx_tuner_sleep,
1513 .clock_khz = 12000, 1519 .clock_khz = 12000,
1514 .clock_pad_drive = 2, 1520 .clock_pad_drive = 2,
1515 .vga_filter = 1, 1521 .vga_filter = 1,
@@ -1566,12 +1572,14 @@ static int dib807x_tuner_attach(struct dvb_usb_adapter *adap)
1566 return 0; 1572 return 0;
1567} 1573}
1568 1574
1569static int stk807x_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) 1575static int stk80xx_pid_filter(struct dvb_usb_adapter *adapter, int index,
1576 u16 pid, int onoff)
1570{ 1577{
1571 return dib8000_pid_filter(adapter->fe, index, pid, onoff); 1578 return dib8000_pid_filter(adapter->fe, index, pid, onoff);
1572} 1579}
1573 1580
1574static int stk807x_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) 1581static int stk80xx_pid_filter_ctrl(struct dvb_usb_adapter *adapter,
1582 int onoff)
1575{ 1583{
1576 return dib8000_pid_filter_ctrl(adapter->fe, onoff); 1584 return dib8000_pid_filter_ctrl(adapter->fe, onoff);
1577} 1585}
@@ -1624,7 +1632,7 @@ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap)
1624 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); 1632 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
1625 1633
1626 /* initialize IC 0 */ 1634 /* initialize IC 0 */
1627 dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x80); 1635 dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x80);
1628 1636
1629 adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, 1637 adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
1630 &dib807x_dib8000_config[0]); 1638 &dib807x_dib8000_config[0]);
@@ -1635,7 +1643,7 @@ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap)
1635static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap) 1643static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
1636{ 1644{
1637 /* initialize IC 1 */ 1645 /* initialize IC 1 */
1638 dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x82); 1646 dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x82);
1639 1647
1640 adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82, 1648 adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82,
1641 &dib807x_dib8000_config[1]); 1649 &dib807x_dib8000_config[1]);
@@ -1643,6 +1651,245 @@ static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
1643 return adap->fe == NULL ? -ENODEV : 0; 1651 return adap->fe == NULL ? -ENODEV : 0;
1644} 1652}
1645 1653
1654/* STK8096GP */
1655struct dibx000_agc_config dib8090_agc_config[2] = {
1656 {
1657 BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
1658 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1,
1659 * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
1660 * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
1661 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
1662 | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
1663
1664 787,
1665 10,
1666
1667 0,
1668 118,
1669
1670 0,
1671 3530,
1672 1,
1673 5,
1674
1675 65535,
1676 0,
1677
1678 65535,
1679 0,
1680
1681 0,
1682 32,
1683 114,
1684 143,
1685 144,
1686 114,
1687 227,
1688 116,
1689 117,
1690
1691 28,
1692 26,
1693 31,
1694 51,
1695
1696 0,
1697 },
1698 {
1699 BAND_CBAND,
1700 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1,
1701 * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
1702 * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
1703 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
1704 | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
1705
1706 787,
1707 10,
1708
1709 0,
1710 118,
1711
1712 0,
1713 3530,
1714 1,
1715 5,
1716
1717 0,
1718 0,
1719
1720 65535,
1721 0,
1722
1723 0,
1724 32,
1725 114,
1726 143,
1727 144,
1728 114,
1729 227,
1730 116,
1731 117,
1732
1733 28,
1734 26,
1735 31,
1736 51,
1737
1738 0,
1739 }
1740};
1741
1742static struct dibx000_bandwidth_config dib8090_pll_config_12mhz = {
1743 54000, 13500,
1744 1, 18, 3, 1, 0,
1745 0, 0, 1, 1, 2,
1746 (3 << 14) | (1 << 12) | (599 << 0),
1747 (0 << 25) | 0,
1748 20199727,
1749 12000000,
1750};
1751
1752static int dib8090_get_adc_power(struct dvb_frontend *fe)
1753{
1754 return dib8000_get_adc_power(fe, 1);
1755}
1756
1757static struct dib8000_config dib809x_dib8000_config = {
1758 .output_mpeg2_in_188_bytes = 1,
1759
1760 .agc_config_count = 2,
1761 .agc = dib8090_agc_config,
1762 .agc_control = dib0090_dcc_freq,
1763 .pll = &dib8090_pll_config_12mhz,
1764 .tuner_is_baseband = 1,
1765
1766 .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS,
1767 .gpio_val = DIB8000_GPIO_DEFAULT_VALUES,
1768 .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS,
1769
1770 .hostbus_diversity = 1,
1771 .div_cfg = 0x31,
1772 .output_mode = OUTMODE_MPEG2_FIFO,
1773 .drives = 0x2d98,
1774 .diversity_delay = 144,
1775 .refclksel = 3,
1776};
1777
1778static struct dib0090_config dib809x_dib0090_config = {
1779 .io.pll_bypass = 1,
1780 .io.pll_range = 1,
1781 .io.pll_prediv = 1,
1782 .io.pll_loopdiv = 20,
1783 .io.adc_clock_ratio = 8,
1784 .io.pll_int_loop_filt = 0,
1785 .io.clock_khz = 12000,
1786 .reset = dib80xx_tuner_reset,
1787 .sleep = dib80xx_tuner_sleep,
1788 .clkouttobamse = 1,
1789 .analog_output = 1,
1790 .i2c_address = DEFAULT_DIB0090_I2C_ADDRESS,
1791 .wbd_vhf_offset = 100,
1792 .wbd_cband_offset = 450,
1793 .use_pwm_agc = 1,
1794 .clkoutdrive = 1,
1795 .get_adc_power = dib8090_get_adc_power,
1796 .freq_offset_khz_uhf = 0,
1797 .freq_offset_khz_vhf = -143,
1798};
1799
1800static int dib8096_set_param_override(struct dvb_frontend *fe,
1801 struct dvb_frontend_parameters *fep)
1802{
1803 struct dvb_usb_adapter *adap = fe->dvb->priv;
1804 struct dib0700_adapter_state *state = adap->priv;
1805 u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
1806 u16 offset;
1807 int ret = 0;
1808 enum frontend_tune_state tune_state = CT_SHUTDOWN;
1809 u16 ltgain, rf_gain_limit;
1810
1811 ret = state->set_param_save(fe, fep);
1812 if (ret < 0)
1813 return ret;
1814
1815 switch (band) {
1816 case BAND_VHF:
1817 offset = 100;
1818 break;
1819 case BAND_UHF:
1820 offset = 550;
1821 break;
1822 default:
1823 offset = 0;
1824 break;
1825 }
1826 offset += (dib0090_get_wbd_offset(fe) * 8 * 18 / 33 + 1) / 2;
1827 dib8000_set_wbd_ref(fe, offset);
1828
1829
1830 if (band == BAND_CBAND) {
1831 deb_info("tuning in CBAND - soft-AGC startup\n");
1832 /* TODO specific wbd target for dib0090 - needed for startup ? */
1833 dib0090_set_tune_state(fe, CT_AGC_START);
1834 do {
1835 ret = dib0090_gain_control(fe);
1836 msleep(ret);
1837 tune_state = dib0090_get_tune_state(fe);
1838 if (tune_state == CT_AGC_STEP_0)
1839 dib8000_set_gpio(fe, 6, 0, 1);
1840 else if (tune_state == CT_AGC_STEP_1) {
1841 dib0090_get_current_gain(fe, NULL, NULL, &rf_gain_limit, &ltgain);
1842 if (rf_gain_limit == 0)
1843 dib8000_set_gpio(fe, 6, 0, 0);
1844 }
1845 } while (tune_state < CT_AGC_STOP);
1846 dib0090_pwm_gain_reset(fe);
1847 dib8000_pwm_agc_reset(fe);
1848 dib8000_set_tune_state(fe, CT_DEMOD_START);
1849 } else {
1850 deb_info("not tuning in CBAND - standard AGC startup\n");
1851 dib0090_pwm_gain_reset(fe);
1852 }
1853
1854 return 0;
1855}
1856
1857static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
1858{
1859 struct dib0700_adapter_state *st = adap->priv;
1860 struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
1861
1862 if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &dib809x_dib0090_config) == NULL)
1863 return -ENODEV;
1864
1865 st->set_param_save = adap->fe->ops.tuner_ops.set_params;
1866 adap->fe->ops.tuner_ops.set_params = dib8096_set_param_override;
1867 return 0;
1868}
1869
1870static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
1871{
1872 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
1873 msleep(10);
1874 dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
1875 dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
1876 dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
1877
1878 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
1879
1880 dib0700_ctrl_clock(adap->dev, 72, 1);
1881
1882 msleep(10);
1883 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
1884 msleep(10);
1885 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
1886
1887 dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80);
1888
1889 adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config);
1890
1891 return adap->fe == NULL ? -ENODEV : 0;
1892}
1646 1893
1647/* STK7070PD */ 1894/* STK7070PD */
1648static struct dib7000p_config stk7070pd_dib7000p_config[2] = { 1895static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
@@ -1929,14 +2176,17 @@ struct usb_device_id dib0700_usb_id_table[] = {
1929 { USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_STK7700D) }, 2176 { USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_STK7700D) },
1930/* 55 */{ USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_STK7700D_2) }, 2177/* 55 */{ USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_STK7700D_2) },
1931 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73A) }, 2178 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73A) },
1932 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) }, 2179 { USB_DEVICE(USB_VID_PCTV, USB_PID_PINNACLE_PCTV73ESE) },
1933 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) }, 2180 { USB_DEVICE(USB_VID_PCTV, USB_PID_PINNACLE_PCTV282E) },
1934 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK7770P) }, 2181 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK7770P) },
1935/* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) }, 2182/* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) },
1936 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) }, 2183 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) },
1937 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) }, 2184 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) },
1938 { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) }, 2185 { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) },
1939 { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) }, 2186 { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) },
2187/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
2188 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
2189 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096GP) },
1940 { 0 } /* Terminating entry */ 2190 { 0 } /* Terminating entry */
1941}; 2191};
1942MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); 2192MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -2238,11 +2488,11 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2238 { NULL }, 2488 { NULL },
2239 }, 2489 },
2240 { "Pinnacle PCTV 73e SE", 2490 { "Pinnacle PCTV 73e SE",
2241 { &dib0700_usb_id_table[57], NULL }, 2491 { &dib0700_usb_id_table[57], &dib0700_usb_id_table[65], NULL },
2242 { NULL }, 2492 { NULL },
2243 }, 2493 },
2244 { "Pinnacle PCTV 282e", 2494 { "Pinnacle PCTV 282e",
2245 { &dib0700_usb_id_table[58], NULL }, 2495 { &dib0700_usb_id_table[58], &dib0700_usb_id_table[66], NULL },
2246 { NULL }, 2496 { NULL },
2247 }, 2497 },
2248 }, 2498 },
@@ -2471,8 +2721,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2471 { 2721 {
2472 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 2722 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
2473 .pid_filter_count = 32, 2723 .pid_filter_count = 32,
2474 .pid_filter = stk807x_pid_filter, 2724 .pid_filter = stk80xx_pid_filter,
2475 .pid_filter_ctrl = stk807x_pid_filter_ctrl, 2725 .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
2476 .frontend_attach = stk807x_frontend_attach, 2726 .frontend_attach = stk807x_frontend_attach,
2477 .tuner_attach = dib807x_tuner_attach, 2727 .tuner_attach = dib807x_tuner_attach,
2478 2728
@@ -2510,8 +2760,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2510 { 2760 {
2511 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 2761 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
2512 .pid_filter_count = 32, 2762 .pid_filter_count = 32,
2513 .pid_filter = stk807x_pid_filter, 2763 .pid_filter = stk80xx_pid_filter,
2514 .pid_filter_ctrl = stk807x_pid_filter_ctrl, 2764 .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
2515 .frontend_attach = stk807xpvr_frontend_attach0, 2765 .frontend_attach = stk807xpvr_frontend_attach0,
2516 .tuner_attach = dib807x_tuner_attach, 2766 .tuner_attach = dib807x_tuner_attach,
2517 2767
@@ -2523,8 +2773,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2523 { 2773 {
2524 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 2774 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
2525 .pid_filter_count = 32, 2775 .pid_filter_count = 32,
2526 .pid_filter = stk807x_pid_filter, 2776 .pid_filter = stk80xx_pid_filter,
2527 .pid_filter_ctrl = stk807x_pid_filter_ctrl, 2777 .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
2528 .frontend_attach = stk807xpvr_frontend_attach1, 2778 .frontend_attach = stk807xpvr_frontend_attach1,
2529 .tuner_attach = dib807x_tuner_attach, 2779 .tuner_attach = dib807x_tuner_attach,
2530 2780
@@ -2547,6 +2797,37 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2547 .rc_key_map = dib0700_rc_keys, 2797 .rc_key_map = dib0700_rc_keys,
2548 .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys), 2798 .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
2549 .rc_query = dib0700_rc_query 2799 .rc_query = dib0700_rc_query
2800 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
2801 .num_adapters = 1,
2802 .adapter = {
2803 {
2804 .caps = DVB_USB_ADAP_HAS_PID_FILTER |
2805 DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
2806 .pid_filter_count = 32,
2807 .pid_filter = stk80xx_pid_filter,
2808 .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
2809 .frontend_attach = stk809x_frontend_attach,
2810 .tuner_attach = dib809x_tuner_attach,
2811
2812 DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
2813
2814 .size_of_priv =
2815 sizeof(struct dib0700_adapter_state),
2816 },
2817 },
2818
2819 .num_device_descs = 1,
2820 .devices = {
2821 { "DiBcom STK8096GP reference design",
2822 { &dib0700_usb_id_table[67], NULL },
2823 { NULL },
2824 },
2825 },
2826
2827 .rc_interval = DEFAULT_RC_INTERVAL,
2828 .rc_key_map = dib0700_rc_keys,
2829 .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
2830 .rc_query = dib0700_rc_query
2550 }, 2831 },
2551}; 2832};
2552 2833
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index da34979b5337..9143b5631e88 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -142,8 +142,13 @@ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
142 } else if ((msg[i].flags & I2C_M_RD) == 0) { 142 } else if ((msg[i].flags & I2C_M_RD) == 0) {
143 if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0) 143 if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0)
144 break; 144 break;
145 } else 145 } else if (msg[i].addr != 0x50) {
146 break; 146 /* 0x50 is the address of the eeprom - we need to protect it
147 * from dibusb's bad i2c implementation: reads without
 148 * writing the offset first are forbidden */
149 if (dibusb_i2c_msg(d, msg[i].addr, NULL, 0, msg[i].buf, msg[i].len) < 0)
150 break;
151 }
147 } 152 }
148 153
149 mutex_unlock(&d->i2c_mutex); 154 mutex_unlock(&d->i2c_mutex);
@@ -243,6 +248,12 @@ static struct dib3000mc_config mod3000p_dib3000p_config = {
243 248
244int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap) 249int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
245{ 250{
251 if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
252 adap->dev->udev->descriptor.idProduct ==
253 USB_PID_LITEON_DVB_T_WARM) {
254 msleep(1000);
255 }
256
246 if ((adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000P_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL || 257 if ((adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000P_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL ||
247 (adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000MC_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL) { 258 (adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000MC_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL) {
248 if (adap->priv != NULL) { 259 if (adap->priv != NULL) {
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index f1602d4ace6d..bc3581d58ced 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -47,6 +47,7 @@
47#define USB_VID_MSI_2 0x1462 47#define USB_VID_MSI_2 0x1462
48#define USB_VID_OPERA1 0x695c 48#define USB_VID_OPERA1 0x695c
49#define USB_VID_PINNACLE 0x2304 49#define USB_VID_PINNACLE 0x2304
50#define USB_VID_PCTV 0x2013
50#define USB_VID_PIXELVIEW 0x1554 51#define USB_VID_PIXELVIEW 0x1554
51#define USB_VID_TECHNOTREND 0x0b48 52#define USB_VID_TECHNOTREND 0x0b48
52#define USB_VID_TERRATEC 0x0ccd 53#define USB_VID_TERRATEC 0x0ccd
@@ -101,6 +102,7 @@
101#define USB_PID_DIBCOM_STK7070PD 0x1ebe 102#define USB_PID_DIBCOM_STK7070PD 0x1ebe
102#define USB_PID_DIBCOM_STK807XP 0x1f90 103#define USB_PID_DIBCOM_STK807XP 0x1f90
103#define USB_PID_DIBCOM_STK807XPVR 0x1f98 104#define USB_PID_DIBCOM_STK807XPVR 0x1f98
105#define USB_PID_DIBCOM_STK8096GP 0x1fa0
104#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131 106#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
105#define USB_PID_DIBCOM_STK7770P 0x1e80 107#define USB_PID_DIBCOM_STK7770P 0x1e80
106#define USB_PID_DPOSH_M9206_COLD 0x9206 108#define USB_PID_DPOSH_M9206_COLD 0x9206
@@ -211,6 +213,7 @@
211#define USB_PID_PINNACLE_PCTV801E_SE 0x023b 213#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
212#define USB_PID_PINNACLE_PCTV73A 0x0243 214#define USB_PID_PINNACLE_PCTV73A 0x0243
213#define USB_PID_PINNACLE_PCTV73ESE 0x0245 215#define USB_PID_PINNACLE_PCTV73ESE 0x0245
216#define USB_PID_PINNACLE_PCTV74E 0x0246
214#define USB_PID_PINNACLE_PCTV282E 0x0248 217#define USB_PID_PINNACLE_PCTV282E 0x0248
215#define USB_PID_PIXELVIEW_SBTVD 0x5010 218#define USB_PID_PIXELVIEW_SBTVD 0x5010
216#define USB_PID_PCTV_200E 0x020e 219#define USB_PID_PCTV_200E 0x020e
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 5bb9479d154e..64132c0cf80d 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -20,6 +20,11 @@
20#include "tda1002x.h" 20#include "tda1002x.h"
21#include "mt312.h" 21#include "mt312.h"
22#include "zl10039.h" 22#include "zl10039.h"
23#include "ds3000.h"
24#include "stv0900.h"
25#include "stv6110.h"
26#include "stb6100.h"
27#include "stb6100_proc.h"
23 28
24#ifndef USB_PID_DW2102 29#ifndef USB_PID_DW2102
25#define USB_PID_DW2102 0x2102 30#define USB_PID_DW2102 0x2102
@@ -37,12 +42,20 @@
37#define USB_PID_CINERGY_S 0x0064 42#define USB_PID_CINERGY_S 0x0064
38#endif 43#endif
39 44
45#ifndef USB_PID_TEVII_S630
46#define USB_PID_TEVII_S630 0xd630
47#endif
48
40#ifndef USB_PID_TEVII_S650 49#ifndef USB_PID_TEVII_S650
41#define USB_PID_TEVII_S650 0xd650 50#define USB_PID_TEVII_S650 0xd650
42#endif 51#endif
43 52
44#ifndef USB_PID_TEVII_S630 53#ifndef USB_PID_TEVII_S660
45#define USB_PID_TEVII_S630 0xd630 54#define USB_PID_TEVII_S660 0xd660
55#endif
56
57#ifndef USB_PID_PROF_1100
58#define USB_PID_PROF_1100 0xb012
46#endif 59#endif
47 60
48#define DW210X_READ_MSG 0 61#define DW210X_READ_MSG 0
@@ -55,6 +68,10 @@
55#define DW2102_VOLTAGE_CTRL (0x1800) 68#define DW2102_VOLTAGE_CTRL (0x1800)
56#define DW2102_RC_QUERY (0x1a00) 69#define DW2102_RC_QUERY (0x1a00)
57 70
71#define err_str "did not find the firmware file. (%s) " \
72 "Please see linux/Documentation/dvb/ for more details " \
73 "on firmware-problems."
74
58struct dvb_usb_rc_keys_table { 75struct dvb_usb_rc_keys_table {
59 struct dvb_usb_rc_key *rc_keys; 76 struct dvb_usb_rc_key *rc_keys;
60 int rc_keys_size; 77 int rc_keys_size;
@@ -71,6 +88,12 @@ static int ir_keymap;
71module_param_named(keymap, ir_keymap, int, 0644); 88module_param_named(keymap, ir_keymap, int, 0644);
72MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ..."); 89MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ...");
73 90
91/* demod probe */
92static int demod_probe = 1;
93module_param_named(demod, demod_probe, int, 0644);
94MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 "
95 "4=stv0903+stb6100(or-able)).");
96
74DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 97DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
75 98
76static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value, 99static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
@@ -183,7 +206,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
183 switch (num) { 206 switch (num) {
184 case 2: 207 case 2:
185 /* read si2109 register by number */ 208 /* read si2109 register by number */
186 buf6[0] = 0xd0; 209 buf6[0] = msg[0].addr << 1;
187 buf6[1] = msg[0].len; 210 buf6[1] = msg[0].len;
188 buf6[2] = msg[0].buf[0]; 211 buf6[2] = msg[0].buf[0];
189 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, 212 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -198,7 +221,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
198 switch (msg[0].addr) { 221 switch (msg[0].addr) {
199 case 0x68: 222 case 0x68:
200 /* write to si2109 register */ 223 /* write to si2109 register */
201 buf6[0] = 0xd0; 224 buf6[0] = msg[0].addr << 1;
202 buf6[1] = msg[0].len; 225 buf6[1] = msg[0].len;
203 memcpy(buf6 + 2, msg[0].buf, msg[0].len); 226 memcpy(buf6 + 2, msg[0].buf, msg[0].len);
204 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, 227 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6,
@@ -239,7 +262,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
239 /* read */ 262 /* read */
240 /* first write first register number */ 263 /* first write first register number */
241 u8 ibuf[msg[1].len + 2], obuf[3]; 264 u8 ibuf[msg[1].len + 2], obuf[3];
242 obuf[0] = 0xd0; 265 obuf[0] = msg[0].addr << 1;
243 obuf[1] = msg[0].len; 266 obuf[1] = msg[0].len;
244 obuf[2] = msg[0].buf[0]; 267 obuf[2] = msg[0].buf[0];
245 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, 268 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -256,7 +279,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
256 case 0x68: { 279 case 0x68: {
257 /* write to register */ 280 /* write to register */
258 u8 obuf[msg[0].len + 2]; 281 u8 obuf[msg[0].len + 2];
259 obuf[0] = 0xd0; 282 obuf[0] = msg[0].addr << 1;
260 obuf[1] = msg[0].len; 283 obuf[1] = msg[0].len;
261 memcpy(obuf + 2, msg[0].buf, msg[0].len); 284 memcpy(obuf + 2, msg[0].buf, msg[0].len);
262 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, 285 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -266,7 +289,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
266 case 0x61: { 289 case 0x61: {
267 /* write to tuner */ 290 /* write to tuner */
268 u8 obuf[msg[0].len + 2]; 291 u8 obuf[msg[0].len + 2];
269 obuf[0] = 0xc2; 292 obuf[0] = msg[0].addr << 1;
270 obuf[1] = msg[0].len; 293 obuf[1] = msg[0].len;
271 memcpy(obuf + 2, msg[0].buf, msg[0].len); 294 memcpy(obuf + 2, msg[0].buf, msg[0].len);
272 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, 295 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -301,78 +324,78 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
301{ 324{
302 struct dvb_usb_device *d = i2c_get_adapdata(adap); 325 struct dvb_usb_device *d = i2c_get_adapdata(adap);
303 int ret = 0; 326 int ret = 0;
304 int len, i; 327 int len, i, j;
305 328
306 if (!d) 329 if (!d)
307 return -ENODEV; 330 return -ENODEV;
308 if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 331 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
309 return -EAGAIN; 332 return -EAGAIN;
310 333
311 switch (num) { 334 for (j = 0; j < num; j++) {
312 case 2: { 335 switch (msg[j].addr) {
313 /* read */
314 /* first write first register number */
315 u8 ibuf[msg[1].len + 2], obuf[3];
316 obuf[0] = 0xaa;
317 obuf[1] = msg[0].len;
318 obuf[2] = msg[0].buf[0];
319 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
320 obuf, msg[0].len + 2, DW210X_WRITE_MSG);
321 /* second read registers */
322 ret = dw210x_op_rw(d->udev, 0xc3, 0xab , 0,
323 ibuf, msg[1].len + 2, DW210X_READ_MSG);
324 memcpy(msg[1].buf, ibuf + 2, msg[1].len);
325
326 break;
327 }
328 case 1:
329 switch (msg[0].addr) {
330 case 0x55: {
331 if (msg[0].buf[0] == 0xf7) {
332 /* firmware */
333 /* Write in small blocks */
334 u8 obuf[19];
335 obuf[0] = 0xaa;
336 obuf[1] = 0x11;
337 obuf[2] = 0xf7;
338 len = msg[0].len - 1;
339 i = 1;
340 do {
341 memcpy(obuf + 3, msg[0].buf + i, (len > 16 ? 16 : len));
342 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
343 obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG);
344 i += 16;
345 len -= 16;
346 } while (len > 0);
347 } else {
348 /* write to register */
349 u8 obuf[msg[0].len + 2];
350 obuf[0] = 0xaa;
351 obuf[1] = msg[0].len;
352 memcpy(obuf + 2, msg[0].buf, msg[0].len);
353 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
354 obuf, msg[0].len + 2, DW210X_WRITE_MSG);
355 }
356 break;
357 }
358 case(DW2102_RC_QUERY): { 336 case(DW2102_RC_QUERY): {
359 u8 ibuf[2]; 337 u8 ibuf[2];
360 ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, 338 ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
361 ibuf, 2, DW210X_READ_MSG); 339 ibuf, 2, DW210X_READ_MSG);
362 memcpy(msg[0].buf, ibuf , 2); 340 memcpy(msg[j].buf, ibuf , 2);
363 break; 341 break;
364 } 342 }
365 case(DW2102_VOLTAGE_CTRL): { 343 case(DW2102_VOLTAGE_CTRL): {
366 u8 obuf[2]; 344 u8 obuf[2];
367 obuf[0] = 0x30; 345 obuf[0] = 0x30;
368 obuf[1] = msg[0].buf[0]; 346 obuf[1] = msg[j].buf[0];
369 ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, 347 ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
370 obuf, 2, DW210X_WRITE_MSG); 348 obuf, 2, DW210X_WRITE_MSG);
371 break; 349 break;
372 } 350 }
351 /*case 0x55: cx24116
352 case 0x6a: stv0903
353 case 0x68: ds3000, stv0903
354 case 0x60: ts2020, stv6110, stb6100 */
355 default: {
356 if (msg[j].flags == I2C_M_RD) {
357 /* read registers */
358 u8 ibuf[msg[j].len + 2];
359 ret = dw210x_op_rw(d->udev, 0xc3,
360 (msg[j].addr << 1) + 1, 0,
361 ibuf, msg[j].len + 2,
362 DW210X_READ_MSG);
363 memcpy(msg[j].buf, ibuf + 2, msg[j].len);
364 mdelay(10);
365 } else if (((msg[j].buf[0] == 0xb0) &&
366 (msg[j].addr == 0x68)) ||
367 ((msg[j].buf[0] == 0xf7) &&
368 (msg[j].addr == 0x55))) {
369 /* write firmware */
370 u8 obuf[19];
371 obuf[0] = msg[j].addr << 1;
372 obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len);
373 obuf[2] = msg[j].buf[0];
374 len = msg[j].len - 1;
375 i = 1;
376 do {
377 memcpy(obuf + 3, msg[j].buf + i,
378 (len > 16 ? 16 : len));
379 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
380 obuf, (len > 16 ? 16 : len) + 3,
381 DW210X_WRITE_MSG);
382 i += 16;
383 len -= 16;
384 } while (len > 0);
385 } else {
386 /* write registers */
387 u8 obuf[msg[j].len + 2];
388 obuf[0] = msg[j].addr << 1;
389 obuf[1] = msg[j].len;
390 memcpy(obuf + 2, msg[j].buf, msg[j].len);
391 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
392 obuf, msg[j].len + 2,
393 DW210X_WRITE_MSG);
394 }
395 break;
396 }
373 } 397 }
374 398
375 break;
376 } 399 }
377 400
378 mutex_unlock(&d->i2c_mutex); 401 mutex_unlock(&d->i2c_mutex);
@@ -442,63 +465,85 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
442 return num; 465 return num;
443} 466}
444 467
445static int s630_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], 468static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
446 int num) 469 int num)
447{ 470{
448 struct dvb_usb_device *d = i2c_get_adapdata(adap); 471 struct dvb_usb_device *d = i2c_get_adapdata(adap);
449 int ret = 0; 472 int ret = 0;
473 int len, i, j;
450 474
451 if (!d) 475 if (!d)
452 return -ENODEV; 476 return -ENODEV;
453 if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 477 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
454 return -EAGAIN; 478 return -EAGAIN;
455 479
456 switch (num) { 480 for (j = 0; j < num; j++) {
457 case 2: { /* read */ 481 switch (msg[j].addr) {
458 u8 ibuf[msg[1].len], obuf[3];
459 obuf[0] = msg[1].len;
460 obuf[1] = (msg[0].addr << 1);
461 obuf[2] = msg[0].buf[0];
462
463 ret = dw210x_op_rw(d->udev, 0x90, 0, 0,
464 obuf, 3, DW210X_WRITE_MSG);
465 msleep(5);
466 ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
467 ibuf, msg[1].len, DW210X_READ_MSG);
468 memcpy(msg[1].buf, ibuf, msg[1].len);
469 break;
470 }
471 case 1:
472 switch (msg[0].addr) {
473 case 0x60:
474 case 0x0e: {
475 /* write to zl10313, zl10039 register, */
476 u8 obuf[msg[0].len + 2];
477 obuf[0] = msg[0].len + 1;
478 obuf[1] = (msg[0].addr << 1);
479 memcpy(obuf + 2, msg[0].buf, msg[0].len);
480 ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
481 obuf, msg[0].len + 2, DW210X_WRITE_MSG);
482 break;
483 }
484 case (DW2102_RC_QUERY): { 482 case (DW2102_RC_QUERY): {
485 u8 ibuf[4]; 483 u8 ibuf[4];
486 ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, 484 ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
487 ibuf, 4, DW210X_READ_MSG); 485 ibuf, 4, DW210X_READ_MSG);
488 msg[0].buf[0] = ibuf[3]; 486 memcpy(msg[j].buf, ibuf + 1, 2);
489 break; 487 break;
490 } 488 }
491 case (DW2102_VOLTAGE_CTRL): { 489 case (DW2102_VOLTAGE_CTRL): {
492 u8 obuf[2]; 490 u8 obuf[2];
493 obuf[0] = 0x03; 491 obuf[0] = 3;
494 obuf[1] = msg[0].buf[0]; 492 obuf[1] = msg[j].buf[0];
495 ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, 493 ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
496 obuf, 2, DW210X_WRITE_MSG); 494 obuf, 2, DW210X_WRITE_MSG);
497 break; 495 break;
498 } 496 }
497 /*case 0x55: cx24116
498 case 0x6a: stv0903
499 case 0x68: ds3000, stv0903
500 case 0x60: ts2020, stv6110, stb6100
501 case 0xa0: eeprom */
502 default: {
503 if (msg[j].flags == I2C_M_RD) {
504 /* read registers */
505 u8 ibuf[msg[j].len];
506 ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
507 ibuf, msg[j].len,
508 DW210X_READ_MSG);
509 memcpy(msg[j].buf, ibuf, msg[j].len);
510 break;
511 } else if ((msg[j].buf[0] == 0xb0) &&
512 (msg[j].addr == 0x68)) {
513 /* write firmware */
514 u8 obuf[19];
515 obuf[0] = (msg[j].len > 16 ?
516 18 : msg[j].len + 1);
517 obuf[1] = msg[j].addr << 1;
518 obuf[2] = msg[j].buf[0];
519 len = msg[j].len - 1;
520 i = 1;
521 do {
522 memcpy(obuf + 3, msg[j].buf + i,
523 (len > 16 ? 16 : len));
524 ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
525 obuf, (len > 16 ? 16 : len) + 3,
526 DW210X_WRITE_MSG);
527 i += 16;
528 len -= 16;
529 } while (len > 0);
530 } else {
531 /* write registers */
532 u8 obuf[msg[j].len + 2];
533 obuf[0] = msg[j].len + 1;
534 obuf[1] = (msg[j].addr << 1);
535 memcpy(obuf + 2, msg[j].buf, msg[j].len);
536 ret = dw210x_op_rw(d->udev,
537 (num > 1 ? 0x90 : 0x80), 0, 0,
538 obuf, msg[j].len + 2,
539 DW210X_WRITE_MSG);
540 break;
541 }
542 break;
543 }
499 } 544 }
500 545
501 break; 546 msleep(3);
502 } 547 }
503 548
504 mutex_unlock(&d->i2c_mutex); 549 mutex_unlock(&d->i2c_mutex);
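
Both rewritten transfer functions now walk the whole msg[] array and dispatch on each message's slave address instead of switching on the message count, so real tuner and demodulator traffic and the driver's pseudo-addresses (DW2102_RC_QUERY, DW2102_VOLTAGE_CTRL) share one loop. A minimal sketch of that shape, with a hypothetical handle_msg() standing in for the per-address cases:

#include <linux/i2c.h>

static int handle_msg(struct i2c_adapter *adap, struct i2c_msg *m)
{
	/* dispatch on m->addr and (m->flags & I2C_M_RD) here */
	return 0;
}

static int example_master_xfer(struct i2c_adapter *adap,
			       struct i2c_msg msg[], int num)
{
	int j, ret;

	for (j = 0; j < num; j++) {
		ret = handle_msg(adap, &msg[j]);
		if (ret < 0)
			return ret;
	}
	return num;	/* the i2c core expects the number of messages handled */
}
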
@@ -535,8 +580,8 @@ static struct i2c_algorithm dw3101_i2c_algo = {
535 .functionality = dw210x_i2c_func, 580 .functionality = dw210x_i2c_func,
536}; 581};
537 582
538static struct i2c_algorithm s630_i2c_algo = { 583static struct i2c_algorithm s6x0_i2c_algo = {
539 .master_xfer = s630_i2c_transfer, 584 .master_xfer = s6x0_i2c_transfer,
540 .functionality = dw210x_i2c_func, 585 .functionality = dw210x_i2c_func,
541}; 586};
542 587
@@ -564,25 +609,34 @@ static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
564 return 0; 609 return 0;
565}; 610};
566 611
567static int s630_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) 612static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
568{ 613{
569 int i, ret; 614 int i, ret;
570 u8 buf[3], eeprom[256], eepromline[16]; 615 u8 ibuf[] = { 0 }, obuf[] = { 0 };
616 u8 eeprom[256], eepromline[16];
617 struct i2c_msg msg[] = {
618 {
619 .addr = 0xa0 >> 1,
620 .flags = 0,
621 .buf = obuf,
622 .len = 1,
623 }, {
624 .addr = 0xa0 >> 1,
625 .flags = I2C_M_RD,
626 .buf = ibuf,
627 .len = 1,
628 }
629 };
571 630
572 for (i = 0; i < 256; i++) { 631 for (i = 0; i < 256; i++) {
573 buf[0] = 1; 632 obuf[0] = i;
574 buf[1] = 0xa0; 633 ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2);
575 buf[2] = i; 634 if (ret != 2) {
576 ret = dw210x_op_rw(d->udev, 0x90, 0, 0,
577 buf, 3, DW210X_WRITE_MSG);
578 ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
579 buf, 1, DW210X_READ_MSG);
580 if (ret < 0) {
581 err("read eeprom failed."); 635 err("read eeprom failed.");
582 return -1; 636 return -1;
583 } else { 637 } else {
584 eepromline[i % 16] = buf[0]; 638 eepromline[i % 16] = ibuf[0];
585 eeprom[i] = buf[0]; 639 eeprom[i] = ibuf[0];
586 } 640 }
587 641
588 if ((i % 16) == 15) { 642 if ((i % 16) == 15) {
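
The MAC address is now read through the adapter's own transfer routine instead of raw vendor commands: each EEPROM byte is fetched with the classic two-message random read (write the offset, then read one byte), and the result is checked against 2 because master_xfer returns the number of messages completed. A minimal sketch of that access pattern; the 0xa0 EEPROM address comes from the hunk above, everything else is illustrative:

#include <linux/i2c.h>
#include <linux/errno.h>

/* Read one byte at 'offset' from a 24Cxx-style EEPROM at 8-bit address 0xa0. */
static int eeprom_read_byte(struct i2c_adapter *adap, u8 offset, u8 *val)
{
	u8 obuf[] = { offset };
	struct i2c_msg msg[] = {
		{ .addr = 0xa0 >> 1, .flags = 0,        .buf = obuf, .len = 1 },
		{ .addr = 0xa0 >> 1, .flags = I2C_M_RD, .buf = val,  .len = 1 },
	};

	/* i2c_transfer() returns the number of messages processed */
	return i2c_transfer(adap, msg, 2) == 2 ? 0 : -EIO;
}
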
@@ -644,19 +698,104 @@ static struct mt312_config zl313_config = {
644 .demod_address = 0x0e, 698 .demod_address = 0x0e,
645}; 699};
646 700
701static struct ds3000_config dw2104_ds3000_config = {
702 .demod_address = 0x68,
703};
704
705static struct stv0900_config dw2104a_stv0900_config = {
706 .demod_address = 0x6a,
707 .demod_mode = 0,
708 .xtal = 27000000,
709 .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */
710 .diseqc_mode = 2,/* 2/3 PWM */
711 .tun1_maddress = 0,/* 0x60 */
712 .tun1_adc = 0,/* 2 Vpp */
713 .path1_mode = 3,
714};
715
716static struct stb6100_config dw2104a_stb6100_config = {
717 .tuner_address = 0x60,
718 .refclock = 27000000,
719};
720
721static struct stv0900_config dw2104_stv0900_config = {
722 .demod_address = 0x68,
723 .demod_mode = 0,
724 .xtal = 8000000,
725 .clkmode = 3,
726 .diseqc_mode = 2,
727 .tun1_maddress = 0,
728 .tun1_adc = 1,/* 1 Vpp */
729 .path1_mode = 3,
730};
731
732static struct stv6110_config dw2104_stv6110_config = {
733 .i2c_address = 0x60,
734 .mclk = 16000000,
735 .clk_div = 1,
736};
737
647static int dw2104_frontend_attach(struct dvb_usb_adapter *d) 738static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
648{ 739{
649 if ((d->fe = dvb_attach(cx24116_attach, &dw2104_config, 740 struct dvb_tuner_ops *tuner_ops = NULL;
650 &d->dev->i2c_adap)) != NULL) { 741
742 if (demod_probe & 4) {
743 d->fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config,
744 &d->dev->i2c_adap, 0);
745 if (d->fe != NULL) {
746 if (dvb_attach(stb6100_attach, d->fe,
747 &dw2104a_stb6100_config,
748 &d->dev->i2c_adap)) {
749 tuner_ops = &d->fe->ops.tuner_ops;
750 tuner_ops->set_frequency = stb6100_set_freq;
751 tuner_ops->get_frequency = stb6100_get_freq;
752 tuner_ops->set_bandwidth = stb6100_set_bandw;
753 tuner_ops->get_bandwidth = stb6100_get_bandw;
754 d->fe->ops.set_voltage = dw210x_set_voltage;
755 info("Attached STV0900+STB6100!\n");
756 return 0;
757 }
758 }
759 }
760
761 if (demod_probe & 2) {
762 d->fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config,
763 &d->dev->i2c_adap, 0);
764 if (d->fe != NULL) {
765 if (dvb_attach(stv6110_attach, d->fe,
766 &dw2104_stv6110_config,
767 &d->dev->i2c_adap)) {
768 d->fe->ops.set_voltage = dw210x_set_voltage;
769 info("Attached STV0900+STV6110A!\n");
770 return 0;
771 }
772 }
773 }
774
775 if (demod_probe & 1) {
776 d->fe = dvb_attach(cx24116_attach, &dw2104_config,
777 &d->dev->i2c_adap);
778 if (d->fe != NULL) {
779 d->fe->ops.set_voltage = dw210x_set_voltage;
780 info("Attached cx24116!\n");
781 return 0;
782 }
783 }
784
785 d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
786 &d->dev->i2c_adap);
787 if (d->fe != NULL) {
651 d->fe->ops.set_voltage = dw210x_set_voltage; 788 d->fe->ops.set_voltage = dw210x_set_voltage;
652 info("Attached cx24116!\n"); 789 info("Attached DS3000!\n");
653 return 0; 790 return 0;
654 } 791 }
792
655 return -EIO; 793 return -EIO;
656} 794}
657 795
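
dw2104_frontend_attach now probes demodulators in a fixed order gated by bits of demod_probe (bit 2: STV0900 with STB6100, bit 1: STV0900 with STV6110A, bit 0: CX24116), with DS3000 as the unconditional fallback. The parameter itself is defined elsewhere in dw2102.c and is not visible in this hunk; the declaration below is only an assumption about its shape, sketched for illustration:

#include <linux/module.h>

/* Hypothetical declaration matching how demod_probe is used above. */
static int demod_probe = 1;
module_param(demod_probe, int, 0644);
MODULE_PARM_DESC(demod_probe,
		 "demod probing bitmask: 4=STV0900+STB6100, 2=STV0900+STV6110A, 1=CX24116");

Loading the module with demod_probe=6, for example, would try both STV0900 front ends, skip CX24116, and fall back to DS3000 only if neither attaches.
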
658static struct dvb_usb_device_properties dw2102_properties; 796static struct dvb_usb_device_properties dw2102_properties;
659static struct dvb_usb_device_properties dw2104_properties; 797static struct dvb_usb_device_properties dw2104_properties;
798static struct dvb_usb_device_properties s6x0_properties;
660 799
661static int dw2102_frontend_attach(struct dvb_usb_adapter *d) 800static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
662{ 801{
@@ -670,14 +809,17 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
670 return 0; 809 return 0;
671 } 810 }
672 } 811 }
812
673 if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) { 813 if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) {
674 /*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/
675 d->fe = dvb_attach(stv0288_attach, &earda_config, 814 d->fe = dvb_attach(stv0288_attach, &earda_config,
676 &d->dev->i2c_adap); 815 &d->dev->i2c_adap);
677 if (d->fe != NULL) { 816 if (d->fe != NULL) {
678 d->fe->ops.set_voltage = dw210x_set_voltage; 817 if (dvb_attach(stb6000_attach, d->fe, 0x61,
679 info("Attached stv0288!\n"); 818 &d->dev->i2c_adap)) {
680 return 0; 819 d->fe->ops.set_voltage = dw210x_set_voltage;
820 info("Attached stv0288!\n");
821 return 0;
822 }
681 } 823 }
682 } 824 }
683 825
@@ -705,15 +847,38 @@ static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
705 return -EIO; 847 return -EIO;
706} 848}
707 849
708static int s630_frontend_attach(struct dvb_usb_adapter *d) 850static int s6x0_frontend_attach(struct dvb_usb_adapter *d)
709{ 851{
710 d->fe = dvb_attach(mt312_attach, &zl313_config, 852 d->fe = dvb_attach(mt312_attach, &zl313_config,
711 &d->dev->i2c_adap); 853 &d->dev->i2c_adap);
854 if (d->fe != NULL) {
855 if (dvb_attach(zl10039_attach, d->fe, 0x60,
856 &d->dev->i2c_adap)) {
857 d->fe->ops.set_voltage = dw210x_set_voltage;
858 info("Attached zl100313+zl10039!\n");
859 return 0;
860 }
861 }
862
863 d->fe = dvb_attach(stv0288_attach, &earda_config,
864 &d->dev->i2c_adap);
865 if (d->fe != NULL) {
866 if (dvb_attach(stb6000_attach, d->fe, 0x61,
867 &d->dev->i2c_adap)) {
868 d->fe->ops.set_voltage = dw210x_set_voltage;
869 info("Attached stv0288+stb6000!\n");
870 return 0;
871 }
872 }
873
874 d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
875 &d->dev->i2c_adap);
712 if (d->fe != NULL) { 876 if (d->fe != NULL) {
713 d->fe->ops.set_voltage = dw210x_set_voltage; 877 d->fe->ops.set_voltage = dw210x_set_voltage;
714 info("Attached zl10313!\n"); 878 info("Attached ds3000+ds2020!\n");
715 return 0; 879 return 0;
716 } 880 }
881
717 return -EIO; 882 return -EIO;
718} 883}
719 884
@@ -724,14 +889,6 @@ static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
724 return 0; 889 return 0;
725} 890}
726 891
727static int dw2102_earda_tuner_attach(struct dvb_usb_adapter *adap)
728{
729 dvb_attach(stb6000_attach, adap->fe, 0x61,
730 &adap->dev->i2c_adap);
731
732 return 0;
733}
734
735static int dw3101_tuner_attach(struct dvb_usb_adapter *adap) 892static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
736{ 893{
737 dvb_attach(dvb_pll_attach, adap->fe, 0x60, 894 dvb_attach(dvb_pll_attach, adap->fe, 0x60,
@@ -740,14 +897,6 @@ static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
740 return 0; 897 return 0;
741} 898}
742 899
743static int s630_zl10039_tuner_attach(struct dvb_usb_adapter *adap)
744{
745 dvb_attach(zl10039_attach, adap->fe, 0x60,
746 &adap->dev->i2c_adap);
747
748 return 0;
749}
750
751static struct dvb_usb_rc_key dw210x_rc_keys[] = { 900static struct dvb_usb_rc_key dw210x_rc_keys[] = {
752 { 0xf80a, KEY_Q }, /*power*/ 901 { 0xf80a, KEY_Q }, /*power*/
753 { 0xf80c, KEY_M }, /*mute*/ 902 { 0xf80c, KEY_M }, /*mute*/
@@ -922,6 +1071,8 @@ static struct usb_device_id dw2102_table[] = {
922 {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)}, 1071 {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
923 {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)}, 1072 {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
924 {USB_DEVICE(0x9022, USB_PID_TEVII_S630)}, 1073 {USB_DEVICE(0x9022, USB_PID_TEVII_S630)},
1074 {USB_DEVICE(0x3011, USB_PID_PROF_1100)},
1075 {USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
925 { } 1076 { }
926}; 1077};
927 1078
@@ -935,15 +1086,13 @@ static int dw2102_load_firmware(struct usb_device *dev,
935 u8 reset; 1086 u8 reset;
936 u8 reset16[] = {0, 0, 0, 0, 0, 0, 0}; 1087 u8 reset16[] = {0, 0, 0, 0, 0, 0, 0};
937 const struct firmware *fw; 1088 const struct firmware *fw;
938 const char *filename = "dvb-usb-dw2101.fw"; 1089 const char *fw_2101 = "dvb-usb-dw2101.fw";
939 1090
940 switch (dev->descriptor.idProduct) { 1091 switch (dev->descriptor.idProduct) {
941 case 0x2101: 1092 case 0x2101:
942 ret = request_firmware(&fw, filename, &dev->dev); 1093 ret = request_firmware(&fw, fw_2101, &dev->dev);
943 if (ret != 0) { 1094 if (ret != 0) {
944 err("did not find the firmware file. (%s) " 1095 err(err_str, fw_2101);
945 "Please see linux/Documentation/dvb/ for more details "
946 "on firmware-problems.", filename);
947 return ret; 1096 return ret;
948 } 1097 }
949 break; 1098 break;
@@ -983,6 +1132,11 @@ static int dw2102_load_firmware(struct usb_device *dev,
983 } 1132 }
984 /* init registers */ 1133 /* init registers */
985 switch (dev->descriptor.idProduct) { 1134 switch (dev->descriptor.idProduct) {
1135 case USB_PID_PROF_1100:
1136 s6x0_properties.rc_key_map = tbs_rc_keys;
1137 s6x0_properties.rc_key_map_size =
1138 ARRAY_SIZE(tbs_rc_keys);
1139 break;
986 case USB_PID_TEVII_S650: 1140 case USB_PID_TEVII_S650:
987 dw2104_properties.rc_key_map = tevii_rc_keys; 1141 dw2104_properties.rc_key_map = tevii_rc_keys;
988 dw2104_properties.rc_key_map_size = 1142 dw2104_properties.rc_key_map_size =
@@ -1021,7 +1175,6 @@ static int dw2102_load_firmware(struct usb_device *dev,
1021 DW210X_READ_MSG); 1175 DW210X_READ_MSG);
1022 if (reset16[2] == 0x11) { 1176 if (reset16[2] == 0x11) {
1023 dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo; 1177 dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo;
1024 dw2102_properties.adapter->tuner_attach = &dw2102_earda_tuner_attach;
1025 break; 1178 break;
1026 } 1179 }
1027 } 1180 }
@@ -1184,13 +1337,13 @@ static struct dvb_usb_device_properties dw3101_properties = {
1184 } 1337 }
1185}; 1338};
1186 1339
1187static struct dvb_usb_device_properties s630_properties = { 1340static struct dvb_usb_device_properties s6x0_properties = {
1188 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 1341 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
1189 .usb_ctrl = DEVICE_SPECIFIC, 1342 .usb_ctrl = DEVICE_SPECIFIC,
1190 .firmware = "dvb-usb-s630.fw", 1343 .firmware = "dvb-usb-s630.fw",
1191 .no_reconnect = 1, 1344 .no_reconnect = 1,
1192 1345
1193 .i2c_algo = &s630_i2c_algo, 1346 .i2c_algo = &s6x0_i2c_algo,
1194 .rc_key_map = tevii_rc_keys, 1347 .rc_key_map = tevii_rc_keys,
1195 .rc_key_map_size = ARRAY_SIZE(tevii_rc_keys), 1348 .rc_key_map_size = ARRAY_SIZE(tevii_rc_keys),
1196 .rc_interval = 150, 1349 .rc_interval = 150,
@@ -1199,12 +1352,12 @@ static struct dvb_usb_device_properties s630_properties = {
1199 .generic_bulk_ctrl_endpoint = 0x81, 1352 .generic_bulk_ctrl_endpoint = 0x81,
1200 .num_adapters = 1, 1353 .num_adapters = 1,
1201 .download_firmware = dw2102_load_firmware, 1354 .download_firmware = dw2102_load_firmware,
1202 .read_mac_address = s630_read_mac_address, 1355 .read_mac_address = s6x0_read_mac_address,
1203 .adapter = { 1356 .adapter = {
1204 { 1357 {
1205 .frontend_attach = s630_frontend_attach, 1358 .frontend_attach = s6x0_frontend_attach,
1206 .streaming_ctrl = NULL, 1359 .streaming_ctrl = NULL,
1207 .tuner_attach = s630_zl10039_tuner_attach, 1360 .tuner_attach = NULL,
1208 .stream = { 1361 .stream = {
1209 .type = USB_BULK, 1362 .type = USB_BULK,
1210 .count = 8, 1363 .count = 8,
@@ -1217,12 +1370,20 @@ static struct dvb_usb_device_properties s630_properties = {
1217 }, 1370 },
1218 } 1371 }
1219 }, 1372 },
1220 .num_device_descs = 1, 1373 .num_device_descs = 3,
1221 .devices = { 1374 .devices = {
1222 {"TeVii S630 USB", 1375 {"TeVii S630 USB",
1223 {&dw2102_table[6], NULL}, 1376 {&dw2102_table[6], NULL},
1224 {NULL}, 1377 {NULL},
1225 }, 1378 },
1379 {"Prof 1100 USB ",
1380 {&dw2102_table[7], NULL},
1381 {NULL},
1382 },
1383 {"TeVii S660 USB",
1384 {&dw2102_table[8], NULL},
1385 {NULL},
1386 },
1226 } 1387 }
1227}; 1388};
1228 1389
@@ -1235,10 +1396,10 @@ static int dw2102_probe(struct usb_interface *intf,
1235 THIS_MODULE, NULL, adapter_nr) || 1396 THIS_MODULE, NULL, adapter_nr) ||
1236 0 == dvb_usb_device_init(intf, &dw3101_properties, 1397 0 == dvb_usb_device_init(intf, &dw3101_properties,
1237 THIS_MODULE, NULL, adapter_nr) || 1398 THIS_MODULE, NULL, adapter_nr) ||
1238 0 == dvb_usb_device_init(intf, &s630_properties, 1399 0 == dvb_usb_device_init(intf, &s6x0_properties,
1239 THIS_MODULE, NULL, adapter_nr)) { 1400 THIS_MODULE, NULL, adapter_nr))
1240 return 0; 1401 return 0;
1241 } 1402
1242 return -ENODEV; 1403 return -ENODEV;
1243} 1404}
1244 1405
@@ -1269,6 +1430,7 @@ module_exit(dw2102_module_exit);
1269MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by"); 1430MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
1270MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104," 1431MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
1271 " DVB-C 3101 USB2.0," 1432 " DVB-C 3101 USB2.0,"
1272 " TeVii S600, S630, S650 USB2.0 devices"); 1433 " TeVii S600, S630, S650, S660 USB2.0,"
1434 " Prof 1100 USB2.0 devices");
1273MODULE_VERSION("0.1"); 1435MODULE_VERSION("0.1");
1274MODULE_LICENSE("GPL"); 1436MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index 9cbbe42ca44b..ebb7b9fd115b 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -134,11 +134,13 @@ static int jdvbt90502_pll_set_freq(struct jdvbt90502_state *state, u32 freq)
134 deb_fe("%s: freq=%d, step=%d\n", __func__, freq, 134 deb_fe("%s: freq=%d, step=%d\n", __func__, freq,
135 state->frontend.ops.info.frequency_stepsize); 135 state->frontend.ops.info.frequency_stepsize);
136 /* freq -> oscilator frequency conversion. */ 136 /* freq -> oscilator frequency conversion. */
137 /* freq: 473,000,000 + n*6,000,000 (no 1/7MHz shift to center freq) */ 137 /* freq: 473,000,000 + n*6,000,000 [+ 142857 (center freq. shift)] */
138 /* add 400[1/7 MHZ] = 57.142857MHz. 57MHz for the IF, */
139 /* 1/7MHz for center freq shift */
140 f = freq / state->frontend.ops.info.frequency_stepsize; 138 f = freq / state->frontend.ops.info.frequency_stepsize;
141 f += 400; 139 /* add 399[1/7 MHZ] = 57MHz for the IF */
140 f += 399;
141 /* add center frequency shift if necessary */
142 if (f % 7 == 0)
143 f++;
142 pll_freq_cmd[DEMOD_REDIRECT_REG] = JDVBT90502_2ND_I2C_REG; /* 0xFE */ 144 pll_freq_cmd[DEMOD_REDIRECT_REG] = JDVBT90502_2ND_I2C_REG; /* 0xFE */
143 pll_freq_cmd[ADDRESS_BYTE] = state->config.pll_address << 1; 145 pll_freq_cmd[ADDRESS_BYTE] = state->config.pll_address << 1;
144 pll_freq_cmd[DIVIDER_BYTE1] = (f >> 8) & 0x7F; 146 pll_freq_cmd[DIVIDER_BYTE1] = (f >> 8) & 0x7F;
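
Assuming a frequency_stepsize of 142857 Hz (the 1/7 MHz unit the bracketed comments refer to), the new conversion adds 399 steps (57 MHz) for the IF and then folds in the extra 1/7 MHz center-frequency shift only when the divider is still a multiple of 7, i.e. when the caller asked for the unshifted channel frequency. A worked example as a small stand-alone program; the 473 MHz input is just an illustration:

#include <stdio.h>

int main(void)
{
	unsigned int step = 142857;	/* assumed frequency_stepsize: 1/7 MHz */
	unsigned int freq = 473000000;	/* UHF channel without the center shift */
	unsigned int f = freq / step;	/* 473000000 / 142857 = 3311 */

	f += 399;			/* + 57 MHz IF in 1/7 MHz units -> 3710 */
	if (f % 7 == 0)			/* 3710 is a multiple of 7 ...          */
		f++;			/* ... so add the 1/7 MHz shift -> 3711 */

	printf("divider = %u, high byte = 0x%02x\n", f, (f >> 8) & 0x7f);
	return 0;
}

A request that already contains the shift (473,142,857 Hz) divides to 3312 and lands on the same divider, 3711, so both callers end up programming the PLL identically.
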
diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
index 20eadf9318e0..7a7f1b2b681c 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
@@ -146,8 +146,8 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend* fe,
146 146
147 switch (c->delivery_system) { 147 switch (c->delivery_system) {
148 case SYS_DVBS: 148 case SYS_DVBS:
149 /* Only QPSK is supported for DVB-S */ 149 /* Allow QPSK and 8PSK (even for DVB-S) */
150 if (c->modulation != QPSK) { 150 if (c->modulation != QPSK && c->modulation != PSK_8) {
151 deb_fe("%s: unsupported modulation selected (%d)\n", 151 deb_fe("%s: unsupported modulation selected (%d)\n",
152 __func__, c->modulation); 152 __func__, c->modulation);
153 return -EOPNOTSUPP; 153 return -EOPNOTSUPP;
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 58aac018f109..a3b8b697349b 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -526,6 +526,15 @@ config DVB_TUNER_DIB0070
526 This device is only used inside a SiP called together with a 526 This device is only used inside a SiP called together with a
527 demodulator for now. 527 demodulator for now.
528 528
529config DVB_TUNER_DIB0090
530 tristate "DiBcom DiB0090 silicon base-band tuner"
531 depends on I2C
532 default m if DVB_FE_CUSTOMISE
533 help
534 A driver for the silicon baseband tuner DiB0090 from DiBcom.
535 This device is only used inside a SiP called together with a
536 demodulator for now.
537
529comment "SEC control devices for DVB-S" 538comment "SEC control devices for DVB-S"
530 depends on DVB_CORE 539 depends on DVB_CORE
531 540
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 823482535d11..47575cc7b699 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_DVB_TDA10086) += tda10086.o
55obj-$(CONFIG_DVB_TDA826X) += tda826x.o 55obj-$(CONFIG_DVB_TDA826X) += tda826x.o
56obj-$(CONFIG_DVB_TDA8261) += tda8261.o 56obj-$(CONFIG_DVB_TDA8261) += tda8261.o
57obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o 57obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
58obj-$(CONFIG_DVB_TUNER_DIB0090) += dib0090.o
58obj-$(CONFIG_DVB_TUA6100) += tua6100.o 59obj-$(CONFIG_DVB_TUA6100) += tua6100.o
59obj-$(CONFIG_DVB_S5H1409) += s5h1409.o 60obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
60obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o 61obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 2dc2723b724a..24268ef2753d 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -62,7 +62,7 @@ struct au8522_register_config {
62 The values are as follows from left to right 62 The values are as follows from left to right
63 0="ATV RF" 1="ATV RF13" 2="CVBS" 3="S-Video" 4="PAL" 5=CVBS13" 6="SVideo13" 63 0="ATV RF" 1="ATV RF13" 2="CVBS" 3="S-Video" 4="PAL" 5=CVBS13" 6="SVideo13"
64*/ 64*/
65struct au8522_register_config filter_coef[] = { 65static const struct au8522_register_config filter_coef[] = {
66 {AU8522_FILTER_COEF_R410, {0x25, 0x00, 0x25, 0x25, 0x00, 0x00, 0x00} }, 66 {AU8522_FILTER_COEF_R410, {0x25, 0x00, 0x25, 0x25, 0x00, 0x00, 0x00} },
67 {AU8522_FILTER_COEF_R411, {0x20, 0x00, 0x20, 0x20, 0x00, 0x00, 0x00} }, 67 {AU8522_FILTER_COEF_R411, {0x20, 0x00, 0x20, 0x20, 0x00, 0x00, 0x00} },
68 {AU8522_FILTER_COEF_R412, {0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00} }, 68 {AU8522_FILTER_COEF_R412, {0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00} },
@@ -104,7 +104,7 @@ struct au8522_register_config filter_coef[] = {
104 0="SIF" 1="ATVRF/ATVRF13" 104 0="SIF" 1="ATVRF/ATVRF13"
105 Note: the "ATVRF/ATVRF13" mode has never been tested 105 Note: the "ATVRF/ATVRF13" mode has never been tested
106*/ 106*/
107struct au8522_register_config lpfilter_coef[] = { 107static const struct au8522_register_config lpfilter_coef[] = {
108 {0x060b, {0x21, 0x0b} }, 108 {0x060b, {0x21, 0x0b} },
109 {0x060c, {0xad, 0xad} }, 109 {0x060c, {0xad, 0xad} },
110 {0x060d, {0x70, 0xf0} }, 110 {0x060d, {0x70, 0xf0} },
diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
index 2be17b93e0bd..0d12763603b4 100644
--- a/drivers/media/dvb/frontends/dib0070.c
+++ b/drivers/media/dvb/frontends/dib0070.c
@@ -49,21 +49,6 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
49#define DIB0070_P1G 0x03 49#define DIB0070_P1G 0x03
50#define DIB0070S_P1A 0x02 50#define DIB0070S_P1A 0x02
51 51
52enum frontend_tune_state {
53 CT_TUNER_START = 10,
54 CT_TUNER_STEP_0,
55 CT_TUNER_STEP_1,
56 CT_TUNER_STEP_2,
57 CT_TUNER_STEP_3,
58 CT_TUNER_STEP_4,
59 CT_TUNER_STEP_5,
60 CT_TUNER_STEP_6,
61 CT_TUNER_STEP_7,
62 CT_TUNER_STOP,
63};
64
65#define FE_CALLBACK_TIME_NEVER 0xffffffff
66
67struct dib0070_state { 52struct dib0070_state {
68 struct i2c_adapter *i2c; 53 struct i2c_adapter *i2c;
69 struct dvb_frontend *fe; 54 struct dvb_frontend *fe;
@@ -71,10 +56,10 @@ struct dib0070_state {
71 u16 wbd_ff_offset; 56 u16 wbd_ff_offset;
72 u8 revision; 57 u8 revision;
73 58
74 enum frontend_tune_state tune_state; 59 enum frontend_tune_state tune_state;
75 u32 current_rf; 60 u32 current_rf;
76 61
77 /* for the captrim binary search */ 62 /* for the captrim binary search */
78 s8 step; 63 s8 step;
79 u16 adc_diff; 64 u16 adc_diff;
80 65
@@ -85,7 +70,7 @@ struct dib0070_state {
85 const struct dib0070_tuning *current_tune_table_index; 70 const struct dib0070_tuning *current_tune_table_index;
86 const struct dib0070_lna_match *lna_match; 71 const struct dib0070_lna_match *lna_match;
87 72
88 u8 wbd_gain_current; 73 u8 wbd_gain_current;
89 u16 wbd_offset_3_3[2]; 74 u16 wbd_offset_3_3[2];
90}; 75};
91 76
@@ -93,8 +78,8 @@ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
93{ 78{
94 u8 b[2]; 79 u8 b[2];
95 struct i2c_msg msg[2] = { 80 struct i2c_msg msg[2] = {
96 {.addr = state->cfg->i2c_address,.flags = 0,.buf = &reg,.len = 1}, 81 { .addr = state->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 },
97 {.addr = state->cfg->i2c_address,.flags = I2C_M_RD,.buf = b,.len = 2}, 82 { .addr = state->cfg->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2 },
98 }; 83 };
99 if (i2c_transfer(state->i2c, msg, 2) != 2) { 84 if (i2c_transfer(state->i2c, msg, 2) != 2) {
100 printk(KERN_WARNING "DiB0070 I2C read failed\n"); 85 printk(KERN_WARNING "DiB0070 I2C read failed\n");
@@ -106,7 +91,7 @@ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
106static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val) 91static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
107{ 92{
108 u8 b[3] = { reg, val >> 8, val & 0xff }; 93 u8 b[3] = { reg, val >> 8, val & 0xff };
109 struct i2c_msg msg = {.addr = state->cfg->i2c_address,.flags = 0,.buf = b,.len = 3 }; 94 struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = b, .len = 3 };
110 if (i2c_transfer(state->i2c, &msg, 1) != 1) { 95 if (i2c_transfer(state->i2c, &msg, 1) != 1) {
111 printk(KERN_WARNING "DiB0070 I2C write failed\n"); 96 printk(KERN_WARNING "DiB0070 I2C write failed\n");
112 return -EREMOTEIO; 97 return -EREMOTEIO;
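
dib0070 registers are 16 bits wide: a read is a one-byte register-number write followed by a two-byte read, and a write sends { reg, val >> 8, val & 0xff } in a single message, as the reformatted hunks above show. A compact sketch of the same pair against the generic i2c API; the helper names are illustrative, and the read side assumes the value is assembled high byte first to match the write side:

#include <linux/i2c.h>
#include <linux/errno.h>

static int reg16_read(struct i2c_adapter *adap, u8 addr, u8 reg, u16 *val)
{
	u8 b[2];
	struct i2c_msg msg[2] = {
		{ .addr = addr, .flags = 0,        .buf = &reg, .len = 1 },
		{ .addr = addr, .flags = I2C_M_RD, .buf = b,    .len = 2 },
	};

	if (i2c_transfer(adap, msg, 2) != 2)
		return -EREMOTEIO;
	*val = (b[0] << 8) | b[1];	/* assumed big-endian on the wire */
	return 0;
}

static int reg16_write(struct i2c_adapter *adap, u8 addr, u8 reg, u16 val)
{
	u8 b[3] = { reg, val >> 8, val & 0xff };
	struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = b, .len = 3 };

	return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EREMOTEIO;
}
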
@@ -124,30 +109,30 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
124 109
125static int dib0070_set_bandwidth(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch) 110static int dib0070_set_bandwidth(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
126{ 111{
127 struct dib0070_state *state = fe->tuner_priv; 112 struct dib0070_state *state = fe->tuner_priv;
128 u16 tmp = dib0070_read_reg(state, 0x02) & 0x3fff; 113 u16 tmp = dib0070_read_reg(state, 0x02) & 0x3fff;
129 114
130 if (state->fe->dtv_property_cache.bandwidth_hz / 1000 > 7000) 115 if (state->fe->dtv_property_cache.bandwidth_hz/1000 > 7000)
131 tmp |= (0 << 14); 116 tmp |= (0 << 14);
132 else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 > 6000) 117 else if (state->fe->dtv_property_cache.bandwidth_hz/1000 > 6000)
133 tmp |= (1 << 14); 118 tmp |= (1 << 14);
134 else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 > 5000) 119 else if (state->fe->dtv_property_cache.bandwidth_hz/1000 > 5000)
135 tmp |= (2 << 14); 120 tmp |= (2 << 14);
136 else 121 else
137 tmp |= (3 << 14); 122 tmp |= (3 << 14);
138 123
139 dib0070_write_reg(state, 0x02, tmp); 124 dib0070_write_reg(state, 0x02, tmp);
140 125
141 /* sharpen the BB filter in ISDB-T to have higher immunity to adjacent channels */ 126 /* sharpen the BB filter in ISDB-T to have higher immunity to adjacent channels */
142 if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) { 127 if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) {
143 u16 value = dib0070_read_reg(state, 0x17); 128 u16 value = dib0070_read_reg(state, 0x17);
144 129
145 dib0070_write_reg(state, 0x17, value & 0xfffc); 130 dib0070_write_reg(state, 0x17, value & 0xfffc);
146 tmp = dib0070_read_reg(state, 0x01) & 0x01ff; 131 tmp = dib0070_read_reg(state, 0x01) & 0x01ff;
147 dib0070_write_reg(state, 0x01, tmp | (60 << 9)); 132 dib0070_write_reg(state, 0x01, tmp | (60 << 9));
148 133
149 dib0070_write_reg(state, 0x17, value); 134 dib0070_write_reg(state, 0x17, value);
150 } 135 }
151 return 0; 136 return 0;
152} 137}
153 138
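
dib0070_set_bandwidth packs the channel bandwidth into bits 15:14 of register 0x02 (the & 0x3fff above clears exactly those two bits): anything above 7 MHz selects code 0, above 6 MHz code 1, above 5 MHz code 2, everything else code 3. The mapping as a tiny stand-alone helper:

/* Bandwidth (Hz) -> 2-bit code at bits 15:14 of dib0070 register 0x02.
 * 8 MHz -> 0, 7 MHz -> 1, 6 MHz -> 2, 5 MHz and below -> 3. */
static unsigned int bw_code(unsigned int bandwidth_hz)
{
	unsigned int khz = bandwidth_hz / 1000;

	if (khz > 7000)
		return 0u << 14;
	if (khz > 6000)
		return 1u << 14;
	if (khz > 5000)
		return 2u << 14;
	return 3u << 14;
}
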
@@ -160,14 +145,14 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
160 if (*tune_state == CT_TUNER_STEP_0) { 145 if (*tune_state == CT_TUNER_STEP_0) {
161 146
162 dib0070_write_reg(state, 0x0f, 0xed10); 147 dib0070_write_reg(state, 0x0f, 0xed10);
163 dib0070_write_reg(state, 0x17, 0x0034); 148 dib0070_write_reg(state, 0x17, 0x0034);
164 149
165 dib0070_write_reg(state, 0x18, 0x0032); 150 dib0070_write_reg(state, 0x18, 0x0032);
166 state->step = state->captrim = state->fcaptrim = 64; 151 state->step = state->captrim = state->fcaptrim = 64;
167 state->adc_diff = 3000; 152 state->adc_diff = 3000;
168 ret = 20; 153 ret = 20;
169 154
170 *tune_state = CT_TUNER_STEP_1; 155 *tune_state = CT_TUNER_STEP_1;
171 } else if (*tune_state == CT_TUNER_STEP_1) { 156 } else if (*tune_state == CT_TUNER_STEP_1) {
172 state->step /= 2; 157 state->step /= 2;
173 dib0070_write_reg(state, 0x14, state->lo4 | state->captrim); 158 dib0070_write_reg(state, 0x14, state->lo4 | state->captrim);
@@ -178,7 +163,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
178 163
179 adc = dib0070_read_reg(state, 0x19); 164 adc = dib0070_read_reg(state, 0x19);
180 165
181 dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV", state->captrim, adc, (u32) adc * (u32) 1800 / (u32) 1024); 166 dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
182 167
183 if (adc >= 400) { 168 if (adc >= 400) {
184 adc -= 400; 169 adc -= 400;
@@ -193,6 +178,8 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
193 state->adc_diff = adc; 178 state->adc_diff = adc;
194 state->fcaptrim = state->captrim; 179 state->fcaptrim = state->captrim;
195 180
181
182
196 } 183 }
197 state->captrim += (step_sign * state->step); 184 state->captrim += (step_sign * state->step);
198 185
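
The captrim states implement a binary search over a 7-bit capacitor trim: the step starts at 64 and halves each pass, the trim moves up or down depending on whether the ADC reading is below or above 400, and the smallest |ADC - 400| seen so far is latched into fcaptrim. A stand-alone sketch of the same search; measure() is a hypothetical stand-in for programming the trim and reading the ADC, and the clamping of the trim to its valid range is omitted:

#include <stdio.h>
#include <stdlib.h>

static int search_captrim(int (*measure)(int trim))
{
	int captrim = 64, fcaptrim = 64, step = 64;
	int best_diff = 3000;

	while (step > 0) {
		step /= 2;
		int adc = measure(captrim);
		int diff = abs(adc - 400);
		int sign = (adc >= 400) ? -1 : 1;

		if (diff < best_diff) {
			best_diff = diff;
			fcaptrim = captrim;
		}
		captrim += sign * step;
	}
	return fcaptrim;
}

static int toy_measure(int trim)
{
	return 6 * trim;	/* crosses the 400 target near trim 67 */
}

int main(void)
{
	printf("best trim = %d\n", search_captrim(toy_measure));
	return 0;
}
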
@@ -213,7 +200,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
213static int dib0070_set_ctrl_lo5(struct dvb_frontend *fe, u8 vco_bias_trim, u8 hf_div_trim, u8 cp_current, u8 third_order_filt) 200static int dib0070_set_ctrl_lo5(struct dvb_frontend *fe, u8 vco_bias_trim, u8 hf_div_trim, u8 cp_current, u8 third_order_filt)
214{ 201{
215 struct dib0070_state *state = fe->tuner_priv; 202 struct dib0070_state *state = fe->tuner_priv;
216 u16 lo5 = (third_order_filt << 14) | (0 << 13) | (1 << 12) | (3 << 9) | (cp_current << 6) | (hf_div_trim << 3) | (vco_bias_trim << 0); 203 u16 lo5 = (third_order_filt << 14) | (0 << 13) | (1 << 12) | (3 << 9) | (cp_current << 6) | (hf_div_trim << 3) | (vco_bias_trim << 0);
217 dprintk("CTRL_LO5: 0x%x", lo5); 204 dprintk("CTRL_LO5: 0x%x", lo5);
218 return dib0070_write_reg(state, 0x15, lo5); 205 return dib0070_write_reg(state, 0x15, lo5);
219} 206}
@@ -227,99 +214,99 @@ void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
227 dib0070_write_reg(state, 0x1a, 0x0000); 214 dib0070_write_reg(state, 0x1a, 0x0000);
228 } else { 215 } else {
229 dib0070_write_reg(state, 0x1b, 0x4112); 216 dib0070_write_reg(state, 0x1b, 0x4112);
230 if (state->cfg->vga_filter != 0) { 217 if (state->cfg->vga_filter != 0) {
231 dib0070_write_reg(state, 0x1a, state->cfg->vga_filter); 218 dib0070_write_reg(state, 0x1a, state->cfg->vga_filter);
232 dprintk("vga filter register is set to %x", state->cfg->vga_filter); 219 dprintk("vga filter register is set to %x", state->cfg->vga_filter);
233 } else 220 } else
234 dib0070_write_reg(state, 0x1a, 0x0009); 221 dib0070_write_reg(state, 0x1a, 0x0009);
235 } 222 }
236} 223}
237 224
238EXPORT_SYMBOL(dib0070_ctrl_agc_filter); 225EXPORT_SYMBOL(dib0070_ctrl_agc_filter);
239struct dib0070_tuning { 226struct dib0070_tuning {
240 u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */ 227 u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
241 u8 switch_trim; 228 u8 switch_trim;
242 u8 vco_band; 229 u8 vco_band;
243 u8 hfdiv; 230 u8 hfdiv;
244 u8 vco_multi; 231 u8 vco_multi;
245 u8 presc; 232 u8 presc;
246 u8 wbdmux; 233 u8 wbdmux;
247 u16 tuner_enable; 234 u16 tuner_enable;
248}; 235};
249 236
250struct dib0070_lna_match { 237struct dib0070_lna_match {
251 u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */ 238 u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
252 u8 lna_band; 239 u8 lna_band;
253}; 240};
254 241
255static const struct dib0070_tuning dib0070s_tuning_table[] = { 242static const struct dib0070_tuning dib0070s_tuning_table[] = {
256 {570000, 2, 1, 3, 6, 6, 2, 0x4000 | 0x0800}, /* UHF */ 243 { 570000, 2, 1, 3, 6, 6, 2, 0x4000 | 0x0800 }, /* UHF */
257 {700000, 2, 0, 2, 4, 2, 2, 0x4000 | 0x0800}, 244 { 700000, 2, 0, 2, 4, 2, 2, 0x4000 | 0x0800 },
258 {863999, 2, 1, 2, 4, 2, 2, 0x4000 | 0x0800}, 245 { 863999, 2, 1, 2, 4, 2, 2, 0x4000 | 0x0800 },
259 {1500000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400}, /* LBAND */ 246 { 1500000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400 }, /* LBAND */
260 {1600000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400}, 247 { 1600000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400 },
261 {2000000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400}, 248 { 2000000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400 },
262 {0xffffffff, 0, 0, 8, 1, 2, 1, 0x8000 | 0x1000}, /* SBAND */ 249 { 0xffffffff, 0, 0, 8, 1, 2, 1, 0x8000 | 0x1000 }, /* SBAND */
263}; 250};
264 251
265static const struct dib0070_tuning dib0070_tuning_table[] = { 252static const struct dib0070_tuning dib0070_tuning_table[] = {
266 {115000, 1, 0, 7, 24, 2, 1, 0x8000 | 0x1000}, /* FM below 92MHz cannot be tuned */ 253 { 115000, 1, 0, 7, 24, 2, 1, 0x8000 | 0x1000 }, /* FM below 92MHz cannot be tuned */
267 {179500, 1, 0, 3, 16, 2, 1, 0x8000 | 0x1000}, /* VHF */ 254 { 179500, 1, 0, 3, 16, 2, 1, 0x8000 | 0x1000 }, /* VHF */
268 {189999, 1, 1, 3, 16, 2, 1, 0x8000 | 0x1000}, 255 { 189999, 1, 1, 3, 16, 2, 1, 0x8000 | 0x1000 },
269 {250000, 1, 0, 6, 12, 2, 1, 0x8000 | 0x1000}, 256 { 250000, 1, 0, 6, 12, 2, 1, 0x8000 | 0x1000 },
270 {569999, 2, 1, 5, 6, 2, 2, 0x4000 | 0x0800}, /* UHF */ 257 { 569999, 2, 1, 5, 6, 2, 2, 0x4000 | 0x0800 }, /* UHF */
271 {699999, 2, 0, 1, 4, 2, 2, 0x4000 | 0x0800}, 258 { 699999, 2, 0, 1, 4, 2, 2, 0x4000 | 0x0800 },
272 {863999, 2, 1, 1, 4, 2, 2, 0x4000 | 0x0800}, 259 { 863999, 2, 1, 1, 4, 2, 2, 0x4000 | 0x0800 },
273 {0xffffffff, 0, 1, 0, 2, 2, 4, 0x2000 | 0x0400}, /* LBAND or everything higher than UHF */ 260 { 0xffffffff, 0, 1, 0, 2, 2, 4, 0x2000 | 0x0400 }, /* LBAND or everything higher than UHF */
274}; 261};
275 262
276static const struct dib0070_lna_match dib0070_lna_flip_chip[] = { 263static const struct dib0070_lna_match dib0070_lna_flip_chip[] = {
277 {180000, 0}, /* VHF */ 264 { 180000, 0 }, /* VHF */
278 {188000, 1}, 265 { 188000, 1 },
279 {196400, 2}, 266 { 196400, 2 },
280 {250000, 3}, 267 { 250000, 3 },
281 {550000, 0}, /* UHF */ 268 { 550000, 0 }, /* UHF */
282 {590000, 1}, 269 { 590000, 1 },
283 {666000, 3}, 270 { 666000, 3 },
284 {864000, 5}, 271 { 864000, 5 },
285 {1500000, 0}, /* LBAND or everything higher than UHF */ 272 { 1500000, 0 }, /* LBAND or everything higher than UHF */
286 {1600000, 1}, 273 { 1600000, 1 },
287 {2000000, 3}, 274 { 2000000, 3 },
288 {0xffffffff, 7}, 275 { 0xffffffff, 7 },
289}; 276};
290 277
291static const struct dib0070_lna_match dib0070_lna[] = { 278static const struct dib0070_lna_match dib0070_lna[] = {
292 {180000, 0}, /* VHF */ 279 { 180000, 0 }, /* VHF */
293 {188000, 1}, 280 { 188000, 1 },
294 {196400, 2}, 281 { 196400, 2 },
295 {250000, 3}, 282 { 250000, 3 },
296 {550000, 2}, /* UHF */ 283 { 550000, 2 }, /* UHF */
297 {650000, 3}, 284 { 650000, 3 },
298 {750000, 5}, 285 { 750000, 5 },
299 {850000, 6}, 286 { 850000, 6 },
300 {864000, 7}, 287 { 864000, 7 },
301 {1500000, 0}, /* LBAND or everything higher than UHF */ 288 { 1500000, 0 }, /* LBAND or everything higher than UHF */
302 {1600000, 1}, 289 { 1600000, 1 },
303 {2000000, 3}, 290 { 2000000, 3 },
304 {0xffffffff, 7}, 291 { 0xffffffff, 7 },
305}; 292};
306 293
307#define LPF 100 // define for the loop filter 100kHz by default 16-07-06 294#define LPF 100
308static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch) 295static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
309{ 296{
310 struct dib0070_state *state = fe->tuner_priv; 297 struct dib0070_state *state = fe->tuner_priv;
311 298
312 const struct dib0070_tuning *tune; 299 const struct dib0070_tuning *tune;
313 const struct dib0070_lna_match *lna_match; 300 const struct dib0070_lna_match *lna_match;
314 301
315 enum frontend_tune_state *tune_state = &state->tune_state; 302 enum frontend_tune_state *tune_state = &state->tune_state;
316 int ret = 10; /* 1ms is the default delay most of the time */ 303 int ret = 10; /* 1ms is the default delay most of the time */
317 304
318 u8 band = (u8) BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000); 305 u8 band = (u8)BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency/1000);
319 u32 freq = fe->dtv_property_cache.frequency / 1000 + (band == BAND_VHF ? state->cfg->freq_offset_khz_vhf : state->cfg->freq_offset_khz_uhf); 306 u32 freq = fe->dtv_property_cache.frequency/1000 + (band == BAND_VHF ? state->cfg->freq_offset_khz_vhf : state->cfg->freq_offset_khz_uhf);
320 307
321#ifdef CONFIG_SYS_ISDBT 308#ifdef CONFIG_SYS_ISDBT
322 if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1) 309 if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1)
323 if (((state->fe->dtv_property_cache.isdbt_sb_segment_count % 2) 310 if (((state->fe->dtv_property_cache.isdbt_sb_segment_count % 2)
324 && (state->fe->dtv_property_cache.isdbt_sb_segment_idx == ((state->fe->dtv_property_cache.isdbt_sb_segment_count / 2) + 1))) 311 && (state->fe->dtv_property_cache.isdbt_sb_segment_idx == ((state->fe->dtv_property_cache.isdbt_sb_segment_count / 2) + 1)))
325 || (((state->fe->dtv_property_cache.isdbt_sb_segment_count % 2) == 0) 312 || (((state->fe->dtv_property_cache.isdbt_sb_segment_count % 2) == 0)
@@ -328,172 +315,180 @@ static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_par
328 && (state->fe->dtv_property_cache.isdbt_sb_segment_idx == ((state->fe->dtv_property_cache.isdbt_sb_segment_count / 2) + 1)))) 315 && (state->fe->dtv_property_cache.isdbt_sb_segment_idx == ((state->fe->dtv_property_cache.isdbt_sb_segment_count / 2) + 1))))
329 freq += 850; 316 freq += 850;
330#endif 317#endif
318 if (state->current_rf != freq) {
319
320 switch (state->revision) {
321 case DIB0070S_P1A:
322 tune = dib0070s_tuning_table;
323 lna_match = dib0070_lna;
324 break;
325 default:
326 tune = dib0070_tuning_table;
327 if (state->cfg->flip_chip)
328 lna_match = dib0070_lna_flip_chip;
329 else
330 lna_match = dib0070_lna;
331 break;
332 }
333 while (freq > tune->max_freq) /* find the right one */
334 tune++;
335 while (freq > lna_match->max_freq) /* find the right one */
336 lna_match++;
337
338 state->current_tune_table_index = tune;
339 state->lna_match = lna_match;
340 }
341
342 if (*tune_state == CT_TUNER_START) {
343 dprintk("Tuning for Band: %hd (%d kHz)", band, freq);
331 if (state->current_rf != freq) { 344 if (state->current_rf != freq) {
345 u8 REFDIV;
346 u32 FBDiv, Rest, FREF, VCOF_kHz;
347 u8 Den;
348
349 state->current_rf = freq;
350 state->lo4 = (state->current_tune_table_index->vco_band << 11) | (state->current_tune_table_index->hfdiv << 7);
351
352
353 dib0070_write_reg(state, 0x17, 0x30);
354
355
356 VCOF_kHz = state->current_tune_table_index->vco_multi * freq * 2;
357
358 switch (band) {
359 case BAND_VHF:
360 REFDIV = (u8) ((state->cfg->clock_khz + 9999) / 10000);
361 break;
362 case BAND_FM:
363 REFDIV = (u8) ((state->cfg->clock_khz) / 1000);
364 break;
365 default:
366 REFDIV = (u8) (state->cfg->clock_khz / 10000);
367 break;
368 }
369 FREF = state->cfg->clock_khz / REFDIV;
370
371
332 372
333 switch (state->revision) { 373 switch (state->revision) {
334 case DIB0070S_P1A: 374 case DIB0070S_P1A:
335 tune = dib0070s_tuning_table; 375 FBDiv = (VCOF_kHz / state->current_tune_table_index->presc / FREF);
336 lna_match = dib0070_lna; 376 Rest = (VCOF_kHz / state->current_tune_table_index->presc) - FBDiv * FREF;
337 break; 377 break;
378
379 case DIB0070_P1G:
380 case DIB0070_P1F:
338 default: 381 default:
339 tune = dib0070_tuning_table; 382 FBDiv = (freq / (FREF / 2));
340 if (state->cfg->flip_chip) 383 Rest = 2 * freq - FBDiv * FREF;
341 lna_match = dib0070_lna_flip_chip;
342 else
343 lna_match = dib0070_lna;
344 break; 384 break;
345 } 385 }
346 while (freq > tune->max_freq) /* find the right one */
347 tune++;
348 while (freq > lna_match->max_freq) /* find the right one */
349 lna_match++;
350 386
351 state->current_tune_table_index = tune; 387 if (Rest < LPF)
352 state->lna_match = lna_match; 388 Rest = 0;
353 } 389 else if (Rest < 2 * LPF)
390 Rest = 2 * LPF;
391 else if (Rest > (FREF - LPF)) {
392 Rest = 0;
393 FBDiv += 1;
394 } else if (Rest > (FREF - 2 * LPF))
395 Rest = FREF - 2 * LPF;
396 Rest = (Rest * 6528) / (FREF / 10);
397
398 Den = 1;
399 if (Rest > 0) {
400 state->lo4 |= (1 << 14) | (1 << 12);
401 Den = 255;
402 }
403
354 404
355 if (*tune_state == CT_TUNER_START) { 405 dib0070_write_reg(state, 0x11, (u16)FBDiv);
356 dprintk("Tuning for Band: %hd (%d kHz)", band, freq); 406 dib0070_write_reg(state, 0x12, (Den << 8) | REFDIV);
357 if (state->current_rf != freq) { 407 dib0070_write_reg(state, 0x13, (u16) Rest);
358 u8 REFDIV; 408
359 u32 FBDiv, Rest, FREF, VCOF_kHz; 409 if (state->revision == DIB0070S_P1A) {
360 u8 Den; 410
361 411 if (band == BAND_SBAND) {
362 state->current_rf = freq; 412 dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0);
363 state->lo4 = (state->current_tune_table_index->vco_band << 11) | (state->current_tune_table_index->hfdiv << 7); 413 dib0070_write_reg(state, 0x1d, 0xFFFF);
364 414 } else
365 dib0070_write_reg(state, 0x17, 0x30); 415 dib0070_set_ctrl_lo5(fe, 5, 4, 3, 1);
366
367 VCOF_kHz = state->current_tune_table_index->vco_multi * freq * 2;
368
369 switch (band) {
370 case BAND_VHF:
371 REFDIV = (u8) ((state->cfg->clock_khz + 9999) / 10000);
372 break;
373 case BAND_FM:
374 REFDIV = (u8) ((state->cfg->clock_khz) / 1000);
375 break;
376 default:
377 REFDIV = (u8) (state->cfg->clock_khz / 10000);
378 break;
379 }
380 FREF = state->cfg->clock_khz / REFDIV;
381
382 switch (state->revision) {
383 case DIB0070S_P1A:
384 FBDiv = (VCOF_kHz / state->current_tune_table_index->presc / FREF);
385 Rest = (VCOF_kHz / state->current_tune_table_index->presc) - FBDiv * FREF;
386 break;
387
388 case DIB0070_P1G:
389 case DIB0070_P1F:
390 default:
391 FBDiv = (freq / (FREF / 2));
392 Rest = 2 * freq - FBDiv * FREF;
393 break;
394 }
395
396 if (Rest < LPF)
397 Rest = 0;
398 else if (Rest < 2 * LPF)
399 Rest = 2 * LPF;
400 else if (Rest > (FREF - LPF)) {
401 Rest = 0;
402 FBDiv += 1;
403 } else if (Rest > (FREF - 2 * LPF))
404 Rest = FREF - 2 * LPF;
405 Rest = (Rest * 6528) / (FREF / 10);
406
407 Den = 1;
408 if (Rest > 0) {
409 state->lo4 |= (1 << 14) | (1 << 12);
410 Den = 255;
411 }
412
413 dib0070_write_reg(state, 0x11, (u16) FBDiv);
414 dib0070_write_reg(state, 0x12, (Den << 8) | REFDIV);
415 dib0070_write_reg(state, 0x13, (u16) Rest);
416
417 if (state->revision == DIB0070S_P1A) {
418
419 if (band == BAND_SBAND) {
420 dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0);
421 dib0070_write_reg(state, 0x1d, 0xFFFF);
422 } else
423 dib0070_set_ctrl_lo5(fe, 5, 4, 3, 1);
424 }
425
426 dib0070_write_reg(state, 0x20,
427 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
428
429 dprintk("REFDIV: %hd, FREF: %d", REFDIV, FREF);
430 dprintk("FBDIV: %d, Rest: %d", FBDiv, Rest);
431 dprintk("Num: %hd, Den: %hd, SD: %hd", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
432 dprintk("HFDIV code: %hd", state->current_tune_table_index->hfdiv);
433 dprintk("VCO = %hd", state->current_tune_table_index->vco_band);
434 dprintk("VCOF: ((%hd*%d) << 1))", state->current_tune_table_index->vco_multi, freq);
435
436 *tune_state = CT_TUNER_STEP_0;
437 } else { /* we are already tuned to this frequency - the configuration is correct */
438 ret = 50; /* wakeup time */
439 *tune_state = CT_TUNER_STEP_5;
440 } 416 }
441 } else if ((*tune_state > CT_TUNER_START) && (*tune_state < CT_TUNER_STEP_4)) {
442 417
443 ret = dib0070_captrim(state, tune_state); 418 dib0070_write_reg(state, 0x20,
419 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
444 420
445 } else if (*tune_state == CT_TUNER_STEP_4) { 421 dprintk("REFDIV: %hd, FREF: %d", REFDIV, FREF);
446 const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain; 422 dprintk("FBDIV: %d, Rest: %d", FBDiv, Rest);
447 if (tmp != NULL) { 423 dprintk("Num: %hd, Den: %hd, SD: %hd", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
448 while (freq / 1000 > tmp->freq) /* find the right one */ 424 dprintk("HFDIV code: %hd", state->current_tune_table_index->hfdiv);
449 tmp++; 425 dprintk("VCO = %hd", state->current_tune_table_index->vco_band);
450 dib0070_write_reg(state, 0x0f, 426 dprintk("VCOF: ((%hd*%d) << 1))", state->current_tune_table_index->vco_multi, freq);
451 (0 << 15) | (1 << 14) | (3 << 12) | (tmp->wbd_gain_val << 9) | (0 << 8) | (1 << 7) | (state-> 427
452 current_tune_table_index-> 428 *tune_state = CT_TUNER_STEP_0;
453 wbdmux << 0)); 429 } else { /* we are already tuned to this frequency - the configuration is correct */
454 state->wbd_gain_current = tmp->wbd_gain_val; 430 ret = 50; /* wakeup time */
455 } else { 431 *tune_state = CT_TUNER_STEP_5;
432 }
433 } else if ((*tune_state > CT_TUNER_START) && (*tune_state < CT_TUNER_STEP_4)) {
434
435 ret = dib0070_captrim(state, tune_state);
436
437 } else if (*tune_state == CT_TUNER_STEP_4) {
438 const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain;
439 if (tmp != NULL) {
440 while (freq/1000 > tmp->freq) /* find the right one */
441 tmp++;
442 dib0070_write_reg(state, 0x0f,
443 (0 << 15) | (1 << 14) | (3 << 12)
444 | (tmp->wbd_gain_val << 9) | (0 << 8) | (1 << 7)
445 | (state->current_tune_table_index->wbdmux << 0));
446 state->wbd_gain_current = tmp->wbd_gain_val;
447 } else {
456 dib0070_write_reg(state, 0x0f, 448 dib0070_write_reg(state, 0x0f,
457 (0 << 15) | (1 << 14) | (3 << 12) | (6 << 9) | (0 << 8) | (1 << 7) | (state->current_tune_table_index-> 449 (0 << 15) | (1 << 14) | (3 << 12) | (6 << 9) | (0 << 8) | (1 << 7) | (state->current_tune_table_index->
458 wbdmux << 0)); 450 wbdmux << 0));
459 state->wbd_gain_current = 6; 451 state->wbd_gain_current = 6;
460 } 452 }
461 453
462 dib0070_write_reg(state, 0x06, 0x3fff); 454 dib0070_write_reg(state, 0x06, 0x3fff);
463 dib0070_write_reg(state, 0x07, 455 dib0070_write_reg(state, 0x07,
464 (state->current_tune_table_index->switch_trim << 11) | (7 << 8) | (state->lna_match->lna_band << 3) | (3 << 0)); 456 (state->current_tune_table_index->switch_trim << 11) | (7 << 8) | (state->lna_match->lna_band << 3) | (3 << 0));
465 dib0070_write_reg(state, 0x08, (state->lna_match->lna_band << 10) | (3 << 7) | (127)); 457 dib0070_write_reg(state, 0x08, (state->lna_match->lna_band << 10) | (3 << 7) | (127));
466 dib0070_write_reg(state, 0x0d, 0x0d80); 458 dib0070_write_reg(state, 0x0d, 0x0d80);
467 459
468 dib0070_write_reg(state, 0x18, 0x07ff);
469 dib0070_write_reg(state, 0x17, 0x0033);
470 460
471 *tune_state = CT_TUNER_STEP_5; 461 dib0070_write_reg(state, 0x18, 0x07ff);
472 } else if (*tune_state == CT_TUNER_STEP_5) { 462 dib0070_write_reg(state, 0x17, 0x0033);
473 dib0070_set_bandwidth(fe, ch); 463
474 *tune_state = CT_TUNER_STOP; 464
475 } else { 465 *tune_state = CT_TUNER_STEP_5;
476 ret = FE_CALLBACK_TIME_NEVER; /* tuner finished, time to call again infinite */ 466 } else if (*tune_state == CT_TUNER_STEP_5) {
477 } 467 dib0070_set_bandwidth(fe, ch);
478 return ret; 468 *tune_state = CT_TUNER_STOP;
469 } else {
470 ret = FE_CALLBACK_TIME_NEVER; /* tuner finished, time to call again infinite */
471 }
472 return ret;
479} 473}
480 474
475
481static int dib0070_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) 476static int dib0070_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
482{ 477{
483 struct dib0070_state *state = fe->tuner_priv; 478 struct dib0070_state *state = fe->tuner_priv;
484 uint32_t ret; 479 uint32_t ret;
485 480
486 state->tune_state = CT_TUNER_START; 481 state->tune_state = CT_TUNER_START;
487 482
488 do { 483 do {
489 ret = dib0070_tune_digital(fe, p); 484 ret = dib0070_tune_digital(fe, p);
490 if (ret != FE_CALLBACK_TIME_NEVER) 485 if (ret != FE_CALLBACK_TIME_NEVER)
491 msleep(ret / 10); 486 msleep(ret/10);
492 else 487 else
493 break; 488 break;
494 } while (state->tune_state != CT_TUNER_STOP); 489 } while (state->tune_state != CT_TUNER_STOP);
495 490
496 return 0; 491 return 0;
497} 492}
498 493
499static int dib0070_wakeup(struct dvb_frontend *fe) 494static int dib0070_wakeup(struct dvb_frontend *fe)
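
For the P1F/P1G parts the synthesizer programming above boils down to FREF = clock_khz / REFDIV, an integer feedback divider FBDiv = freq / (FREF / 2), and a remainder Rest that is clamped around the loop-filter bandwidth (LPF, 100 kHz) and rescaled by 6528 / (FREF / 10) before being written to register 0x13. A worked example as a stand-alone program; the 12 MHz reference clock is an assumption, since clock_khz is board configuration and not fixed by this patch:

#include <stdio.h>

#define LPF 100	/* loop-filter bandwidth in kHz, as in the driver */

int main(void)
{
	unsigned int clock_khz = 12000;	/* assumed board reference clock */
	unsigned int freq = 482000;	/* requested RF in kHz (UHF) */
	unsigned int refdiv = clock_khz / 10000;	/* default branch: 1 */
	unsigned int fref = clock_khz / refdiv;		/* 12000 kHz */
	unsigned int fbdiv = freq / (fref / 2);		/* 482000 / 6000 = 80 */
	unsigned int rest = 2 * freq - fbdiv * fref;	/* 964000 - 960000 = 4000 */

	if (rest < LPF)
		rest = 0;
	else if (rest < 2 * LPF)
		rest = 2 * LPF;
	else if (rest > fref - LPF) {
		rest = 0;
		fbdiv += 1;
	} else if (rest > fref - 2 * LPF)
		rest = fref - 2 * LPF;
	rest = (rest * 6528) / (fref / 10);	/* 4000 * 6528 / 1200 = 21760 */

	printf("FBDiv=%u Rest=%u Den=%u\n", fbdiv, rest, rest > 0 ? 255u : 1u);
	return 0;
}

A non-zero Rest switches the synthesizer into fractional mode (Den becomes 255 and bits 14 and 12 are set in lo4), exactly as the driver does before writing registers 0x11, 0x12 and 0x13.
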
@@ -512,92 +507,113 @@ static int dib0070_sleep(struct dvb_frontend *fe)
512 return 0; 507 return 0;
513} 508}
514 509
515static const u16 dib0070_p1f_defaults[] = { 510u8 dib0070_get_rf_output(struct dvb_frontend *fe)
511{
512 struct dib0070_state *state = fe->tuner_priv;
513 return (dib0070_read_reg(state, 0x07) >> 11) & 0x3;
514}
515EXPORT_SYMBOL(dib0070_get_rf_output);
516
517int dib0070_set_rf_output(struct dvb_frontend *fe, u8 no)
518{
519 struct dib0070_state *state = fe->tuner_priv;
520 u16 rxrf2 = dib0070_read_reg(state, 0x07) & 0xfe7ff;
521 if (no > 3)
522 no = 3;
523 if (no < 1)
524 no = 1;
525 return dib0070_write_reg(state, 0x07, rxrf2 | (no << 11));
526}
527EXPORT_SYMBOL(dib0070_set_rf_output);
528
529static const u16 dib0070_p1f_defaults[] =
530
531{
516 7, 0x02, 532 7, 0x02,
517 0x0008, 533 0x0008,
518 0x0000, 534 0x0000,
519 0x0000, 535 0x0000,
520 0x0000, 536 0x0000,
521 0x0000, 537 0x0000,
522 0x0002, 538 0x0002,
523 0x0100, 539 0x0100,
524 540
525 3, 0x0d, 541 3, 0x0d,
526 0x0d80, 542 0x0d80,
527 0x0001, 543 0x0001,
528 0x0000, 544 0x0000,
529 545
530 4, 0x11, 546 4, 0x11,
531 0x0000, 547 0x0000,
532 0x0103, 548 0x0103,
533 0x0000, 549 0x0000,
534 0x0000, 550 0x0000,
535 551
536 3, 0x16, 552 3, 0x16,
537 0x0004 | 0x0040, 553 0x0004 | 0x0040,
538 0x0030, 554 0x0030,
539 0x07ff, 555 0x07ff,
540 556
541 6, 0x1b, 557 6, 0x1b,
542 0x4112, 558 0x4112,
543 0xff00, 559 0xff00,
544 0xc07f, 560 0xc07f,
545 0x0000, 561 0x0000,
546 0x0180, 562 0x0180,
547 0x4000 | 0x0800 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001, 563 0x4000 | 0x0800 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001,
548 564
549 0, 565 0,
550}; 566};
551 567
552static u16 dib0070_read_wbd_offset(struct dib0070_state *state, u8 gain) 568static u16 dib0070_read_wbd_offset(struct dib0070_state *state, u8 gain)
553{ 569{
554 u16 tuner_en = dib0070_read_reg(state, 0x20); 570 u16 tuner_en = dib0070_read_reg(state, 0x20);
555 u16 offset; 571 u16 offset;
556 572
557 dib0070_write_reg(state, 0x18, 0x07ff); 573 dib0070_write_reg(state, 0x18, 0x07ff);
558 dib0070_write_reg(state, 0x20, 0x0800 | 0x4000 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001); 574 dib0070_write_reg(state, 0x20, 0x0800 | 0x4000 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001);
559 dib0070_write_reg(state, 0x0f, (1 << 14) | (2 << 12) | (gain << 9) | (1 << 8) | (1 << 7) | (0 << 0)); 575 dib0070_write_reg(state, 0x0f, (1 << 14) | (2 << 12) | (gain << 9) | (1 << 8) | (1 << 7) | (0 << 0));
560 msleep(9); 576 msleep(9);
561 offset = dib0070_read_reg(state, 0x19); 577 offset = dib0070_read_reg(state, 0x19);
562 dib0070_write_reg(state, 0x20, tuner_en); 578 dib0070_write_reg(state, 0x20, tuner_en);
563 return offset; 579 return offset;
564} 580}
565 581
566static void dib0070_wbd_offset_calibration(struct dib0070_state *state) 582static void dib0070_wbd_offset_calibration(struct dib0070_state *state)
567{ 583{
568 u8 gain; 584 u8 gain;
569 for (gain = 6; gain < 8; gain++) { 585 for (gain = 6; gain < 8; gain++) {
570 state->wbd_offset_3_3[gain - 6] = ((dib0070_read_wbd_offset(state, gain) * 8 * 18 / 33 + 1) / 2); 586 state->wbd_offset_3_3[gain - 6] = ((dib0070_read_wbd_offset(state, gain) * 8 * 18 / 33 + 1) / 2);
571 dprintk("Gain: %d, WBDOffset (3.3V) = %hd", gain, state->wbd_offset_3_3[gain - 6]); 587 dprintk("Gain: %d, WBDOffset (3.3V) = %hd", gain, state->wbd_offset_3_3[gain-6]);
572 } 588 }
573} 589}
574 590
575u16 dib0070_wbd_offset(struct dvb_frontend *fe) 591u16 dib0070_wbd_offset(struct dvb_frontend *fe)
576{ 592{
577 struct dib0070_state *state = fe->tuner_priv; 593 struct dib0070_state *state = fe->tuner_priv;
578 const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain; 594 const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain;
579 u32 freq = fe->dtv_property_cache.frequency / 1000; 595 u32 freq = fe->dtv_property_cache.frequency/1000;
580 596
581 if (tmp != NULL) { 597 if (tmp != NULL) {
582 while (freq / 1000 > tmp->freq) /* find the right one */ 598 while (freq/1000 > tmp->freq) /* find the right one */
583 tmp++; 599 tmp++;
584 state->wbd_gain_current = tmp->wbd_gain_val; 600 state->wbd_gain_current = tmp->wbd_gain_val;
585 } else 601 } else
586 state->wbd_gain_current = 6; 602 state->wbd_gain_current = 6;
587 603
588 return state->wbd_offset_3_3[state->wbd_gain_current - 6]; 604 return state->wbd_offset_3_3[state->wbd_gain_current - 6];
589} 605}
590
591EXPORT_SYMBOL(dib0070_wbd_offset); 606EXPORT_SYMBOL(dib0070_wbd_offset);
592 607
593#define pgm_read_word(w) (*w) 608#define pgm_read_word(w) (*w)
594static int dib0070_reset(struct dvb_frontend *fe) 609static int dib0070_reset(struct dvb_frontend *fe)
595{ 610{
596 struct dib0070_state *state = fe->tuner_priv; 611 struct dib0070_state *state = fe->tuner_priv;
597 u16 l, r, *n; 612 u16 l, r, *n;
598 613
599 HARD_RESET(state); 614 HARD_RESET(state);
600 615
616
601#ifndef FORCE_SBAND_TUNER 617#ifndef FORCE_SBAND_TUNER
602 if ((dib0070_read_reg(state, 0x22) >> 9) & 0x1) 618 if ((dib0070_read_reg(state, 0x22) >> 9) & 0x1)
603 state->revision = (dib0070_read_reg(state, 0x1f) >> 8) & 0xff; 619 state->revision = (dib0070_read_reg(state, 0x1f) >> 8) & 0xff;
@@ -605,7 +621,7 @@ static int dib0070_reset(struct dvb_frontend *fe)
605#else 621#else
606#warning forcing SBAND 622#warning forcing SBAND
607#endif 623#endif
608 state->revision = DIB0070S_P1A; 624 state->revision = DIB0070S_P1A;
609 625
610 /* P1F or not */ 626 /* P1F or not */
611 dprintk("Revision: %x", state->revision); 627 dprintk("Revision: %x", state->revision);
@@ -620,7 +636,7 @@ static int dib0070_reset(struct dvb_frontend *fe)
620 while (l) { 636 while (l) {
621 r = pgm_read_word(n++); 637 r = pgm_read_word(n++);
622 do { 638 do {
623 dib0070_write_reg(state, (u8) r, pgm_read_word(n++)); 639 dib0070_write_reg(state, (u8)r, pgm_read_word(n++));
624 r++; 640 r++;
625 } while (--l); 641 } while (--l);
626 l = pgm_read_word(n++); 642 l = pgm_read_word(n++);
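
The dib0070_p1f_defaults table earlier in this diff is run-length encoded: a count, a starting register, then count values, with a zero count terminating the list, and the loop above in dib0070_reset simply replays those runs through dib0070_write_reg. A stand-alone decoder for the same format, seeded with the first run of the real table:

#include <stdio.h>
#include <stdint.h>

static const uint16_t defaults[] = {
	7, 0x02,
	0x0008, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0100,
	0,				/* terminator */
};

int main(void)
{
	const uint16_t *n = defaults;
	uint16_t l = *n++;

	while (l) {
		uint16_t r = *n++;
		do {
			printf("reg 0x%02x <- 0x%04x\n", (unsigned)r, (unsigned)*n++);
			r++;
		} while (--l);
		l = *n++;
	}
	return 0;
}

Run against the full table this writes registers 0x02..0x08, 0x0d..0x0f, 0x11..0x14, 0x16..0x18 and 0x1b..0x20, matching the groups visible in the table above.
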
@@ -633,6 +649,7 @@ static int dib0070_reset(struct dvb_frontend *fe)
633 else 649 else
634 r = 2; 650 r = 2;
635 651
652
636 r |= state->cfg->osc_buffer_state << 3; 653 r |= state->cfg->osc_buffer_state << 3;
637 654
638 dib0070_write_reg(state, 0x10, r); 655 dib0070_write_reg(state, 0x10, r);
@@ -643,16 +660,24 @@ static int dib0070_reset(struct dvb_frontend *fe)
643 dib0070_write_reg(state, 0x02, r | (1 << 5)); 660 dib0070_write_reg(state, 0x02, r | (1 << 5));
644 } 661 }
645 662
646 if (state->revision == DIB0070S_P1A) 663 if (state->revision == DIB0070S_P1A)
647 dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0); 664 dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0);
648 else 665 else
649 dib0070_set_ctrl_lo5(fe, 5, 4, state->cfg->charge_pump, state->cfg->enable_third_order_filter); 666 dib0070_set_ctrl_lo5(fe, 5, 4, state->cfg->charge_pump, state->cfg->enable_third_order_filter);
650 667
651 dib0070_write_reg(state, 0x01, (54 << 9) | 0xc8); 668 dib0070_write_reg(state, 0x01, (54 << 9) | 0xc8);
652 669
653 dib0070_wbd_offset_calibration(state); 670 dib0070_wbd_offset_calibration(state);
654 671
655 return 0; 672 return 0;
673}
674
675static int dib0070_get_frequency(struct dvb_frontend *fe, u32 *frequency)
676{
677 struct dib0070_state *state = fe->tuner_priv;
678
679 *frequency = 1000 * state->current_rf;
680 return 0;
656} 681}
657 682
658static int dib0070_release(struct dvb_frontend *fe) 683static int dib0070_release(struct dvb_frontend *fe)
@@ -664,18 +689,18 @@ static int dib0070_release(struct dvb_frontend *fe)
664 689
665static const struct dvb_tuner_ops dib0070_ops = { 690static const struct dvb_tuner_ops dib0070_ops = {
666 .info = { 691 .info = {
667 .name = "DiBcom DiB0070", 692 .name = "DiBcom DiB0070",
668 .frequency_min = 45000000, 693 .frequency_min = 45000000,
669 .frequency_max = 860000000, 694 .frequency_max = 860000000,
670 .frequency_step = 1000, 695 .frequency_step = 1000,
671 }, 696 },
672 .release = dib0070_release, 697 .release = dib0070_release,
673 698
674 .init = dib0070_wakeup, 699 .init = dib0070_wakeup,
675 .sleep = dib0070_sleep, 700 .sleep = dib0070_sleep,
676 .set_params = dib0070_tune, 701 .set_params = dib0070_tune,
677 702
678// .get_frequency = dib0070_get_frequency, 703 .get_frequency = dib0070_get_frequency,
679// .get_bandwidth = dib0070_get_bandwidth 704// .get_bandwidth = dib0070_get_bandwidth
680}; 705};
681 706
@@ -687,7 +712,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
687 712
688 state->cfg = cfg; 713 state->cfg = cfg;
689 state->i2c = i2c; 714 state->i2c = i2c;
690 state->fe = fe; 715 state->fe = fe;
691 fe->tuner_priv = state; 716 fe->tuner_priv = state;
692 717
693 if (dib0070_reset(fe) != 0) 718 if (dib0070_reset(fe) != 0)
@@ -699,12 +724,11 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
699 fe->tuner_priv = state; 724 fe->tuner_priv = state;
700 return fe; 725 return fe;
701 726
702 free_mem: 727free_mem:
703 kfree(state); 728 kfree(state);
704 fe->tuner_priv = NULL; 729 fe->tuner_priv = NULL;
705 return NULL; 730 return NULL;
706} 731}
707
708EXPORT_SYMBOL(dib0070_attach); 732EXPORT_SYMBOL(dib0070_attach);
709 733
710MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); 734MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
diff --git a/drivers/media/dvb/frontends/dib0070.h b/drivers/media/dvb/frontends/dib0070.h
index eec9e52ffa75..45c31fae3967 100644
--- a/drivers/media/dvb/frontends/dib0070.h
+++ b/drivers/media/dvb/frontends/dib0070.h
@@ -52,6 +52,8 @@ struct dib0070_config {
52extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg); 52extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg);
53extern u16 dib0070_wbd_offset(struct dvb_frontend *); 53extern u16 dib0070_wbd_offset(struct dvb_frontend *);
54extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open); 54extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open);
55extern u8 dib0070_get_rf_output(struct dvb_frontend *fe);
56extern int dib0070_set_rf_output(struct dvb_frontend *fe, u8 no);
55#else 57#else
56static inline struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg) 58static inline struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg)
57{ 59{
@@ -62,7 +64,7 @@ static inline struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struc
62static inline u16 dib0070_wbd_offset(struct dvb_frontend *fe) 64static inline u16 dib0070_wbd_offset(struct dvb_frontend *fe)
63{ 65{
64 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 66 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
65 return -ENODEV; 67 return 0;
66} 68}
67 69
68static inline void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open) 70static inline void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
new file mode 100644
index 000000000000..614552709a6f
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib0090.c
@@ -0,0 +1,1522 @@
1/*
2 * Linux-DVB Driver for DiBcom's DiB0090 base-band RF Tuner.
3 *
4 * Copyright (C) 2005-9 DiBcom (http://www.dibcom.fr/)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 *
22 * This code is more or less generated from another driver, please
23 * excuse some coding-style oddities.
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/i2c.h>
29
30#include "dvb_frontend.h"
31
32#include "dib0090.h"
33#include "dibx000_common.h"
34
35static int debug;
36module_param(debug, int, 0644);
37MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
38
39#define dprintk(args...) do { \
40 if (debug) { \
41 printk(KERN_DEBUG "DiB0090: "); \
42 printk(args); \
43 printk("\n"); \
44 } \
45} while (0)
46
47#define CONFIG_SYS_ISDBT
48#define CONFIG_BAND_CBAND
49#define CONFIG_BAND_VHF
50#define CONFIG_BAND_UHF
51#define CONFIG_DIB0090_USE_PWM_AGC
52
53#define EN_LNA0 0x8000
54#define EN_LNA1 0x4000
55#define EN_LNA2 0x2000
56#define EN_LNA3 0x1000
57#define EN_MIX0 0x0800
58#define EN_MIX1 0x0400
59#define EN_MIX2 0x0200
60#define EN_MIX3 0x0100
61#define EN_IQADC 0x0040
62#define EN_PLL 0x0020
63#define EN_TX 0x0010
64#define EN_BB 0x0008
65#define EN_LO 0x0004
66#define EN_BIAS 0x0001
67
68#define EN_IQANA 0x0002
69#define EN_DIGCLK 0x0080 /* not in the 0x24 reg, only in 0x1b */
70#define EN_CRYSTAL 0x0002
71
72#define EN_UHF 0x22E9
73#define EN_VHF 0x44E9
74#define EN_LBD 0x11E9
75#define EN_SBD 0x44E9
76#define EN_CAB 0x88E9
77
78#define pgm_read_word(w) (*w)
79
80struct dc_calibration;
81
82struct dib0090_tuning {
83 u32 max_freq; /* this entry is valid for every frequency less than or equal to this value */
84 u8 switch_trim;
85 u8 lna_tune;
86 u8 lna_bias;
87 u16 v2i;
88 u16 mix;
89 u16 load;
90 u16 tuner_enable;
91};
92
93struct dib0090_pll {
94 u32 max_freq; /* this entry is valid for every frequency less than or equal to this value */
95 u8 vco_band;
96 u8 hfdiv_code;
97 u8 hfdiv;
98 u8 topresc;
99};
100
101struct dib0090_state {
102 struct i2c_adapter *i2c;
103 struct dvb_frontend *fe;
104 const struct dib0090_config *config;
105
106 u8 current_band;
107 u16 revision;
108 enum frontend_tune_state tune_state;
109 u32 current_rf;
110
111 u16 wbd_offset;
112 s16 wbd_target; /* in dB */
113
114 s16 rf_gain_limit; /* take-over-point: where to split between bb and rf gain */
115 s16 current_gain; /* keeps the currently programmed gain */
116 u8 agc_step; /* new binary search */
117
118 u16 gain[2]; /* for channel monitoring */
119
120 const u16 *rf_ramp;
121 const u16 *bb_ramp;
122
123 /* for the software AGC ramps */
124 u16 bb_1_def;
125 u16 rf_lt_def;
126 u16 gain_reg[4];
127
128 /* for the captrim/dc-offset search */
129 s8 step;
130 s16 adc_diff;
131 s16 min_adc_diff;
132
133 s8 captrim;
134 s8 fcaptrim;
135
136 const struct dc_calibration *dc;
137 u16 bb6, bb7;
138
139 const struct dib0090_tuning *current_tune_table_index;
140 const struct dib0090_pll *current_pll_table_index;
141
142 u8 tuner_is_tuned;
143 u8 agc_freeze;
144
145 u8 reset;
146};
147
148static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
149{
150 u8 b[2];
151 struct i2c_msg msg[2] = {
152 {.addr = state->config->i2c_address, .flags = 0, .buf = &reg, .len = 1},
153 {.addr = state->config->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2},
154 };
155 if (i2c_transfer(state->i2c, msg, 2) != 2) {
156 printk(KERN_WARNING "DiB0090 I2C read failed\n");
157 return 0;
158 }
159 return (b[0] << 8) | b[1];
160}
161
162static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
163{
164 u8 b[3] = { reg & 0xff, val >> 8, val & 0xff };
165 struct i2c_msg msg = {.addr = state->config->i2c_address, .flags = 0, .buf = b, .len = 3 };
166 if (i2c_transfer(state->i2c, &msg, 1) != 1) {
167 printk(KERN_WARNING "DiB0090 I2C write failed\n");
168 return -EREMOTEIO;
169 }
170 return 0;
171}
172
173#define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0)
174#define ADC_TARGET -220
175#define GAIN_ALPHA 5
176#define WBD_ALPHA 6
177#define LPF 100
178static void dib0090_write_regs(struct dib0090_state *state, u8 r, const u16 * b, u8 c)
179{
180 do {
181 dib0090_write_reg(state, r++, *b++);
182 } while (--c);
183}
184
185static u16 dib0090_identify(struct dvb_frontend *fe)
186{
187 struct dib0090_state *state = fe->tuner_priv;
188 u16 v;
189
190 v = dib0090_read_reg(state, 0x1a);
191
192#ifdef FIRMWARE_FIREFLY
193 /* pll is not yet locked */
194 if (!(v & 0x800))
195 dprintk("FE%d : Identification : pll is not yet locked", fe->id);
196#endif
197
198 /* without PLL lock info */
199 v &= 0x3ff;
200 dprintk("P/V: %04x:", v);
201
202 if ((v >> 8) & 0xf)
203 dprintk("FE%d : Product ID = 0x%x : KROSUS", fe->id, (v >> 8) & 0xf);
204 else
205 return 0xff;
206
207 v &= 0xff;
208 if (((v >> 5) & 0x7) == 0x1)
209 dprintk("FE%d : MP001 : 9090/8096", fe->id);
210 else if (((v >> 5) & 0x7) == 0x4)
211 dprintk("FE%d : MP005 : Single Sband", fe->id);
212 else if (((v >> 5) & 0x7) == 0x6)
213 dprintk("FE%d : MP008 : diversity VHF-UHF-LBAND", fe->id);
214 else if (((v >> 5) & 0x7) == 0x7)
215 dprintk("FE%d : MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND", fe->id);
216 else
217 return 0xff;
218
219 /* revision only */
220 if ((v & 0x1f) == 0x3)
221 dprintk("FE%d : P1-D/E/F detected", fe->id);
222 else if ((v & 0x1f) == 0x1)
223 dprintk("FE%d : P1C detected", fe->id);
224 else if ((v & 0x1f) == 0x0) {
225#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
226 dprintk("FE%d : P1-A/B detected: using previous driver - support will be removed soon", fe->id);
227 dib0090_p1b_register(fe);
228#else
229 dprintk("FE%d : P1-A/B detected: driver is deactivated - not available", fe->id);
230 return 0xff;
231#endif
232 }
233
234 return v;
235}
236
237static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg)
238{
239 struct dib0090_state *state = fe->tuner_priv;
240
241 HARD_RESET(state);
242
243 dib0090_write_reg(state, 0x24, EN_PLL);
244 dib0090_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */
245
246 /* adcClkOutRatio=8->7, release reset */
247 dib0090_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (0 << 4) | 0);
248 if (cfg->clkoutdrive != 0)
249 dib0090_write_reg(state, 0x23,
250 (0 << 15) | ((!cfg->analog_output) << 14) |
251 (1 << 10) | (1 << 9) | (0 << 8) |
252 (cfg->clkoutdrive << 5) |
253 (cfg->clkouttobamse << 4) |
254 (0 << 2) |
255 (0));
256 else
257 dib0090_write_reg(state, 0x23,
258 (0 << 15) | ((!cfg->analog_output) << 14) |
259 (1 << 10) | (1 << 9) | (0 << 8) | (7 << 5) |
260 (cfg->clkouttobamse << 4) |
261 (0 << 2) |
262 (0));
263
264 /* enable pll, de-activate reset, ratio: 2/1 = 60MHz */
265 dib0090_write_reg(state, 0x21,
266 (cfg->io.pll_bypass << 15) | (1 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv));
267
268}
269
270static int dib0090_wakeup(struct dvb_frontend *fe)
271{
272 struct dib0090_state *state = fe->tuner_priv;
273 if (state->config->sleep)
274 state->config->sleep(fe, 0);
275 return 0;
276}
277
278static int dib0090_sleep(struct dvb_frontend *fe)
279{
280 struct dib0090_state *state = fe->tuner_priv;
281 if (state->config->sleep)
282 state->config->sleep(fe, 1);
283 return 0;
284}
285
286extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast)
287{
288 struct dib0090_state *state = fe->tuner_priv;
289 if (fast)
290 dib0090_write_reg(state, 0x04, 0);
291 else
292 dib0090_write_reg(state, 0x04, 1);
293}
294EXPORT_SYMBOL(dib0090_dcc_freq);
295
296static const u16 rf_ramp_pwm_cband[] = {
297 0, /* max RF gain in 10th of dB */
298 0, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */
299 0, /* ramp_max = maximum X used on the ramp */
300 (0 << 10) | 0, /* 0x2c, LNA 1 = 0dB */
301 (0 << 10) | 0, /* 0x2d, LNA 1 */
302 (0 << 10) | 0, /* 0x2e, LNA 2 = 0dB */
303 (0 << 10) | 0, /* 0x2f, LNA 2 */
304 (0 << 10) | 0, /* 0x30, LNA 3 = 0dB */
305 (0 << 10) | 0, /* 0x31, LNA 3 */
306 (0 << 10) | 0, /* GAIN_4_1, LNA 4 = 0dB */
307 (0 << 10) | 0, /* GAIN_4_2, LNA 4 */
308};
309
310static const u16 rf_ramp_vhf[] = {
311 412, /* max RF gain in 10th of dB */
312 132, 307, 127, /* LNA1, 13.2dB */
313 105, 412, 255, /* LNA2, 10.5dB */
314 50, 50, 127, /* LNA3, 5dB */
315 125, 175, 127, /* LNA4, 12.5dB */
316 0, 0, 127, /* CBAND, 0dB */
317};
318
319static const u16 rf_ramp_uhf[] = {
320 412, /* max RF gain in 10th of dB */
321 132, 307, 127, /* LNA1 : total gain = 13.2dB, point on the ramp where this amp is full gain, value to write to get full gain */
322 105, 412, 255, /* LNA2 : 10.5 dB */
323 50, 50, 127, /* LNA3 : 5.0 dB */
324 125, 175, 127, /* LNA4 : 12.5 dB */
325 0, 0, 127, /* CBAND : 0.0 dB */
326};
327
328static const u16 rf_ramp_cband[] = {
329 332, /* max RF gain in 10th of dB */
330 132, 252, 127, /* LNA1, dB */
331 80, 332, 255, /* LNA2, dB */
332 0, 0, 127, /* LNA3, dB */
333 0, 0, 127, /* LNA4, dB */
334 120, 120, 127, /* LT1 CBAND */
335};
336
337static const u16 rf_ramp_pwm_vhf[] = {
338 404, /* max RF gain in 10th of dB */
339 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */
340 1011, /* ramp_max = maximum X used on the ramp */
341 (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */
342 (0 << 10) | 756, /* 0x2d, LNA 1 */
343 (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */
344 (0 << 10) | 1011, /* 0x2f, LNA 2 */
345 (16 << 10) | 290, /* 0x30, LNA 3 = 5dB */
346 (0 << 10) | 417, /* 0x31, LNA 3 */
347 (7 << 10) | 0, /* GAIN_4_1, LNA 4 = 12.5dB */
348 (0 << 10) | 290, /* GAIN_4_2, LNA 4 */
349};
350
351static const u16 rf_ramp_pwm_uhf[] = {
352 404, /* max RF gain in 10th of dB */
353 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */
354 1011, /* ramp_max = maximum X used on the ramp */
355 (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */
356 (0 << 10) | 756, /* 0x2d, LNA 1 */
357 (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */
358 (0 << 10) | 1011, /* 0x2f, LNA 2 */
359 (16 << 10) | 0, /* 0x30, LNA 3 = 5dB */
360 (0 << 10) | 127, /* 0x31, LNA 3 */
361 (7 << 10) | 127, /* GAIN_4_1, LNA 4 = 12.5dB */
362 (0 << 10) | 417, /* GAIN_4_2, LNA 4 */
363};
364
365static const u16 bb_ramp_boost[] = {
366 550, /* max BB gain in 10th of dB */
367 260, 260, 26, /* BB1, 26dB */
368 290, 550, 29, /* BB2, 29dB */
369};
370
371static const u16 bb_ramp_pwm_normal[] = {
372 500, /* max RF gain in 10th of dB */
373 8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x34 */
374 400,
375 (2 << 9) | 0, /* 0x35 = 21dB */
376 (0 << 9) | 168, /* 0x36 */
377 (2 << 9) | 168, /* 0x37 = 29dB */
378 (0 << 9) | 400, /* 0x38 */
379};
380
381struct slope {
382 int16_t range;
383 int16_t slope;
384};
385static u16 slopes_to_scale(const struct slope *slopes, u8 num, s16 val)
386{
387 u8 i;
388 u16 rest;
389 u16 ret = 0;
390 for (i = 0; i < num; i++) {
391 if (val > slopes[i].range)
392 rest = slopes[i].range;
393 else
394 rest = val;
395 ret += (rest * slopes[i].slope) / slopes[i].range;
396 val -= rest;
397 }
398 return ret;
399}
400
401static const struct slope dib0090_wbd_slopes[3] = {
402 {66, 120}, /* -64,-52: offset - 65 */
403 {600, 170}, /* -52,-35: 65 - 665 */
404 {170, 250}, /* -45,-10: 665 - 835 */
405};
406
407static s16 dib0090_wbd_to_db(struct dib0090_state *state, u16 wbd)
408{
409 wbd &= 0x3ff;
410 if (wbd < state->wbd_offset)
411 wbd = 0;
412 else
413 wbd -= state->wbd_offset;
414 /* -64dB is the floor */
415 return -640 + (s16) slopes_to_scale(dib0090_wbd_slopes, ARRAY_SIZE(dib0090_wbd_slopes), wbd);
416}
417
418static void dib0090_wbd_target(struct dib0090_state *state, u32 rf)
419{
420 u16 offset = 250;
421
422 /* TODO : DAB digital N+/-1 interferer perfs : offset = 10 */
423
424 if (state->current_band == BAND_VHF)
425 offset = 650;
426#ifndef FIRMWARE_FIREFLY
427 if (state->current_band == BAND_VHF)
428 offset = state->config->wbd_vhf_offset;
429 if (state->current_band == BAND_CBAND)
430 offset = state->config->wbd_cband_offset;
431#endif
432
433 state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset);
434 dprintk("wbd-target: %d dB", (u32) state->wbd_target);
435}
436
437static const int gain_reg_addr[4] = {
438 0x08, 0x0a, 0x0f, 0x01
439};
440
441static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16 top_delta, u8 force)
442{
443 u16 rf, bb, ref;
444 u16 i, v, gain_reg[4] = { 0 }, gain;
445 const u16 *g;
446
447 if (top_delta < -511)
448 top_delta = -511;
449 if (top_delta > 511)
450 top_delta = 511;
451
452 if (force) {
453 top_delta *= (1 << WBD_ALPHA);
454 gain_delta *= (1 << GAIN_ALPHA);
455 }
456
457 if (top_delta >= ((s16) (state->rf_ramp[0] << WBD_ALPHA) - state->rf_gain_limit)) /* overflow */
458 state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA;
459 else
460 state->rf_gain_limit += top_delta;
461
462 if (state->rf_gain_limit < 0) /*underflow */
463 state->rf_gain_limit = 0;
464
465 /* use gain as a temporary variable and correct current_gain */
466 gain = ((state->rf_gain_limit >> WBD_ALPHA) + state->bb_ramp[0]) << GAIN_ALPHA;
467 if (gain_delta >= ((s16) gain - state->current_gain)) /* overflow */
468 state->current_gain = gain;
469 else
470 state->current_gain += gain_delta;
471 /* cannot be less than 0 (only if gain_delta is less than 0 we can have current_gain < 0) */
472 if (state->current_gain < 0)
473 state->current_gain = 0;
474
475 /* now split total gain to rf and bb gain */
476 gain = state->current_gain >> GAIN_ALPHA;
477
478 /* requested gain is bigger than rf gain limit - ACI/WBD adjustment */
479 if (gain > (state->rf_gain_limit >> WBD_ALPHA)) {
480 rf = state->rf_gain_limit >> WBD_ALPHA;
481 bb = gain - rf;
482 if (bb > state->bb_ramp[0])
483 bb = state->bb_ramp[0];
484 } else { /* high signal level -> all gains put on RF */
485 rf = gain;
486 bb = 0;
487 }
488
489 state->gain[0] = rf;
490 state->gain[1] = bb;
491
492 /* software ramp */
493 /* Start with RF gains */
494 g = state->rf_ramp + 1; /* point on RF LNA1 max gain */
495 ref = rf;
496 for (i = 0; i < 7; i++) { /* Go over all amplifiers => 5 RF amps + 2 BB amps = 7 amps */
497 if (g[0] == 0 || ref < (g[1] - g[0])) /* the total gain of this amp is zero, or this amp is not involved because it only starts to contribute at a higher gain value */
498 v = 0; /* force the gain written for this amp to zero */
499 else if (ref >= g[1]) /* the gain to set is higher than the upper working point of this amp */
500 v = g[2]; /* force this amp to full gain */
501 else /* compute the value for this amp because we are somewhere within its range */
502 v = ((ref - (g[1] - g[0])) * g[2]) / g[0];
503
504 if (i == 0) /* LNA 1 reg mapping */
505 gain_reg[0] = v;
506 else if (i == 1) /* LNA 2 reg mapping */
507 gain_reg[0] |= v << 7;
508 else if (i == 2) /* LNA 3 reg mapping */
509 gain_reg[1] = v;
510 else if (i == 3) /* LNA 4 reg mapping */
511 gain_reg[1] |= v << 7;
512 else if (i == 4) /* CBAND LNA reg mapping */
513 gain_reg[2] = v | state->rf_lt_def;
514 else if (i == 5) /* BB gain 1 reg mapping */
515 gain_reg[3] = v << 3;
516 else if (i == 6) /* BB gain 2 reg mapping */
517 gain_reg[3] |= v << 8;
518
519 g += 3; /* go to the next gain block */
520
521 /* When RF is finished, start with BB */
522 if (i == 4) {
523 g = state->bb_ramp + 1; /* point on BB gain 1 max gain */
524 ref = bb;
525 }
526 }
527 gain_reg[3] |= state->bb_1_def;
528 gain_reg[3] |= ((bb % 10) * 100) / 125;
529
530#ifdef DEBUG_AGC
531 dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[3]=%04x", rf, bb, rf + bb,
532 gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]);
533#endif
534
535 /* Write the amplifier regs */
536 for (i = 0; i < 4; i++) {
537 v = gain_reg[i];
538 if (force || state->gain_reg[i] != v) {
539 state->gain_reg[i] = v;
540 dib0090_write_reg(state, gain_reg_addr[i], v);
541 }
542 }
543}
544
545static void dib0090_set_boost(struct dib0090_state *state, int onoff)
546{
547 state->bb_1_def &= 0xdfff;
548 state->bb_1_def |= onoff << 13;
549}
550
551static void dib0090_set_rframp(struct dib0090_state *state, const u16 * cfg)
552{
553 state->rf_ramp = cfg;
554}
555
556static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg)
557{
558 state->rf_ramp = cfg;
559
560 dib0090_write_reg(state, 0x2a, 0xffff);
561
562 dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
563
564 dib0090_write_regs(state, 0x2c, cfg + 3, 6);
565 dib0090_write_regs(state, 0x3e, cfg + 9, 2);
566}
567
568static void dib0090_set_bbramp(struct dib0090_state *state, const u16 * cfg)
569{
570 state->bb_ramp = cfg;
571 dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher than 50dB */
572}
573
574static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg)
575{
576 state->bb_ramp = cfg;
577
578 dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher than 50dB */
579
580 dib0090_write_reg(state, 0x33, 0xffff);
581 dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33));
582 dib0090_write_regs(state, 0x35, cfg + 3, 4);
583}
584
585void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
586{
587 struct dib0090_state *state = fe->tuner_priv;
588 /* reset the AGC */
589
590 if (state->config->use_pwm_agc) {
591#ifdef CONFIG_BAND_SBAND
592 if (state->current_band == BAND_SBAND) {
593 dib0090_set_rframp_pwm(state, rf_ramp_pwm_sband);
594 dib0090_set_bbramp_pwm(state, bb_ramp_pwm_boost);
595 } else
596#endif
597#ifdef CONFIG_BAND_CBAND
598 if (state->current_band == BAND_CBAND) {
599 dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband);
600 dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
601 } else
602#endif
603#ifdef CONFIG_BAND_VHF
604 if (state->current_band == BAND_VHF) {
605 dib0090_set_rframp_pwm(state, rf_ramp_pwm_vhf);
606 dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
607 } else
608#endif
609 {
610 dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf);
611 dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
612 }
613
614 if (state->rf_ramp[0] != 0)
615 dib0090_write_reg(state, 0x32, (3 << 11));
616 else
617 dib0090_write_reg(state, 0x32, (0 << 11));
618
619 dib0090_write_reg(state, 0x39, (1 << 10));
620 }
621}
622EXPORT_SYMBOL(dib0090_pwm_gain_reset);
623
624int dib0090_gain_control(struct dvb_frontend *fe)
625{
626 struct dib0090_state *state = fe->tuner_priv;
627 enum frontend_tune_state *tune_state = &state->tune_state;
628 int ret = 10;
629
630 u16 wbd_val = 0;
631 u8 apply_gain_immediatly = 1;
632 s16 wbd_error = 0, adc_error = 0;
633
634 if (*tune_state == CT_AGC_START) {
635 state->agc_freeze = 0;
636 dib0090_write_reg(state, 0x04, 0x0);
637
638#ifdef CONFIG_BAND_SBAND
639 if (state->current_band == BAND_SBAND) {
640 dib0090_set_rframp(state, rf_ramp_sband);
641 dib0090_set_bbramp(state, bb_ramp_boost);
642 } else
643#endif
644#ifdef CONFIG_BAND_VHF
645 if (state->current_band == BAND_VHF) {
646 dib0090_set_rframp(state, rf_ramp_vhf);
647 dib0090_set_bbramp(state, bb_ramp_boost);
648 } else
649#endif
650#ifdef CONFIG_BAND_CBAND
651 if (state->current_band == BAND_CBAND) {
652 dib0090_set_rframp(state, rf_ramp_cband);
653 dib0090_set_bbramp(state, bb_ramp_boost);
654 } else
655#endif
656 {
657 dib0090_set_rframp(state, rf_ramp_uhf);
658 dib0090_set_bbramp(state, bb_ramp_boost);
659 }
660
661 dib0090_write_reg(state, 0x32, 0);
662 dib0090_write_reg(state, 0x39, 0);
663
664 dib0090_wbd_target(state, state->current_rf);
665
666 state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA;
667 state->current_gain = ((state->rf_ramp[0] + state->bb_ramp[0]) / 2) << GAIN_ALPHA;
668
669 *tune_state = CT_AGC_STEP_0;
670 } else if (!state->agc_freeze) {
671 s16 wbd;
672
673 int adc;
674 wbd_val = dib0090_read_reg(state, 0x1d);
675
676 /* read and calc the wbd power */
677 wbd = dib0090_wbd_to_db(state, wbd_val);
678 wbd_error = state->wbd_target - wbd;
679
680 if (*tune_state == CT_AGC_STEP_0) {
681 if (wbd_error < 0 && state->rf_gain_limit > 0) {
682#ifdef CONFIG_BAND_CBAND
683 /* in the case of a CBAND tune, reduce lt_gain2 first before adjusting the RF gain */
684 u8 ltg2 = (state->rf_lt_def >> 10) & 0x7;
685 if (state->current_band == BAND_CBAND && ltg2) {
686 ltg2 >>= 1;
687 state->rf_lt_def &= ltg2 << 10; /* reduce in 3 steps from 7 to 0 */
688 }
689#endif
690 } else {
691 state->agc_step = 0;
692 *tune_state = CT_AGC_STEP_1;
693 }
694 } else {
695 /* calc the adc power */
696 adc = state->config->get_adc_power(fe);
697 adc = (adc * ((s32) 355774) + (((s32) 1) << 20)) >> 21; /* included in [0:-700] */
698
699 adc_error = (s16) (((s32) ADC_TARGET) - adc);
700#ifdef CONFIG_STANDARD_DAB
701 if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB)
702 adc_error += 130;
703#endif
704#ifdef CONFIG_STANDARD_DVBT
705 if (state->fe->dtv_property_cache.delivery_system == STANDARD_DVBT &&
706 (state->fe->dtv_property_cache.modulation == QAM_64 || state->fe->dtv_property_cache.modulation == QAM_16))
707 adc_error += 60;
708#endif
709#ifdef CONFIG_SYS_ISDBT
710 if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) &&
711 (((state->fe->dtv_property_cache.layer[0].segment_count > 0) &&
712 ((state->fe->dtv_property_cache.layer[0].modulation == QAM_64) ||
713 (state->fe->dtv_property_cache.layer[0].modulation == QAM_16))) ||
714 ((state->fe->dtv_property_cache.layer[1].segment_count > 0) &&
715 ((state->fe->dtv_property_cache.layer[1].modulation == QAM_64) ||
716 (state->fe->dtv_property_cache.layer[1].modulation == QAM_16))) ||
717 ((state->fe->dtv_property_cache.layer[2].segment_count > 0) &&
718 ((state->fe->dtv_property_cache.layer[2].modulation == QAM_64) ||
719 (state->fe->dtv_property_cache.layer[2].modulation == QAM_16)))))
735 adc_error += 60;
736#endif
737
738 if (*tune_state == CT_AGC_STEP_1) { /* quickly go to the correct range of the ADC power */
739 if (ABS(adc_error) < 50 || state->agc_step++ > 5) {
740
741#ifdef CONFIG_STANDARD_DAB
742 if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB) {
743 dib0090_write_reg(state, 0x02, (1 << 15) | (15 << 11) | (31 << 6) | (63)); /* cap value = 63 : narrow BB filter : Fc = 1.8MHz */
744 dib0090_write_reg(state, 0x04, 0x0);
745 } else
746#endif
747 {
748 dib0090_write_reg(state, 0x02, (1 << 15) | (3 << 11) | (6 << 6) | (32));
749 dib0090_write_reg(state, 0x04, 0x01); /*0 = 1KHz ; 1 = 150Hz ; 2 = 50Hz ; 3 = 50KHz ; 4 = servo fast */
750 }
751
752 *tune_state = CT_AGC_STOP;
753 }
754 } else {
755 /* everything higher than or equal to CT_AGC_STOP means tracking */
756 ret = 100; /* 10ms interval */
757 apply_gain_immediatly = 0;
758 }
759 }
760#ifdef DEBUG_AGC
761 dprintk("FE: %d, tune state %d, ADC = %3ddB (ADC err %3d) WBD %3ddB (WBD err %3d, WBD val SADC: %4d), RFGainLimit (TOP): %3d, signal: %3ddBm",
762 (u32) fe->id, (u32) *tune_state, (u32) adc, (u32) adc_error,
763 (u32) wbd, (u32) wbd_error, (u32) wbd_val,
764 (u32) state->rf_gain_limit >> WBD_ALPHA, (s32) 200 + adc - (state->current_gain >> GAIN_ALPHA));
765#endif
766 }
767
768 /* apply gain */
769 if (!state->agc_freeze)
770 dib0090_gain_apply(state, adc_error, wbd_error, apply_gain_immediatly);
771 return ret;
772}
773EXPORT_SYMBOL(dib0090_gain_control);
774
775void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt)
776{
777 struct dib0090_state *state = fe->tuner_priv;
778 if (rf)
779 *rf = state->gain[0];
780 if (bb)
781 *bb = state->gain[1];
782 if (rf_gain_limit)
783 *rf_gain_limit = state->rf_gain_limit;
784 if (rflt)
785 *rflt = (state->rf_lt_def >> 10) & 0x7;
786}
787EXPORT_SYMBOL(dib0090_get_current_gain);
788
789u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner)
790{
791 struct dib0090_state *st = tuner->tuner_priv;
792 return st->wbd_offset;
793}
794EXPORT_SYMBOL(dib0090_get_wbd_offset);
795
796static const u16 dib0090_defaults[] = {
797
798 25, 0x01,
799 0x0000,
800 0x99a0,
801 0x6008,
802 0x0000,
803 0x8acb,
804 0x0000,
805 0x0405,
806 0x0000,
807 0x0000,
808 0x0000,
809 0xb802,
810 0x0300,
811 0x2d12,
812 0xbac0,
813 0x7c00,
814 0xdbb9,
815 0x0954,
816 0x0743,
817 0x8000,
818 0x0001,
819 0x0040,
820 0x0100,
821 0x0000,
822 0xe910,
823 0x149e,
824
825 1, 0x1c,
826 0xff2d,
827
828 1, 0x39,
829 0x0000,
830
831 1, 0x1b,
832 EN_IQADC | EN_BB | EN_BIAS | EN_DIGCLK | EN_PLL | EN_CRYSTAL,
833 2, 0x1e,
834 0x07FF,
835 0x0007,
836
837 1, 0x24,
838 EN_UHF | EN_CRYSTAL,
839
840 2, 0x3c,
841 0x3ff,
842 0x111,
843 0
844};
845
846static int dib0090_reset(struct dvb_frontend *fe)
847{
848 struct dib0090_state *state = fe->tuner_priv;
849 u16 l, r, *n;
850
851 dib0090_reset_digital(fe, state->config);
852 state->revision = dib0090_identify(fe);
853
854 /* Revision definition */
855 if (state->revision == 0xff)
856 return -EINVAL;
857#ifdef EFUSE
858 else if ((state->revision & 0x1f) >= 3) /* Update the efuse : Only available for KROSUS > P1C */
859 dib0090_set_EFUSE(state);
860#endif
861
862#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
863 if (!(state->revision & 0x1)) /* it is P1B - reset is already done */
864 return 0;
865#endif
866
867 /* Upload the default values */
868 n = (u16 *) dib0090_defaults;
869 l = pgm_read_word(n++);
870 while (l) {
871 r = pgm_read_word(n++);
872 do {
873 /* DEBUG_TUNER */
874 /* dprintk("%d, %d, %d", l, r, pgm_read_word(n)); */
875 dib0090_write_reg(state, r, pgm_read_word(n++));
876 r++;
877 } while (--l);
878 l = pgm_read_word(n++);
879 }
880
881 /* Configure according to the crystal frequency */
882 if (state->config->io.clock_khz >= 24000)
883 l = 1;
884 else
885 l = 2;
886 dib0090_write_reg(state, 0x14, l);
887 dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
888
889 state->reset = 3; /* enable iq-offset-calibration and wbd-calibration when tuning next time */
890
891 return 0;
892}
893
894#define steps(u) (((u) > 15) ? ((u)-16) : (u))
895#define INTERN_WAIT 10
896static int dib0090_get_offset(struct dib0090_state *state, enum frontend_tune_state *tune_state)
897{
898 int ret = INTERN_WAIT * 10;
899
900 switch (*tune_state) {
901 case CT_TUNER_STEP_2:
902 /* Turns to positive */
903 dib0090_write_reg(state, 0x1f, 0x7);
904 *tune_state = CT_TUNER_STEP_3;
905 break;
906
907 case CT_TUNER_STEP_3:
908 state->adc_diff = dib0090_read_reg(state, 0x1d);
909
910 /* Turns to negative */
911 dib0090_write_reg(state, 0x1f, 0x4);
912 *tune_state = CT_TUNER_STEP_4;
913 break;
914
915 case CT_TUNER_STEP_4:
916 state->adc_diff -= dib0090_read_reg(state, 0x1d);
917 *tune_state = CT_TUNER_STEP_5;
918 ret = 0;
919 break;
920
921 default:
922 break;
923 }
924
925 return ret;
926}
927
928struct dc_calibration {
929 uint8_t addr;
930 uint8_t offset;
931 uint8_t pga:1;
932 uint16_t bb1;
933 uint8_t i:1;
934};
935
936static const struct dc_calibration dc_table[] = {
937 /* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */
938 {0x06, 5, 1, (1 << 13) | (0 << 8) | (26 << 3), 1},
939 {0x07, 11, 1, (1 << 13) | (0 << 8) | (26 << 3), 0},
940 /* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */
941 {0x06, 0, 0, (1 << 13) | (29 << 8) | (26 << 3), 1},
942 {0x06, 10, 0, (1 << 13) | (29 << 8) | (26 << 3), 0},
943 {0},
944};
945
946static void dib0090_set_trim(struct dib0090_state *state)
947{
948 u16 *val;
949
950 if (state->dc->addr == 0x07)
951 val = &state->bb7;
952 else
953 val = &state->bb6;
954
955 *val &= ~(0x1f << state->dc->offset);
956 *val |= state->step << state->dc->offset;
957
958 dib0090_write_reg(state, state->dc->addr, *val);
959}
960
961static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
962{
963 int ret = 0;
964
965 switch (*tune_state) {
966
967 case CT_TUNER_START:
968 /* init */
969 dprintk("Internal DC calibration");
970
971 /* the LNA is off */
972 dib0090_write_reg(state, 0x24, 0x02ed);
973
974 /* force vcm2 = 0.8V */
975 state->bb6 = 0;
976 state->bb7 = 0x040d;
977
978 state->dc = dc_table;
979
980 *tune_state = CT_TUNER_STEP_0;
981
982 /* fall through */
983
984 case CT_TUNER_STEP_0:
985 dib0090_write_reg(state, 0x01, state->dc->bb1);
986 dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));
987
988 state->step = 0;
989
990 state->min_adc_diff = 1023;
991
992 *tune_state = CT_TUNER_STEP_1;
993 ret = 50;
994 break;
995
996 case CT_TUNER_STEP_1:
997 dib0090_set_trim(state);
998
999 *tune_state = CT_TUNER_STEP_2;
1000 break;
1001
1002 case CT_TUNER_STEP_2:
1003 case CT_TUNER_STEP_3:
1004 case CT_TUNER_STEP_4:
1005 ret = dib0090_get_offset(state, tune_state);
1006 break;
1007
1008 case CT_TUNER_STEP_5: /* found an offset */
1009 dprintk("FE%d: IQC read=%d, current=%x", state->fe->id, (u32) state->adc_diff, state->step);
1010
1011 /* first turn for this frequency */
1012 if (state->step == 0) {
1013 if (state->dc->pga && state->adc_diff < 0)
1014 state->step = 0x10;
1015 if (state->dc->pga == 0 && state->adc_diff > 0)
1016 state->step = 0x10;
1017 }
1018
1019 state->adc_diff = ABS(state->adc_diff);
1020
1021 if (state->adc_diff < state->min_adc_diff && steps(state->step) < 15) { /* keep searching as long as the delta to 0 is still decreasing */
1022 state->step++;
1023 state->min_adc_diff = state->adc_diff;
1024 *tune_state = CT_TUNER_STEP_1;
1025 } else {
1026
1027 /* the minimum was the one seen in the previous step */
1028 state->step--;
1029 dib0090_set_trim(state);
1030
1031 dprintk("FE%d: BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->fe->id, state->dc->addr, state->adc_diff,
1032 state->step);
1033
1034 state->dc++;
1035 if (state->dc->addr == 0) /* done */
1036 *tune_state = CT_TUNER_STEP_6;
1037 else
1038 *tune_state = CT_TUNER_STEP_0;
1039
1040 }
1041 break;
1042
1043 case CT_TUNER_STEP_6:
1044 dib0090_write_reg(state, 0x07, state->bb7 & ~0x0008);
1045 dib0090_write_reg(state, 0x1f, 0x7);
1046 *tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
1047 state->reset &= ~0x1;
1048 default:
1049 break;
1050 }
1051 return ret;
1052}
1053
1054static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
1055{
1056 switch (*tune_state) {
1057 case CT_TUNER_START:
1058 /* WBD-mode=log, Bias=2, Gain=6, Testmode=1, en=1, WBDMUX=1 */
1059 dib0090_write_reg(state, 0x10, 0xdb09 | (1 << 10));
1060 dib0090_write_reg(state, 0x24, EN_UHF & 0x0fff);
1061
1062 *tune_state = CT_TUNER_STEP_0;
1063 return 90; /* wait for the WBDMUX to switch and for the ADC to sample */
1064 case CT_TUNER_STEP_0:
1065 state->wbd_offset = dib0090_read_reg(state, 0x1d);
1066 dprintk("WBD calibration offset = %d", state->wbd_offset);
1067
1068 *tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
1069 state->reset &= ~0x2;
1070 break;
1071 default:
1072 break;
1073 }
1074 return 0;
1075}
1076
1077static void dib0090_set_bandwidth(struct dib0090_state *state)
1078{
1079 u16 tmp;
1080
1081 if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 5000)
1082 tmp = (3 << 14);
1083 else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 6000)
1084 tmp = (2 << 14);
1085 else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 7000)
1086 tmp = (1 << 14);
1087 else
1088 tmp = (0 << 14);
1089
1090 state->bb_1_def &= 0x3fff;
1091 state->bb_1_def |= tmp;
1092
1093 dib0090_write_reg(state, 0x01, state->bb_1_def); /* be sure that we have the right bb-filter */
1094}
1095
1096static const struct dib0090_pll dib0090_pll_table[] = {
1097#ifdef CONFIG_BAND_CBAND
1098 {56000, 0, 9, 48, 6},
1099 {70000, 1, 9, 48, 6},
1100 {87000, 0, 8, 32, 4},
1101 {105000, 1, 8, 32, 4},
1102 {115000, 0, 7, 24, 6},
1103 {140000, 1, 7, 24, 6},
1104 {170000, 0, 6, 16, 4},
1105#endif
1106#ifdef CONFIG_BAND_VHF
1107 {200000, 1, 6, 16, 4},
1108 {230000, 0, 5, 12, 6},
1109 {280000, 1, 5, 12, 6},
1110 {340000, 0, 4, 8, 4},
1111 {380000, 1, 4, 8, 4},
1112 {450000, 0, 3, 6, 6},
1113#endif
1114#ifdef CONFIG_BAND_UHF
1115 {580000, 1, 3, 6, 6},
1116 {700000, 0, 2, 4, 4},
1117 {860000, 1, 2, 4, 4},
1118#endif
1119#ifdef CONFIG_BAND_LBAND
1120 {1800000, 1, 0, 2, 4},
1121#endif
1122#ifdef CONFIG_BAND_SBAND
1123 {2900000, 0, 14, 1, 4},
1124#endif
1125};
1126
1127static const struct dib0090_tuning dib0090_tuning_table_fm_vhf_on_cband[] = {
1128
1129#ifdef CONFIG_BAND_CBAND
1130 {184000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
1131 {227000, 4, 3, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
1132 {380000, 4, 7, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
1133#endif
1134#ifdef CONFIG_BAND_UHF
1135 {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1136 {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1137 {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1138 {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1139 {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1140 {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1141#endif
1142#ifdef CONFIG_BAND_LBAND
1143 {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
1144 {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
1145 {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
1146#endif
1147#ifdef CONFIG_BAND_SBAND
1148 {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
1149 {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
1150#endif
1151};
1152
1153static const struct dib0090_tuning dib0090_tuning_table[] = {
1154
1155#ifdef CONFIG_BAND_CBAND
1156 {170000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
1157#endif
1158#ifdef CONFIG_BAND_VHF
1159 {184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
1160 {227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
1161 {380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
1162#endif
1163#ifdef CONFIG_BAND_UHF
1164 {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1165 {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1166 {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1167 {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1168 {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1169 {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
1170#endif
1171#ifdef CONFIG_BAND_LBAND
1172 {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
1173 {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
1174 {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
1175#endif
1176#ifdef CONFIG_BAND_SBAND
1177 {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
1178 {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
1179#endif
1180};
1181
1182#define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */
1183static int dib0090_tune(struct dvb_frontend *fe)
1184{
1185 struct dib0090_state *state = fe->tuner_priv;
1186 const struct dib0090_tuning *tune = state->current_tune_table_index;
1187 const struct dib0090_pll *pll = state->current_pll_table_index;
1188 enum frontend_tune_state *tune_state = &state->tune_state;
1189
1190 u32 rf;
1191 u16 lo4 = 0xe900, lo5, lo6, Den;
1192 u32 FBDiv, Rest, FREF, VCOF_kHz = 0;
1193 u16 tmp, adc;
1194 int8_t step_sign;
1195 int ret = 10; /* 1ms is the default delay most of the time */
1196 u8 c, i;
1197
1198 state->current_band = (u8) BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000);
1199 rf = fe->dtv_property_cache.frequency / 1000 +
1200 (state->current_band == BAND_UHF ? state->config->freq_offset_khz_uhf : state->config->freq_offset_khz_vhf);
1201 /* in any case we first need to do a reset if needed */
1202 if (state->reset & 0x1)
1203 return dib0090_dc_offset_calibration(state, tune_state);
1204 else if (state->reset & 0x2)
1205 return dib0090_wbd_calibration(state, tune_state);
1206
1207 /************************* VCO ***************************/
1208 /* Default values for FG */
1209 /* from these are needed : */
1210 /* Cp,HFdiv,VCOband,SD,Num,Den,FB and REFDiv */
1211
1212#ifdef CONFIG_SYS_ISDBT
1213 if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1)
1214 rf += 850;
1215#endif
1216
1217 if (state->current_rf != rf) {
1218 state->tuner_is_tuned = 0;
1219
1220 tune = dib0090_tuning_table;
1221
1222 tmp = (state->revision >> 5) & 0x7;
1223 if (tmp == 0x4 || tmp == 0x7) {
1224 /* CBAND tuner version for VHF */
1225 if (state->current_band == BAND_FM || state->current_band == BAND_VHF) {
1226 /* Force CBAND */
1227 state->current_band = BAND_CBAND;
1228 tune = dib0090_tuning_table_fm_vhf_on_cband;
1229 }
1230 }
1231
1232 pll = dib0090_pll_table;
1233 /* Look for the interval */
1234 while (rf > tune->max_freq)
1235 tune++;
1236 while (rf > pll->max_freq)
1237 pll++;
1238 state->current_tune_table_index = tune;
1239 state->current_pll_table_index = pll;
1240 }
1241
1242 if (*tune_state == CT_TUNER_START) {
1243
1244 if (state->tuner_is_tuned == 0)
1245 state->current_rf = 0;
1246
1247 if (state->current_rf != rf) {
1248
1249 dib0090_write_reg(state, 0x0b, 0xb800 | (tune->switch_trim));
1250
1251 /* external loop filter, otherwise:
1252 * lo5 = (0 << 15) | (0 << 12) | (0 << 11) | (3 << 9) | (4 << 6) | (3 << 4) | 4;
1253 * lo6 = 0x0e34 */
1254 if (pll->vco_band)
1255 lo5 = 0x049e;
1256 else if (state->config->analog_output)
1257 lo5 = 0x041d;
1258 else
1259 lo5 = 0x041c;
1260
1261 lo5 |= (pll->hfdiv_code << 11) | (pll->vco_band << 7); /* bit 15 is the split to the slave, we do not do it here */
1262
1263 if (!state->config->io.pll_int_loop_filt)
1264 lo6 = 0xff28;
1265 else
1266 lo6 = (state->config->io.pll_int_loop_filt << 3);
1267
1268 VCOF_kHz = (pll->hfdiv * rf) * 2;
1269
1270 FREF = state->config->io.clock_khz;
1271
1272 FBDiv = (VCOF_kHz / pll->topresc / FREF);
1273 Rest = (VCOF_kHz / pll->topresc) - FBDiv * FREF;
1274
1275 if (Rest < LPF)
1276 Rest = 0;
1277 else if (Rest < 2 * LPF)
1278 Rest = 2 * LPF;
1279 else if (Rest > (FREF - LPF)) {
1280 Rest = 0;
1281 FBDiv += 1;
1282 } else if (Rest > (FREF - 2 * LPF))
1283 Rest = FREF - 2 * LPF;
1284 Rest = (Rest * 6528) / (FREF / 10);
1285
1286 Den = 1;
1287
1288 dprintk(" ***** ******* Rest value = %d", Rest);
1289
1290 if (Rest > 0) {
1291 if (state->config->analog_output)
1292 lo6 |= (1 << 2) | 2;
1293 else
1294 lo6 |= (1 << 2) | 1;
1295 Den = 255;
1296 }
1297#ifdef CONFIG_BAND_SBAND
1298 if (state->current_band == BAND_SBAND)
1299 lo6 &= 0xfffb;
1300#endif
1301
1302 dib0090_write_reg(state, 0x15, (u16) FBDiv);
1303
1304 dib0090_write_reg(state, 0x16, (Den << 8) | 1);
1305
1306 dib0090_write_reg(state, 0x17, (u16) Rest);
1307
1308 dib0090_write_reg(state, 0x19, lo5);
1309
1310 dib0090_write_reg(state, 0x1c, lo6);
1311
1312 lo6 = tune->tuner_enable;
1313 if (state->config->analog_output)
1314 lo6 = (lo6 & 0xff9f) | 0x2;
1315
1316 dib0090_write_reg(state, 0x24, lo6 | EN_LO
1317#ifdef CONFIG_DIB0090_USE_PWM_AGC
1318 | state->config->use_pwm_agc * EN_CRYSTAL
1319#endif
1320 );
1321
1322 state->current_rf = rf;
1323
1324 /* prepare a complete captrim */
1325 state->step = state->captrim = state->fcaptrim = 64;
1326
1327 } else { /* we are already tuned to this frequency - the configuration is correct */
1328
1329 /* do a minimal captrim even if the frequency has not changed */
1330 state->step = 4;
1331 state->captrim = state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7f;
1332 }
1333 state->adc_diff = 3000;
1334
1335 dib0090_write_reg(state, 0x10, 0x2B1);
1336
1337 dib0090_write_reg(state, 0x1e, 0x0032);
1338
1339 ret = 20;
1340 *tune_state = CT_TUNER_STEP_1;
1341 } else if (*tune_state == CT_TUNER_STEP_0) {
1342 /* nothing */
1343 } else if (*tune_state == CT_TUNER_STEP_1) {
1344 state->step /= 2;
1345 dib0090_write_reg(state, 0x18, lo4 | state->captrim);
1346 *tune_state = CT_TUNER_STEP_2;
1347 } else if (*tune_state == CT_TUNER_STEP_2) {
1348
1349 adc = dib0090_read_reg(state, 0x1d);
1350 dprintk("FE %d CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) fe->id, (u32) state->captrim, (u32) adc,
1351 (u32) (adc) * (u32) 1800 / (u32) 1024);
1352
1353 if (adc >= 400) {
1354 adc -= 400;
1355 step_sign = -1;
1356 } else {
1357 adc = 400 - adc;
1358 step_sign = 1;
1359 }
1360
1361 if (adc < state->adc_diff) {
1362 dprintk("FE %d CAPTRIM=%d is closer to target (%d/%d)", (u32) fe->id, (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
1363 state->adc_diff = adc;
1364 state->fcaptrim = state->captrim;
1365
1366 }
1367
1368 state->captrim += step_sign * state->step;
1369 if (state->step >= 1)
1370 *tune_state = CT_TUNER_STEP_1;
1371 else
1372 *tune_state = CT_TUNER_STEP_3;
1373
1374 ret = 15;
1375 } else if (*tune_state == CT_TUNER_STEP_3) {
1376 /* write the final captrim config */
1377 dib0090_write_reg(state, 0x18, lo4 | state->fcaptrim);
1378
1379#ifdef CONFIG_TUNER_DIB0090_CAPTRIM_MEMORY
1380 state->memory[state->memory_index].cap = state->fcaptrim;
1381#endif
1382
1383 *tune_state = CT_TUNER_STEP_4;
1384 } else if (*tune_state == CT_TUNER_STEP_4) {
1385 dib0090_write_reg(state, 0x1e, 0x07ff);
1386
1387 dprintk("FE %d Final Captrim: %d", (u32) fe->id, (u32) state->fcaptrim);
1388 dprintk("FE %d HFDIV code: %d", (u32) fe->id, (u32) pll->hfdiv_code);
1389 dprintk("FE %d VCO = %d", (u32) fe->id, (u32) pll->vco_band);
1390 dprintk("FE %d VCOF in kHz: %d ((%d*%d) << 1))", (u32) fe->id, (u32) ((pll->hfdiv * rf) * 2), (u32) pll->hfdiv, (u32) rf);
1391 dprintk("FE %d REFDIV: %d, FREF: %d", (u32) fe->id, (u32) 1, (u32) state->config->io.clock_khz);
1392 dprintk("FE %d FBDIV: %d, Rest: %d", (u32) fe->id, (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
1393 dprintk("FE %d Num: %d, Den: %d, SD: %d", (u32) fe->id, (u32) dib0090_read_reg(state, 0x17),
1394 (u32) (dib0090_read_reg(state, 0x16) >> 8), (u32) dib0090_read_reg(state, 0x1c) & 0x3);
1395
1396 c = 4;
1397 i = 3;
1398#if defined(CONFIG_BAND_LBAND) || defined(CONFIG_BAND_SBAND)
1399 if ((state->current_band == BAND_LBAND) || (state->current_band == BAND_SBAND)) {
1400 c = 2;
1401 i = 2;
1402 }
1403#endif
1404 dib0090_write_reg(state, 0x10, (c << 13) | (i << 11) | (WBD
1405#ifdef CONFIG_DIB0090_USE_PWM_AGC
1406 | (state->config->use_pwm_agc << 1)
1407#endif
1408 ));
1409 dib0090_write_reg(state, 0x09, (tune->lna_tune << 5) | (tune->lna_bias << 0));
1410 dib0090_write_reg(state, 0x0c, tune->v2i);
1411 dib0090_write_reg(state, 0x0d, tune->mix);
1412 dib0090_write_reg(state, 0x0e, tune->load);
1413
1414 *tune_state = CT_TUNER_STEP_5;
1415 } else if (*tune_state == CT_TUNER_STEP_5) {
1416
1417 /* initialize the lt gain register */
1418 state->rf_lt_def = 0x7c00;
1419 dib0090_write_reg(state, 0x0f, state->rf_lt_def);
1420
1421 dib0090_set_bandwidth(state);
1422 state->tuner_is_tuned = 1;
1423 *tune_state = CT_TUNER_STOP;
1424 } else
1425 ret = FE_CALLBACK_TIME_NEVER;
1426 return ret;
1427}
1428
1429static int dib0090_release(struct dvb_frontend *fe)
1430{
1431 kfree(fe->tuner_priv);
1432 fe->tuner_priv = NULL;
1433 return 0;
1434}
1435
1436enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
1437{
1438 struct dib0090_state *state = fe->tuner_priv;
1439
1440 return state->tune_state;
1441}
1442EXPORT_SYMBOL(dib0090_get_tune_state);
1443
1444int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
1445{
1446 struct dib0090_state *state = fe->tuner_priv;
1447
1448 state->tune_state = tune_state;
1449 return 0;
1450}
1451EXPORT_SYMBOL(dib0090_set_tune_state);
1452
1453static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency)
1454{
1455 struct dib0090_state *state = fe->tuner_priv;
1456
1457 *frequency = 1000 * state->current_rf;
1458 return 0;
1459}
1460
1461static int dib0090_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
1462{
1463 struct dib0090_state *state = fe->tuner_priv;
1464 uint32_t ret;
1465
1466 state->tune_state = CT_TUNER_START;
1467
1468 do {
1469 ret = dib0090_tune(fe);
1470 if (ret != FE_CALLBACK_TIME_NEVER)
1471 msleep(ret / 10);
1472 else
1473 break;
1474 } while (state->tune_state != CT_TUNER_STOP);
1475
1476 return 0;
1477}
1478
1479static const struct dvb_tuner_ops dib0090_ops = {
1480 .info = {
1481 .name = "DiBcom DiB0090",
1482 .frequency_min = 45000000,
1483 .frequency_max = 860000000,
1484 .frequency_step = 1000,
1485 },
1486 .release = dib0090_release,
1487
1488 .init = dib0090_wakeup,
1489 .sleep = dib0090_sleep,
1490 .set_params = dib0090_set_params,
1491 .get_frequency = dib0090_get_frequency,
1492};
1493
1494struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
1495{
1496 struct dib0090_state *st = kzalloc(sizeof(struct dib0090_state), GFP_KERNEL);
1497 if (st == NULL)
1498 return NULL;
1499
1500 st->config = config;
1501 st->i2c = i2c;
1502 st->fe = fe;
1503 fe->tuner_priv = st;
1504
1505 if (dib0090_reset(fe) != 0)
1506 goto free_mem;
1507
1508 printk(KERN_INFO "DiB0090: successfully identified\n");
1509 memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops));
1510
1511 return fe;
1512 free_mem:
1513 kfree(st);
1514 fe->tuner_priv = NULL;
1515 return NULL;
1516}
1517EXPORT_SYMBOL(dib0090_register);
1518
1519MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
1520MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>");
1521MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner");
1522MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib0090.h b/drivers/media/dvb/frontends/dib0090.h
new file mode 100644
index 000000000000..aa7711e88776
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib0090.h
@@ -0,0 +1,108 @@
1/*
2 * Linux-DVB Driver for DiBcom's DiB0090 base-band RF Tuner.
3 *
4 * Copyright (C) 2005-7 DiBcom (http://www.dibcom.fr/)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 */
10#ifndef DIB0090_H
11#define DIB0090_H
12
13struct dvb_frontend;
14struct i2c_adapter;
15
16#define DEFAULT_DIB0090_I2C_ADDRESS 0x60
17
18struct dib0090_io_config {
19 u32 clock_khz;
20
21 u8 pll_bypass:1;
22 u8 pll_range:1;
23 u8 pll_prediv:6;
24 u8 pll_loopdiv:6;
25
26 u8 adc_clock_ratio; /* valid values are 8, 7, 6 */
27 u16 pll_int_loop_filt;
28};
29
30struct dib0090_config {
31 struct dib0090_io_config io;
32 int (*reset) (struct dvb_frontend *, int);
33 int (*sleep) (struct dvb_frontend *, int);
34
35 /* offset in kHz */
36 int freq_offset_khz_uhf;
37 int freq_offset_khz_vhf;
38
39 int (*get_adc_power) (struct dvb_frontend *);
40
41 u8 clkouttobamse:1; /* activate or deactivate clock output */
42 u8 analog_output;
43
44 u8 i2c_address;
45 /* add drives and other things if necessary */
46 u16 wbd_vhf_offset;
47 u16 wbd_cband_offset;
48 u8 use_pwm_agc;
49 u8 clkoutdrive;
50};
51
52#if defined(CONFIG_DVB_TUNER_DIB0090) || (defined(CONFIG_DVB_TUNER_DIB0090_MODULE) && defined(MODULE))
53extern struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
54extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast);
55extern void dib0090_pwm_gain_reset(struct dvb_frontend *fe);
56extern u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner);
57extern int dib0090_gain_control(struct dvb_frontend *fe);
58extern enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe);
59extern int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state);
60extern void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt);
61#else
62static inline struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
63{
64 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
65 return NULL;
66}
67
68static inline void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast)
69{
70 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
71}
72
73static inline void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
74{
75 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
76}
77
78static inline u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner)
79{
80 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
81 return 0;
82}
83
84static inline int dib0090_gain_control(struct dvb_frontend *fe)
85{
86 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
87 return -ENODEV;
88}
89
90static inline enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
91{
92 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
93 return CT_DONE;
94}
95
96static inline int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
97{
98 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
99 return -ENODEV;
100}
101
102static inline void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt)
103{
104 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
105}
106#endif
107
108#endif
diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
index 898400d331a3..6f6fa29d9ea4 100644
--- a/drivers/media/dvb/frontends/dib8000.c
+++ b/drivers/media/dvb/frontends/dib8000.c
@@ -28,18 +28,6 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
28 28
29#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB8000: "); printk(args); printk("\n"); } } while (0) 29#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB8000: "); printk(args); printk("\n"); } } while (0)
30 30
31enum frontend_tune_state {
32 CT_AGC_START = 20,
33 CT_AGC_STEP_0,
34 CT_AGC_STEP_1,
35 CT_AGC_STEP_2,
36 CT_AGC_STEP_3,
37 CT_AGC_STEP_4,
38 CT_AGC_STOP,
39
40 CT_DEMOD_START = 30,
41};
42
43#define FE_STATUS_TUNE_FAILED 0 31#define FE_STATUS_TUNE_FAILED 0
44 32
45struct i2c_device { 33struct i2c_device {
@@ -133,104 +121,104 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
133 return dib8000_i2c_write16(&state->i2c, reg, val); 121 return dib8000_i2c_write16(&state->i2c, reg, val);
134} 122}
135 123
136const int16_t coeff_2k_sb_1seg_dqpsk[8] = { 124static const int16_t coeff_2k_sb_1seg_dqpsk[8] = {
137 (769 << 5) | 0x0a, (745 << 5) | 0x03, (595 << 5) | 0x0d, (769 << 5) | 0x0a, (920 << 5) | 0x09, (784 << 5) | 0x02, (519 << 5) | 0x0c, 125 (769 << 5) | 0x0a, (745 << 5) | 0x03, (595 << 5) | 0x0d, (769 << 5) | 0x0a, (920 << 5) | 0x09, (784 << 5) | 0x02, (519 << 5) | 0x0c,
138 (920 << 5) | 0x09 126 (920 << 5) | 0x09
139}; 127};
140 128
141const int16_t coeff_2k_sb_1seg[8] = { 129static const int16_t coeff_2k_sb_1seg[8] = {
142 (692 << 5) | 0x0b, (683 << 5) | 0x01, (519 << 5) | 0x09, (692 << 5) | 0x0b, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f 130 (692 << 5) | 0x0b, (683 << 5) | 0x01, (519 << 5) | 0x09, (692 << 5) | 0x0b, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f
143}; 131};
144 132
145const int16_t coeff_2k_sb_3seg_0dqpsk_1dqpsk[8] = { 133static const int16_t coeff_2k_sb_3seg_0dqpsk_1dqpsk[8] = {
146 (832 << 5) | 0x10, (912 << 5) | 0x05, (900 << 5) | 0x12, (832 << 5) | 0x10, (-931 << 5) | 0x0f, (912 << 5) | 0x04, (807 << 5) | 0x11, 134 (832 << 5) | 0x10, (912 << 5) | 0x05, (900 << 5) | 0x12, (832 << 5) | 0x10, (-931 << 5) | 0x0f, (912 << 5) | 0x04, (807 << 5) | 0x11,
147 (-931 << 5) | 0x0f 135 (-931 << 5) | 0x0f
148}; 136};
149 137
150const int16_t coeff_2k_sb_3seg_0dqpsk[8] = { 138static const int16_t coeff_2k_sb_3seg_0dqpsk[8] = {
151 (622 << 5) | 0x0c, (941 << 5) | 0x04, (796 << 5) | 0x10, (622 << 5) | 0x0c, (982 << 5) | 0x0c, (519 << 5) | 0x02, (572 << 5) | 0x0e, 139 (622 << 5) | 0x0c, (941 << 5) | 0x04, (796 << 5) | 0x10, (622 << 5) | 0x0c, (982 << 5) | 0x0c, (519 << 5) | 0x02, (572 << 5) | 0x0e,
152 (982 << 5) | 0x0c 140 (982 << 5) | 0x0c
153}; 141};
154 142
155const int16_t coeff_2k_sb_3seg_1dqpsk[8] = { 143static const int16_t coeff_2k_sb_3seg_1dqpsk[8] = {
156 (699 << 5) | 0x14, (607 << 5) | 0x04, (944 << 5) | 0x13, (699 << 5) | 0x14, (-720 << 5) | 0x0d, (640 << 5) | 0x03, (866 << 5) | 0x12, 144 (699 << 5) | 0x14, (607 << 5) | 0x04, (944 << 5) | 0x13, (699 << 5) | 0x14, (-720 << 5) | 0x0d, (640 << 5) | 0x03, (866 << 5) | 0x12,
157 (-720 << 5) | 0x0d 145 (-720 << 5) | 0x0d
158}; 146};
159 147
160const int16_t coeff_2k_sb_3seg[8] = { 148static const int16_t coeff_2k_sb_3seg[8] = {
161 (664 << 5) | 0x0c, (925 << 5) | 0x03, (937 << 5) | 0x10, (664 << 5) | 0x0c, (-610 << 5) | 0x0a, (697 << 5) | 0x01, (836 << 5) | 0x0e, 149 (664 << 5) | 0x0c, (925 << 5) | 0x03, (937 << 5) | 0x10, (664 << 5) | 0x0c, (-610 << 5) | 0x0a, (697 << 5) | 0x01, (836 << 5) | 0x0e,
162 (-610 << 5) | 0x0a 150 (-610 << 5) | 0x0a
163}; 151};
164 152
165const int16_t coeff_4k_sb_1seg_dqpsk[8] = { 153static const int16_t coeff_4k_sb_1seg_dqpsk[8] = {
166 (-955 << 5) | 0x0e, (687 << 5) | 0x04, (818 << 5) | 0x10, (-955 << 5) | 0x0e, (-922 << 5) | 0x0d, (750 << 5) | 0x03, (665 << 5) | 0x0f, 154 (-955 << 5) | 0x0e, (687 << 5) | 0x04, (818 << 5) | 0x10, (-955 << 5) | 0x0e, (-922 << 5) | 0x0d, (750 << 5) | 0x03, (665 << 5) | 0x0f,
167 (-922 << 5) | 0x0d 155 (-922 << 5) | 0x0d
168}; 156};
169 157
170const int16_t coeff_4k_sb_1seg[8] = { 158static const int16_t coeff_4k_sb_1seg[8] = {
171 (638 << 5) | 0x0d, (683 << 5) | 0x02, (638 << 5) | 0x0d, (638 << 5) | 0x0d, (-655 << 5) | 0x0a, (517 << 5) | 0x00, (698 << 5) | 0x0d, 159 (638 << 5) | 0x0d, (683 << 5) | 0x02, (638 << 5) | 0x0d, (638 << 5) | 0x0d, (-655 << 5) | 0x0a, (517 << 5) | 0x00, (698 << 5) | 0x0d,
172 (-655 << 5) | 0x0a 160 (-655 << 5) | 0x0a
173}; 161};
174 162
175const int16_t coeff_4k_sb_3seg_0dqpsk_1dqpsk[8] = { 163static const int16_t coeff_4k_sb_3seg_0dqpsk_1dqpsk[8] = {
176 (-707 << 5) | 0x14, (910 << 5) | 0x06, (889 << 5) | 0x16, (-707 << 5) | 0x14, (-958 << 5) | 0x13, (993 << 5) | 0x05, (523 << 5) | 0x14, 164 (-707 << 5) | 0x14, (910 << 5) | 0x06, (889 << 5) | 0x16, (-707 << 5) | 0x14, (-958 << 5) | 0x13, (993 << 5) | 0x05, (523 << 5) | 0x14,
177 (-958 << 5) | 0x13 165 (-958 << 5) | 0x13
178}; 166};
179 167
180const int16_t coeff_4k_sb_3seg_0dqpsk[8] = { 168static const int16_t coeff_4k_sb_3seg_0dqpsk[8] = {
181 (-723 << 5) | 0x13, (910 << 5) | 0x05, (777 << 5) | 0x14, (-723 << 5) | 0x13, (-568 << 5) | 0x0f, (547 << 5) | 0x03, (696 << 5) | 0x12, 169 (-723 << 5) | 0x13, (910 << 5) | 0x05, (777 << 5) | 0x14, (-723 << 5) | 0x13, (-568 << 5) | 0x0f, (547 << 5) | 0x03, (696 << 5) | 0x12,
182 (-568 << 5) | 0x0f 170 (-568 << 5) | 0x0f
183}; 171};
184 172
185const int16_t coeff_4k_sb_3seg_1dqpsk[8] = { 173static const int16_t coeff_4k_sb_3seg_1dqpsk[8] = {
186 (-940 << 5) | 0x15, (607 << 5) | 0x05, (915 << 5) | 0x16, (-940 << 5) | 0x15, (-848 << 5) | 0x13, (683 << 5) | 0x04, (543 << 5) | 0x14, 174 (-940 << 5) | 0x15, (607 << 5) | 0x05, (915 << 5) | 0x16, (-940 << 5) | 0x15, (-848 << 5) | 0x13, (683 << 5) | 0x04, (543 << 5) | 0x14,
187 (-848 << 5) | 0x13 175 (-848 << 5) | 0x13
188}; 176};
189 177
190const int16_t coeff_4k_sb_3seg[8] = { 178static const int16_t coeff_4k_sb_3seg[8] = {
191 (612 << 5) | 0x12, (910 << 5) | 0x04, (864 << 5) | 0x14, (612 << 5) | 0x12, (-869 << 5) | 0x13, (683 << 5) | 0x02, (869 << 5) | 0x12, 179 (612 << 5) | 0x12, (910 << 5) | 0x04, (864 << 5) | 0x14, (612 << 5) | 0x12, (-869 << 5) | 0x13, (683 << 5) | 0x02, (869 << 5) | 0x12,
192 (-869 << 5) | 0x13 180 (-869 << 5) | 0x13
193}; 181};
194 182
195const int16_t coeff_8k_sb_1seg_dqpsk[8] = { 183static const int16_t coeff_8k_sb_1seg_dqpsk[8] = {
196 (-835 << 5) | 0x12, (684 << 5) | 0x05, (735 << 5) | 0x14, (-835 << 5) | 0x12, (-598 << 5) | 0x10, (781 << 5) | 0x04, (739 << 5) | 0x13, 184 (-835 << 5) | 0x12, (684 << 5) | 0x05, (735 << 5) | 0x14, (-835 << 5) | 0x12, (-598 << 5) | 0x10, (781 << 5) | 0x04, (739 << 5) | 0x13,
197 (-598 << 5) | 0x10 185 (-598 << 5) | 0x10
198}; 186};
199 187
200const int16_t coeff_8k_sb_1seg[8] = { 188static const int16_t coeff_8k_sb_1seg[8] = {
201 (673 << 5) | 0x0f, (683 << 5) | 0x03, (808 << 5) | 0x12, (673 << 5) | 0x0f, (585 << 5) | 0x0f, (512 << 5) | 0x01, (780 << 5) | 0x0f, 189 (673 << 5) | 0x0f, (683 << 5) | 0x03, (808 << 5) | 0x12, (673 << 5) | 0x0f, (585 << 5) | 0x0f, (512 << 5) | 0x01, (780 << 5) | 0x0f,
202 (585 << 5) | 0x0f 190 (585 << 5) | 0x0f
203}; 191};
204 192
205const int16_t coeff_8k_sb_3seg_0dqpsk_1dqpsk[8] = { 193static const int16_t coeff_8k_sb_3seg_0dqpsk_1dqpsk[8] = {
206 (863 << 5) | 0x17, (930 << 5) | 0x07, (878 << 5) | 0x19, (863 << 5) | 0x17, (0 << 5) | 0x14, (521 << 5) | 0x05, (980 << 5) | 0x18, 194 (863 << 5) | 0x17, (930 << 5) | 0x07, (878 << 5) | 0x19, (863 << 5) | 0x17, (0 << 5) | 0x14, (521 << 5) | 0x05, (980 << 5) | 0x18,
207 (0 << 5) | 0x14 195 (0 << 5) | 0x14
208}; 196};
209 197
210const int16_t coeff_8k_sb_3seg_0dqpsk[8] = { 198static const int16_t coeff_8k_sb_3seg_0dqpsk[8] = {
211 (-924 << 5) | 0x17, (910 << 5) | 0x06, (774 << 5) | 0x17, (-924 << 5) | 0x17, (-877 << 5) | 0x15, (565 << 5) | 0x04, (553 << 5) | 0x15, 199 (-924 << 5) | 0x17, (910 << 5) | 0x06, (774 << 5) | 0x17, (-924 << 5) | 0x17, (-877 << 5) | 0x15, (565 << 5) | 0x04, (553 << 5) | 0x15,
212 (-877 << 5) | 0x15 200 (-877 << 5) | 0x15
213}; 201};
214 202
215const int16_t coeff_8k_sb_3seg_1dqpsk[8] = { 203static const int16_t coeff_8k_sb_3seg_1dqpsk[8] = {
216 (-921 << 5) | 0x19, (607 << 5) | 0x06, (881 << 5) | 0x19, (-921 << 5) | 0x19, (-921 << 5) | 0x14, (713 << 5) | 0x05, (1018 << 5) | 0x18, 204 (-921 << 5) | 0x19, (607 << 5) | 0x06, (881 << 5) | 0x19, (-921 << 5) | 0x19, (-921 << 5) | 0x14, (713 << 5) | 0x05, (1018 << 5) | 0x18,
217 (-921 << 5) | 0x14 205 (-921 << 5) | 0x14
218}; 206};
219 207
220const int16_t coeff_8k_sb_3seg[8] = { 208static const int16_t coeff_8k_sb_3seg[8] = {
221 (514 << 5) | 0x14, (910 << 5) | 0x05, (861 << 5) | 0x17, (514 << 5) | 0x14, (690 << 5) | 0x14, (683 << 5) | 0x03, (662 << 5) | 0x15, 209 (514 << 5) | 0x14, (910 << 5) | 0x05, (861 << 5) | 0x17, (514 << 5) | 0x14, (690 << 5) | 0x14, (683 << 5) | 0x03, (662 << 5) | 0x15,
222 (690 << 5) | 0x14 210 (690 << 5) | 0x14
223}; 211};
224 212
225const int16_t ana_fe_coeff_3seg[24] = { 213static const int16_t ana_fe_coeff_3seg[24] = {
226 81, 80, 78, 74, 68, 61, 54, 45, 37, 28, 19, 11, 4, 1022, 1017, 1013, 1010, 1008, 1008, 1008, 1008, 1010, 1014, 1017 214 81, 80, 78, 74, 68, 61, 54, 45, 37, 28, 19, 11, 4, 1022, 1017, 1013, 1010, 1008, 1008, 1008, 1008, 1010, 1014, 1017
227}; 215};
228 216
229const int16_t ana_fe_coeff_1seg[24] = { 217static const int16_t ana_fe_coeff_1seg[24] = {
230 249, 226, 164, 82, 5, 981, 970, 988, 1018, 20, 31, 26, 8, 1012, 1000, 1018, 1012, 8, 15, 14, 9, 3, 1017, 1003 218 249, 226, 164, 82, 5, 981, 970, 988, 1018, 20, 31, 26, 8, 1012, 1000, 1018, 1012, 8, 15, 14, 9, 3, 1017, 1003
231}; 219};
232 220
233const int16_t ana_fe_coeff_13seg[24] = { 221static const int16_t ana_fe_coeff_13seg[24] = {
234 396, 305, 105, -51, -77, -12, 41, 31, -11, -30, -11, 14, 15, -2, -13, -7, 5, 8, 1, -6, -7, -3, 0, 1 222 396, 305, 105, -51, -77, -12, 41, 31, -11, -30, -11, 14, 15, -2, -13, -7, 5, 8, 1, -6, -7, -3, 0, 1
235}; 223};
236 224
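Every entry in the coefficient tables above is written as (value << 5) | x: a signed coefficient in the upper bits with a separate 5-bit field ORed into the low bits, packed into one int16_t that dib8000_set_channel() later writes unmodified to registers 343 and following. A small stand-alone sketch of how such an entry unpacks (illustration only, not driver code; the name "low5" is an assumption, the driver itself never decodes these words):

#include <stdio.h>
#include <stdint.h>

/* Unpack one packed table entry: an arithmetic right shift recovers the
 * signed coefficient, the mask recovers the 5-bit field below it. */
static void decode_entry(int16_t packed)
{
	int coeff = packed >> 5;	/* sign is preserved */
	int low5 = packed & 0x1f;

	printf("packed %6d -> coeff %5d, low5 0x%02x\n", packed, coeff, low5);
}

int main(void)
{
	decode_entry((769 << 5) | 0x0a);	/* first coeff_2k_sb_1seg_dqpsk entry */
	decode_entry((-931 << 5) | 0x0f);	/* a negative coefficient */
	return 0;
}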
@@ -852,6 +840,14 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
852 return 0; 840 return 0;
853} 841}
854 842
843void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
844{
845 struct dib8000_state *state = fe->demodulator_priv;
846 dib8000_set_adc_state(state, DIBX000_ADC_ON);
847 dib8000_set_agc_config(state, (unsigned char)(BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000)));
848}
849EXPORT_SYMBOL(dib8000_pwm_agc_reset);
850
855static int dib8000_agc_soft_split(struct dib8000_state *state) 851static int dib8000_agc_soft_split(struct dib8000_state *state)
856{ 852{
857 u16 agc, split_offset; 853 u16 agc, split_offset;
@@ -939,6 +935,32 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
939 935
940} 936}
941 937
938static const int32_t lut_1000ln_mant[] =
939{
940 6908, 7003, 7090, 7170, 7244, 7313, 7377, 7438, 7495, 7549, 7600
941};
942
943int32_t dib8000_get_adc_power(struct dvb_frontend *fe, uint8_t mode)
944{
945 struct dib8000_state *state = fe->demodulator_priv;
946 uint32_t ix = 0, tmp_val = 0, exp = 0, mant = 0;
947 int32_t val;
948
949 val = dib8000_read32(state, 384);
950 /* mode = 1 : ln_agcpower calc using mant-exp conversion and mantissa look-up table */
951 if (mode) {
952 tmp_val = val;
953 while (tmp_val >>= 1)
954 exp++;
955 mant = (val * 1000 / (1<<exp));
956 ix = (uint8_t)((mant-1000)/100); /* index of the LUT */
957 val = (lut_1000ln_mant[ix] + 693*(exp-20) - 6908); /* 1000 * ln(adcpower_real) ; 693 = 1000ln(2) ; 6908 = 1000*ln(1000) ; 20 comes from adc_real = adc_pow_int / 2**20 */
958 val = (val*256)/1000;
959 }
960 return val;
961}
962EXPORT_SYMBOL(dib8000_get_adc_power);
963
942static void dib8000_update_timf(struct dib8000_state *state) 964static void dib8000_update_timf(struct dib8000_state *state)
943{ 965{
944 u32 timf = state->timf = dib8000_read32(state, 435); 966 u32 timf = state->timf = dib8000_read32(state, 435);
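dib8000_get_adc_power() above computes 1000*ln(adc_power / 2^20) entirely in integers: the raw word is split into an exponent (position of its top bit) and a mantissa scaled into [1000, 2000), lut_1000ln_mant supplies 1000*ln() of that scaled mantissa in 0.1 steps (its first entry is 1000*ln(1000) = 6908), and 693 = 1000*ln(2) accounts for each exponent step; the final *256/1000 rescaling is left out below. A stand-alone, user-space sketch of the same arithmetic (not driver code; the sample raw value is arbitrary and must be non-zero):

#include <math.h>
#include <stdio.h>
#include <stdint.h>

static const int32_t lut_1000ln_mant[] = {
	6908, 7003, 7090, 7170, 7244, 7313, 7377, 7438, 7495, 7549, 7600
};

/* ~ 1000 * ln(raw / 2^20); raw > 0 and raw * 1000 must fit in 32 bits */
static int32_t ln1000_of_q20(uint32_t raw)
{
	uint32_t tmp = raw, exp = 0, mant, ix;

	while (tmp >>= 1)
		exp++;				/* exp = floor(log2(raw)) */
	mant = raw * 1000 / (1u << exp);	/* mantissa scaled into [1000, 2000) */
	ix = (mant - 1000) / 100;		/* table step is 0.1 */
	return lut_1000ln_mant[ix] + 693 * ((int32_t)exp - 20) - 6908;
}

int main(void)
{
	uint32_t raw = 0x12345;			/* arbitrary sample value */

	printf("approx %d, exact %.0f\n",
	       (int)ln1000_of_q20(raw), 1000.0 * log(raw / 1048576.0));
	return 0;
}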
@@ -1401,10 +1423,9 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
1401 } 1423 }
1402 break; 1424 break;
1403 } 1425 }
1404 }
1405 if (state->fe.dtv_property_cache.isdbt_sb_mode == 1)
1406 for (i = 0; i < 8; i++) 1426 for (i = 0; i < 8; i++)
1407 dib8000_write_word(state, 343 + i, ncoeff[i]); 1427 dib8000_write_word(state, 343 + i, ncoeff[i]);
1428 }
1408 1429
1409 // P_small_coef_ext_enable=ISDB-Tsb, P_small_narrow_band=ISDB-Tsb, P_small_last_seg=13, P_small_offset_num_car=5 1430 // P_small_coef_ext_enable=ISDB-Tsb, P_small_narrow_band=ISDB-Tsb, P_small_last_seg=13, P_small_offset_num_car=5
1410 dib8000_write_word(state, 351, 1431 dib8000_write_word(state, 351,
@@ -1854,6 +1875,24 @@ static int dib8000_sleep(struct dvb_frontend *fe)
1854 } 1875 }
1855} 1876}
1856 1877
1878enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
1879{
1880 struct dib8000_state *state = fe->demodulator_priv;
1881 return state->tune_state;
1882}
1883EXPORT_SYMBOL(dib8000_get_tune_state);
1884
1885int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
1886{
1887 struct dib8000_state *state = fe->demodulator_priv;
1888 state->tune_state = tune_state;
1889 return 0;
1890}
1891EXPORT_SYMBOL(dib8000_set_tune_state);
1892
1893
1894
1895
1857static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep) 1896static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
1858{ 1897{
1859 struct dib8000_state *state = fe->demodulator_priv; 1898 struct dib8000_state *state = fe->demodulator_priv;
@@ -2043,29 +2082,31 @@ static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
2043 2082
2044 *stat = 0; 2083 *stat = 0;
2045 2084
2046 if ((lock >> 14) & 1) // AGC 2085 if ((lock >> 13) & 1)
2047 *stat |= FE_HAS_SIGNAL; 2086 *stat |= FE_HAS_SIGNAL;
2048 2087
2049 if ((lock >> 8) & 1) // Equal 2088 if ((lock >> 8) & 1) /* Equal */
2050 *stat |= FE_HAS_CARRIER; 2089 *stat |= FE_HAS_CARRIER;
2051 2090
2052 if ((lock >> 3) & 1) // TMCC_SYNC 2091 if (((lock >> 1) & 0xf) == 0xf) /* TMCC_SYNC */
2053 *stat |= FE_HAS_SYNC; 2092 *stat |= FE_HAS_SYNC;
2054 2093
2055 if ((lock >> 5) & 7) // FEC MPEG 2094 if (((lock >> 12) & 1) && ((lock >> 5) & 7)) /* FEC MPEG */
2056 *stat |= FE_HAS_LOCK; 2095 *stat |= FE_HAS_LOCK;
2057 2096
2058 lock = dib8000_read_word(state, 554); // Viterbi Layer A 2097 if ((lock >> 12) & 1) {
2059 if (lock & 0x01) 2098 lock = dib8000_read_word(state, 554); /* Viterbi Layer A */
2060 *stat |= FE_HAS_VITERBI; 2099 if (lock & 0x01)
2100 *stat |= FE_HAS_VITERBI;
2061 2101
2062 lock = dib8000_read_word(state, 555); // Viterbi Layer B 2102 lock = dib8000_read_word(state, 555); /* Viterbi Layer B */
2063 if (lock & 0x01) 2103 if (lock & 0x01)
2064 *stat |= FE_HAS_VITERBI; 2104 *stat |= FE_HAS_VITERBI;
2065 2105
2066 lock = dib8000_read_word(state, 556); // Viterbi Layer C 2106 lock = dib8000_read_word(state, 556); /* Viterbi Layer C */
2067 if (lock & 0x01) 2107 if (lock & 0x01)
2068 *stat |= FE_HAS_VITERBI; 2108 *stat |= FE_HAS_VITERBI;
2109 }
2069 2110
2070 return 0; 2111 return 0;
2071} 2112}
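The reworked status logic above derives the DVB status flags from individual bits of the lock word read a few lines earlier: bit 13 for signal, bit 8 for the equalizer, bits 1-4 all set for TMCC sync, and bit 12 together with the FEC/MPEG bits 5-7 for full lock; the per-layer Viterbi flags still come from registers 554-556 and are only consulted once bit 12 is set. A stand-alone sketch of that bit mapping (illustration only; the flag values mirror fe_status_t but are defined locally, and the sample lock word is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define HAS_SIGNAL	0x01	/* mirrors FE_HAS_SIGNAL  */
#define HAS_CARRIER	0x02	/* mirrors FE_HAS_CARRIER */
#define HAS_SYNC	0x08	/* mirrors FE_HAS_SYNC    */
#define HAS_LOCK	0x10	/* mirrors FE_HAS_LOCK    */

static unsigned int decode_lock_word(uint16_t lock)
{
	unsigned int stat = 0;

	if ((lock >> 13) & 1)				/* AGC / signal present */
		stat |= HAS_SIGNAL;
	if ((lock >> 8) & 1)				/* equalizer converged */
		stat |= HAS_CARRIER;
	if (((lock >> 1) & 0xf) == 0xf)			/* all four TMCC sync bits */
		stat |= HAS_SYNC;
	if (((lock >> 12) & 1) && ((lock >> 5) & 7))	/* DVSY plus FEC/MPEG bits */
		stat |= HAS_LOCK;
	return stat;
}

int main(void)
{
	printf("stat = 0x%02x\n", decode_lock_word(0x31fe));	/* -> 0x1b */
	return 0;
}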
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index 8c89482b738a..d99619ae983c 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -46,6 +46,10 @@ extern int dib8000_set_gpio(struct dvb_frontend *, u8 num, u8 dir, u8 val);
46extern int dib8000_set_wbd_ref(struct dvb_frontend *, u16 value); 46extern int dib8000_set_wbd_ref(struct dvb_frontend *, u16 value);
47extern int dib8000_pid_filter_ctrl(struct dvb_frontend *, u8 onoff); 47extern int dib8000_pid_filter_ctrl(struct dvb_frontend *, u8 onoff);
48extern int dib8000_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff); 48extern int dib8000_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
49extern int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state);
50extern enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe);
51extern void dib8000_pwm_agc_reset(struct dvb_frontend *fe);
52extern s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode);
49#else 53#else
50static inline struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg) 54static inline struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg)
51{ 55{
@@ -59,35 +63,53 @@ static inline struct i2c_adapter *dib8000_get_i2c_master(struct dvb_frontend *fe
59 return NULL; 63 return NULL;
60} 64}
61 65
62int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr) 66static inline int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
63{ 67{
64 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 68 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
65 return -ENODEV; 69 return -ENODEV;
66} 70}
67 71
68int dib8000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val) 72static inline int dib8000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
69{ 73{
70 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 74 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
71 return -ENODEV; 75 return -ENODEV;
72} 76}
73 77
74int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value) 78static inline int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value)
75{ 79{
76 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 80 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
77 return -ENODEV; 81 return -ENODEV;
78} 82}
79 83
80int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) 84static inline int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
81{ 85{
82 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 86 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
83 return -ENODEV; 87 return -ENODEV;
84} 88}
85 89
86int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) 90static inline int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
87{ 91{
88 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 92 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
89 return -ENODEV; 93 return -ENODEV;
90} 94}
95static inline int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
96{
97 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
98 return -ENODEV;
99}
100static inline enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
101{
102 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
103 return CT_SHUTDOWN;
104}
105static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
106{
107 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
108}
109static inline s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
110{
111 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
112 return 0;
113}
91#endif 114#endif
92 115
93#endif 116#endif
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
index 4efca30d2127..e6f3d73db9d3 100644
--- a/drivers/media/dvb/frontends/dibx000_common.c
+++ b/drivers/media/dvb/frontends/dibx000_common.c
@@ -6,7 +6,7 @@ static int debug;
6module_param(debug, int, 0644); 6module_param(debug, int, 0644);
7MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); 7MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
8 8
9#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); } } while (0) 9#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)
10 10
11static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val) 11static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
12{ 12{
@@ -25,7 +25,7 @@ static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
25 enum dibx000_i2c_interface intf) 25 enum dibx000_i2c_interface intf)
26{ 26{
27 if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) { 27 if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
28 dprintk("selecting interface: %d\n", intf); 28 dprintk("selecting interface: %d", intf);
29 mst->selected_interface = intf; 29 mst->selected_interface = intf;
30 return dibx000_write_word(mst, mst->base_reg + 4, intf); 30 return dibx000_write_word(mst, mst->base_reg + 4, intf);
31 } 31 }
@@ -171,9 +171,18 @@ void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
171{ 171{
172 i2c_del_adapter(&mst->gated_tuner_i2c_adap); 172 i2c_del_adapter(&mst->gated_tuner_i2c_adap);
173} 173}
174
175EXPORT_SYMBOL(dibx000_exit_i2c_master); 174EXPORT_SYMBOL(dibx000_exit_i2c_master);
176 175
176
177u32 systime(void)
178{
179 struct timespec t;
180
181 t = current_kernel_time();
182 return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
183}
184EXPORT_SYMBOL(systime);
185
177MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); 186MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
178MODULE_DESCRIPTION("Common function the DiBcom demodulator family"); 187MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
179MODULE_LICENSE("GPL"); 188MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
index 5be10eca07c0..4f5d141a308d 100644
--- a/drivers/media/dvb/frontends/dibx000_common.h
+++ b/drivers/media/dvb/frontends/dibx000_common.h
@@ -36,13 +36,17 @@ extern struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master
36extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst); 36extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst);
37extern void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst); 37extern void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst);
38 38
39extern u32 systime(void);
40
39#define BAND_LBAND 0x01 41#define BAND_LBAND 0x01
40#define BAND_UHF 0x02 42#define BAND_UHF 0x02
41#define BAND_VHF 0x04 43#define BAND_VHF 0x04
42#define BAND_SBAND 0x08 44#define BAND_SBAND 0x08
43#define BAND_FM 0x10 45#define BAND_FM 0x10
46#define BAND_CBAND 0x20
44 47
45#define BAND_OF_FREQUENCY(freq_kHz) ( (freq_kHz) <= 115000 ? BAND_FM : \ 48#define BAND_OF_FREQUENCY(freq_kHz) ((freq_kHz) <= 170000 ? BAND_CBAND : \
49 (freq_kHz) <= 115000 ? BAND_FM : \
46 (freq_kHz) <= 250000 ? BAND_VHF : \ 50 (freq_kHz) <= 250000 ? BAND_VHF : \
47 (freq_kHz) <= 863000 ? BAND_UHF : \ 51 (freq_kHz) <= 863000 ? BAND_UHF : \
48 (freq_kHz) <= 2000000 ? BAND_LBAND : BAND_SBAND ) 52 (freq_kHz) <= 2000000 ? BAND_LBAND : BAND_SBAND )
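With the new ordering of BAND_OF_FREQUENCY(), anything at or below 170 MHz now resolves to the added BAND_CBAND before the other cut-offs are tried, which also means the 115 MHz FM branch can no longer be selected. A small stand-alone check of the mapping (macro and masks copied from above; the frequencies are arbitrary examples):

#include <stdio.h>

#define BAND_LBAND	0x01
#define BAND_UHF	0x02
#define BAND_VHF	0x04
#define BAND_SBAND	0x08
#define BAND_FM		0x10
#define BAND_CBAND	0x20

#define BAND_OF_FREQUENCY(freq_kHz) ((freq_kHz) <= 170000 ? BAND_CBAND : \
				     (freq_kHz) <= 115000 ? BAND_FM : \
				     (freq_kHz) <= 250000 ? BAND_VHF : \
				     (freq_kHz) <= 863000 ? BAND_UHF : \
				     (freq_kHz) <= 2000000 ? BAND_LBAND : BAND_SBAND)

int main(void)
{
	unsigned int khz[] = { 90000, 200000, 600000, 1500000, 2200000 };
	unsigned int i;

	for (i = 0; i < sizeof(khz) / sizeof(khz[0]); i++)
		printf("%7u kHz -> band 0x%02x\n", khz[i], BAND_OF_FREQUENCY(khz[i]));
	return 0;	/* 90 MHz now reports 0x20 (C-band), not 0x10 (FM) */
}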
@@ -149,4 +153,67 @@ enum dibx000_adc_states {
149#define OUTMODE_MPEG2_FIFO 5 153#define OUTMODE_MPEG2_FIFO 5
150#define OUTMODE_ANALOG_ADC 6 154#define OUTMODE_ANALOG_ADC 6
151 155
156enum frontend_tune_state {
157 CT_TUNER_START = 10,
158 CT_TUNER_STEP_0,
159 CT_TUNER_STEP_1,
160 CT_TUNER_STEP_2,
161 CT_TUNER_STEP_3,
162 CT_TUNER_STEP_4,
163 CT_TUNER_STEP_5,
164 CT_TUNER_STEP_6,
165 CT_TUNER_STEP_7,
166 CT_TUNER_STOP,
167
168 CT_AGC_START = 20,
169 CT_AGC_STEP_0,
170 CT_AGC_STEP_1,
171 CT_AGC_STEP_2,
172 CT_AGC_STEP_3,
173 CT_AGC_STEP_4,
174 CT_AGC_STOP,
175
176 CT_DEMOD_START = 30,
177 CT_DEMOD_STEP_1,
178 CT_DEMOD_STEP_2,
179 CT_DEMOD_STEP_3,
180 CT_DEMOD_STEP_4,
181 CT_DEMOD_STEP_5,
182 CT_DEMOD_STEP_6,
183 CT_DEMOD_STEP_7,
184 CT_DEMOD_STEP_8,
185 CT_DEMOD_STEP_9,
186 CT_DEMOD_STEP_10,
187 CT_DEMOD_SEARCH_NEXT = 41,
188 CT_DEMOD_STEP_LOCKED,
189 CT_DEMOD_STOP,
190
191 CT_DONE = 100,
192 CT_SHUTDOWN,
193
194};
195
196struct dvb_frontend_parametersContext {
197#define CHANNEL_STATUS_PARAMETERS_UNKNOWN 0x01
198#define CHANNEL_STATUS_PARAMETERS_SET 0x02
199 u8 status;
200 u32 tune_time_estimation[2];
201 s32 tps_available;
202 u16 tps[9];
203};
204
205#define FE_STATUS_TUNE_FAILED 0
206#define FE_STATUS_TUNE_TIMED_OUT -1
207#define FE_STATUS_TUNE_TIME_TOO_SHORT -2
208#define FE_STATUS_TUNE_PENDING -3
209#define FE_STATUS_STD_SUCCESS -4
210#define FE_STATUS_FFT_SUCCESS -5
211#define FE_STATUS_DEMOD_SUCCESS -6
212#define FE_STATUS_LOCKED -7
213#define FE_STATUS_DATA_LOCKED -8
214
215#define FE_CALLBACK_TIME_NEVER 0xffffffff
216
217#define ABS(x) (((x) < 0) ? (-(x)) : (x))
218
152#endif 219#endif
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index eabcadc425d5..dee53960e7e8 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -199,7 +199,7 @@ static int lgs8gxx_set_if_freq(struct lgs8gxx_state *priv, u32 freq /*in kHz*/)
199 199
200 val = freq; 200 val = freq;
201 if (freq != 0) { 201 if (freq != 0) {
202 val *= (u64)1 << 32; 202 val <<= 32;
203 if (if_clk != 0) 203 if (if_clk != 0)
204 do_div(val, if_clk); 204 do_div(val, if_clk);
205 v32 = val & 0xFFFFFFFF; 205 v32 = val & 0xFFFFFFFF;
@@ -246,7 +246,7 @@ static int lgs8gxx_get_afc_phase(struct lgs8gxx_state *priv)
246 246
247 val = v32; 247 val = v32;
248 val *= priv->config->if_clk_freq; 248 val *= priv->config->if_clk_freq;
249 val /= (u64)1 << 32; 249 val >>= 32;
250 dprintk("AFC = %u kHz\n", (u32)val); 250 dprintk("AFC = %u kHz\n", (u32)val);
251 return 0; 251 return 0;
252} 252}
diff --git a/drivers/media/dvb/frontends/lnbp21.c b/drivers/media/dvb/frontends/lnbp21.c
index 71f607fe8fc7..b181bf023ada 100644
--- a/drivers/media/dvb/frontends/lnbp21.c
+++ b/drivers/media/dvb/frontends/lnbp21.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * lnbp21.c - driver for lnb supply and control ic lnbp21 2 * lnbp21.c - driver for lnb supply and control ic lnbp21
3 * 3 *
4 * Copyright (C) 2006 Oliver Endriss 4 * Copyright (C) 2006, 2009 Oliver Endriss <o.endriss@gmx.de>
5 * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru> 5 * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -91,6 +91,31 @@ static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
91 return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO; 91 return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO;
92} 92}
93 93
94static int lnbp21_set_tone(struct dvb_frontend *fe,
95 fe_sec_tone_mode_t tone)
96{
97 struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->sec_priv;
98 struct i2c_msg msg = { .addr = lnbp21->i2c_addr, .flags = 0,
99 .buf = &lnbp21->config,
100 .len = sizeof(lnbp21->config) };
101
102 switch (tone) {
103 case SEC_TONE_OFF:
104 lnbp21->config &= ~LNBP21_TEN;
105 break;
106 case SEC_TONE_ON:
107 lnbp21->config |= LNBP21_TEN;
108 break;
109 default:
110 return -EINVAL;
111 }
112
113 lnbp21->config |= lnbp21->override_or;
114 lnbp21->config &= lnbp21->override_and;
115
116 return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO;
117}
118
94static void lnbp21_release(struct dvb_frontend *fe) 119static void lnbp21_release(struct dvb_frontend *fe)
95{ 120{
96 /* LNBP power off */ 121 /* LNBP power off */
@@ -133,6 +158,7 @@ static struct dvb_frontend *lnbx2x_attach(struct dvb_frontend *fe,
133 /* override frontend ops */ 158 /* override frontend ops */
134 fe->ops.set_voltage = lnbp21_set_voltage; 159 fe->ops.set_voltage = lnbp21_set_voltage;
135 fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage; 160 fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
161 fe->ops.set_tone = lnbp21_set_tone;
136 printk(KERN_INFO "LNBx2x attached on addr=%x\n", lnbp21->i2c_addr); 162 printk(KERN_INFO "LNBx2x attached on addr=%x\n", lnbp21->i2c_addr);
137 163
138 return fe; 164 return fe;
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index df49ea0983bc..8762c86044a5 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -1451,6 +1451,8 @@ static int stv0900_status(struct stv0900_internal *intp,
1451{ 1451{
1452 enum fe_stv0900_search_state demod_state; 1452 enum fe_stv0900_search_state demod_state;
1453 int locked = FALSE; 1453 int locked = FALSE;
1454 u8 tsbitrate0_val, tsbitrate1_val;
1455 s32 bitrate;
1454 1456
1455 demod_state = stv0900_get_bits(intp, HEADER_MODE); 1457 demod_state = stv0900_get_bits(intp, HEADER_MODE);
1456 switch (demod_state) { 1458 switch (demod_state) {
@@ -1473,6 +1475,17 @@ static int stv0900_status(struct stv0900_internal *intp,
1473 1475
1474 dprintk("%s: locked = %d\n", __func__, locked); 1476 dprintk("%s: locked = %d\n", __func__, locked);
1475 1477
1478 if (stvdebug) {
1479 /* Print TS bitrate */
1480 tsbitrate0_val = stv0900_read_reg(intp, TSBITRATE0);
1481 tsbitrate1_val = stv0900_read_reg(intp, TSBITRATE1);
1482 /* Formula Bit rate = Mclk * px_tsfifo_bitrate / 16384 */
1483 bitrate = (stv0900_get_mclk_freq(intp, intp->quartz)/1000000)
1484 * (tsbitrate1_val << 8 | tsbitrate0_val);
1485 bitrate /= 16384;
1486 dprintk("TS bitrate = %d Mbit/sec \n", bitrate);
1487 }
1488
1476 return locked; 1489 return locked;
1477} 1490}
1478 1491
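The debug print added above follows the formula in the comment: bitrate [Mbit/s] = Mclk [MHz] * px_tsfifo_bitrate / 16384, with px_tsfifo_bitrate assembled from the two TSBITRATE registers. A tiny worked example with hypothetical values (the 135 MHz master clock and the register contents are assumptions, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int mclk_mhz = 135;				/* assumed master clock */
	unsigned char tsbitrate1 = 0x0f, tsbitrate0 = 0x00;	/* assumed register values */
	int bitrate = mclk_mhz * (tsbitrate1 << 8 | tsbitrate0) / 16384;

	printf("TS bitrate = %d Mbit/s\n", bitrate);	/* 135 * 3840 / 16384 = 31 */
	return 0;
}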
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 48edd542242e..1573466a5c74 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -3597,7 +3597,8 @@ static int stv090x_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_ma
3597 3597
3598 reg = STV090x_READ_DEMOD(state, DISTXCTL); 3598 reg = STV090x_READ_DEMOD(state, DISTXCTL);
3599 3599
3600 STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 2); 3600 STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD,
3601 (state->config->diseqc_envelope_mode) ? 4 : 2);
3601 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1); 3602 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
3602 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0) 3603 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3603 goto err; 3604 goto err;
@@ -3649,10 +3650,10 @@ static int stv090x_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t
3649 reg = STV090x_READ_DEMOD(state, DISTXCTL); 3650 reg = STV090x_READ_DEMOD(state, DISTXCTL);
3650 3651
3651 if (burst == SEC_MINI_A) { 3652 if (burst == SEC_MINI_A) {
3652 mode = 3; 3653 mode = (state->config->diseqc_envelope_mode) ? 5 : 3;
3653 value = 0x00; 3654 value = 0x00;
3654 } else { 3655 } else {
3655 mode = 2; 3656 mode = (state->config->diseqc_envelope_mode) ? 4 : 2;
3656 value = 0xFF; 3657 value = 0xFF;
3657 } 3658 }
3658 3659
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
index e968c98bb70f..b133807663ea 100644
--- a/drivers/media/dvb/frontends/stv090x.h
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -75,6 +75,8 @@ struct stv090x_config {
75 75
76 enum stv090x_i2crpt repeater_level; 76 enum stv090x_i2crpt repeater_level;
77 77
78 bool diseqc_envelope_mode;
79
78 int (*tuner_init) (struct dvb_frontend *fe); 80 int (*tuner_init) (struct dvb_frontend *fe);
79 int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode); 81 int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
80 int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency); 82 int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 266033ae2784..68bf9fbd8fed 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -662,7 +662,7 @@ adapter_error:
662 return rc; 662 return rc;
663} 663}
664 664
665int smsdvb_module_init(void) 665static int __init smsdvb_module_init(void)
666{ 666{
667 int rc; 667 int rc;
668 668
@@ -676,7 +676,7 @@ int smsdvb_module_init(void)
676 return rc; 676 return rc;
677} 677}
678 678
679void smsdvb_module_exit(void) 679static void __exit smsdvb_module_exit(void)
680{ 680{
681 smscore_unregister_hotplug(smsdvb_hotplug); 681 smscore_unregister_hotplug(smsdvb_hotplug);
682 682
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
index 24206cbda264..195244a3e69b 100644
--- a/drivers/media/dvb/siano/smssdio.c
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -48,7 +48,7 @@
48#define SMSSDIO_INT 0x04 48#define SMSSDIO_INT 0x04
49#define SMSSDIO_BLOCK_SIZE 128 49#define SMSSDIO_BLOCK_SIZE 128
50 50
51static const struct sdio_device_id smssdio_ids[] = { 51static const struct sdio_device_id smssdio_ids[] __devinitconst = {
52 {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR), 52 {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
53 .driver_data = SMS1XXX_BOARD_SIANO_STELLAR}, 53 .driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
54 {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0), 54 {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
@@ -222,7 +222,7 @@ static void smssdio_interrupt(struct sdio_func *func)
222 smscore_onresponse(smsdev->coredev, cb); 222 smscore_onresponse(smsdev->coredev, cb);
223} 223}
224 224
225static int smssdio_probe(struct sdio_func *func, 225static int __devinit smssdio_probe(struct sdio_func *func,
226 const struct sdio_device_id *id) 226 const struct sdio_device_id *id)
227{ 227{
228 int ret; 228 int ret;
@@ -338,7 +338,7 @@ static struct sdio_driver smssdio_driver = {
338/* Module functions */ 338/* Module functions */
339/*******************************************************************/ 339/*******************************************************************/
340 340
341int smssdio_module_init(void) 341static int __init smssdio_module_init(void)
342{ 342{
343 int ret = 0; 343 int ret = 0;
344 344
@@ -350,7 +350,7 @@ int smssdio_module_init(void)
350 return ret; 350 return ret;
351} 351}
352 352
353void smssdio_module_exit(void) 353static void __exit smssdio_module_exit(void)
354{ 354{
355 sdio_unregister_driver(&smssdio_driver); 355 sdio_unregister_driver(&smssdio_driver);
356} 356}
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 8f88a586b0dd..5eac27287d9c 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -390,7 +390,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
390 return rc; 390 return rc;
391} 391}
392 392
393static int smsusb_probe(struct usb_interface *intf, 393static int __devinit smsusb_probe(struct usb_interface *intf,
394 const struct usb_device_id *id) 394 const struct usb_device_id *id)
395{ 395{
396 struct usb_device *udev = interface_to_usbdev(intf); 396 struct usb_device *udev = interface_to_usbdev(intf);
@@ -484,7 +484,7 @@ static int smsusb_resume(struct usb_interface *intf)
484 return 0; 484 return 0;
485} 485}
486 486
487struct usb_device_id smsusb_id_table[] = { 487static const struct usb_device_id smsusb_id_table[] __devinitconst = {
488 { USB_DEVICE(0x187f, 0x0010), 488 { USB_DEVICE(0x187f, 0x0010),
489 .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, 489 .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
490 { USB_DEVICE(0x187f, 0x0100), 490 { USB_DEVICE(0x187f, 0x0100),
@@ -533,8 +533,18 @@ struct usb_device_id smsusb_id_table[] = {
533 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 533 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
534 { USB_DEVICE(0x2040, 0xb910), 534 { USB_DEVICE(0x2040, 0xb910),
535 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 535 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
536 { USB_DEVICE(0x2040, 0xb980),
537 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
538 { USB_DEVICE(0x2040, 0xb990),
539 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
536 { USB_DEVICE(0x2040, 0xc000), 540 { USB_DEVICE(0x2040, 0xc000),
537 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 541 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
542 { USB_DEVICE(0x2040, 0xc010),
543 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
544 { USB_DEVICE(0x2040, 0xc080),
545 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
546 { USB_DEVICE(0x2040, 0xc090),
547 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
538 { } /* Terminating entry */ 548 { } /* Terminating entry */
539 }; 549 };
540 550
@@ -550,7 +560,7 @@ static struct usb_driver smsusb_driver = {
550 .resume = smsusb_resume, 560 .resume = smsusb_resume,
551}; 561};
552 562
553int smsusb_module_init(void) 563static int __init smsusb_module_init(void)
554{ 564{
555 int rc = usb_register(&smsusb_driver); 565 int rc = usb_register(&smsusb_driver);
556 if (rc) 566 if (rc)
@@ -561,7 +571,7 @@ int smsusb_module_init(void)
561 return rc; 571 return rc;
562} 572}
563 573
564void smsusb_module_exit(void) 574static void __exit smsusb_module_exit(void)
565{ 575{
566 /* Regular USB Cleanup */ 576 /* Regular USB Cleanup */
567 usb_deregister(&smsusb_driver); 577 usb_deregister(&smsusb_driver);
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 7d193ebc0aea..9782e0593733 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -190,12 +190,13 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
190 struct saa7146_dev *saa = budget_ci->budget.dev; 190 struct saa7146_dev *saa = budget_ci->budget.dev;
191 struct input_dev *input_dev = budget_ci->ir.dev; 191 struct input_dev *input_dev = budget_ci->ir.dev;
192 int error; 192 int error;
193 struct ir_scancode_table *ir_codes;
194
193 195
194 budget_ci->ir.dev = input_dev = input_allocate_device(); 196 budget_ci->ir.dev = input_dev = input_allocate_device();
195 if (!input_dev) { 197 if (!input_dev) {
196 printk(KERN_ERR "budget_ci: IR interface initialisation failed\n"); 198 printk(KERN_ERR "budget_ci: IR interface initialisation failed\n");
197 error = -ENOMEM; 199 return -ENOMEM;
198 goto out1;
199 } 200 }
200 201
201 snprintf(budget_ci->ir.name, sizeof(budget_ci->ir.name), 202 snprintf(budget_ci->ir.name, sizeof(budget_ci->ir.name),
@@ -217,6 +218,11 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
217 } 218 }
218 input_dev->dev.parent = &saa->pci->dev; 219 input_dev->dev.parent = &saa->pci->dev;
219 220
221 if (rc5_device < 0)
222 budget_ci->ir.rc5_device = IR_DEVICE_ANY;
223 else
224 budget_ci->ir.rc5_device = rc5_device;
225
220 /* Select keymap and address */ 226 /* Select keymap and address */
221 switch (budget_ci->budget.dev->pci->subsystem_device) { 227 switch (budget_ci->budget.dev->pci->subsystem_device) {
222 case 0x100c: 228 case 0x100c:
@@ -224,53 +230,34 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
224 case 0x1011: 230 case 0x1011:
225 case 0x1012: 231 case 0x1012:
226 /* The hauppauge keymap is a superset of these remotes */ 232 /* The hauppauge keymap is a superset of these remotes */
227 error = ir_input_init(input_dev, &budget_ci->ir.state, 233 ir_codes = &ir_codes_hauppauge_new_table;
228 IR_TYPE_RC5, &ir_codes_hauppauge_new_table);
229 if (error < 0)
230 goto out2;
231 234
232 if (rc5_device < 0) 235 if (rc5_device < 0)
233 budget_ci->ir.rc5_device = 0x1f; 236 budget_ci->ir.rc5_device = 0x1f;
234 else
235 budget_ci->ir.rc5_device = rc5_device;
236 break; 237 break;
237 case 0x1010: 238 case 0x1010:
238 case 0x1017: 239 case 0x1017:
239 case 0x101a: 240 case 0x101a:
240 /* for the Technotrend 1500 bundled remote */ 241 /* for the Technotrend 1500 bundled remote */
241 error = ir_input_init(input_dev, &budget_ci->ir.state, 242 ir_codes = &ir_codes_tt_1500_table;
242 IR_TYPE_RC5, &ir_codes_tt_1500_table);
243 if (error < 0)
244 goto out2;
245
246 if (rc5_device < 0)
247 budget_ci->ir.rc5_device = IR_DEVICE_ANY;
248 else
249 budget_ci->ir.rc5_device = rc5_device;
250 break; 243 break;
251 default: 244 default:
252 /* unknown remote */ 245 /* unknown remote */
253 error = ir_input_init(input_dev, &budget_ci->ir.state, 246 ir_codes = &ir_codes_budget_ci_old_table;
254 IR_TYPE_RC5, &ir_codes_budget_ci_old_table);
255 if (error < 0)
256 goto out2;
257
258 if (rc5_device < 0)
259 budget_ci->ir.rc5_device = IR_DEVICE_ANY;
260 else
261 budget_ci->ir.rc5_device = rc5_device;
262 break; 247 break;
263 } 248 }
264 249
250 ir_input_init(input_dev, &budget_ci->ir.state, IR_TYPE_RC5);
251
265 /* initialise the key-up timeout handler */ 252 /* initialise the key-up timeout handler */
266 init_timer(&budget_ci->ir.timer_keyup); 253 init_timer(&budget_ci->ir.timer_keyup);
267 budget_ci->ir.timer_keyup.function = msp430_ir_keyup; 254 budget_ci->ir.timer_keyup.function = msp430_ir_keyup;
268 budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir; 255 budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir;
269 budget_ci->ir.last_raw = 0xffff; /* An impossible value */ 256 budget_ci->ir.last_raw = 0xffff; /* An impossible value */
270 error = input_register_device(input_dev); 257 error = ir_input_register(input_dev, ir_codes);
271 if (error) { 258 if (error) {
272 printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error); 259 printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
273 goto out2; 260 return error;
274 } 261 }
275 262
276 /* note: these must be after input_register_device */ 263 /* note: these must be after input_register_device */
@@ -284,12 +271,6 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
284 saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI); 271 saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI);
285 272
286 return 0; 273 return 0;
287
288out2:
289 ir_input_free(input_dev);
290 input_free_device(input_dev);
291out1:
292 return error;
293} 274}
294 275
295static void msp430_ir_deinit(struct budget_ci *budget_ci) 276static void msp430_ir_deinit(struct budget_ci *budget_ci)
@@ -304,8 +285,7 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci)
304 del_timer_sync(&dev->timer); 285 del_timer_sync(&dev->timer);
305 ir_input_nokey(dev, &budget_ci->ir.state); 286 ir_input_nokey(dev, &budget_ci->ir.state);
306 287
307 ir_input_free(dev); 288 ir_input_unregister(dev);
308 input_unregister_device(dev);
309} 289}
310 290
311static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) 291static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 4c2b8a246772..3f40f375981b 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -215,13 +215,10 @@ config RADIO_MIROPCM20
215 module will be called radio-miropcm20. 215 module will be called radio-miropcm20.
216 216
217config RADIO_SF16FMI 217config RADIO_SF16FMI
218 tristate "SF16FMI Radio" 218 tristate "SF16-FMI/SF16-FMP Radio"
219 depends on ISA && VIDEO_V4L2 219 depends on ISA && VIDEO_V4L2
220 ---help--- 220 ---help---
221 Choose Y here if you have one of these FM radio cards. If you 221 Choose Y here if you have one of these FM radio cards.
222 compile the driver into the kernel and your card is not PnP one, you
223 have to add "sf16fm=<io>" to the kernel command line (I/O address is
224 0x284 or 0x384).
225 222
226 In order to control your radio card, you will need to use programs 223 In order to control your radio card, you will need to use programs
227 that are compatible with the Video For Linux API. Information on 224 that are compatible with the Video For Linux API. Information on
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 35edee009ba8..5bf4985daede 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -268,6 +268,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
268{ 268{
269 struct rtrack *rt = video_drvdata(file); 269 struct rtrack *rt = video_drvdata(file);
270 270
271 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
272 return -EINVAL;
271 rt_setfreq(rt, f->frequency); 273 rt_setfreq(rt, f->frequency);
272 return 0; 274 return 0;
273} 275}
@@ -277,6 +279,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
277{ 279{
278 struct rtrack *rt = video_drvdata(file); 280 struct rtrack *rt = video_drvdata(file);
279 281
282 if (f->tuner != 0)
283 return -EINVAL;
280 f->type = V4L2_TUNER_RADIO; 284 f->type = V4L2_TUNER_RADIO;
281 f->frequency = rt->curfreq; 285 f->frequency = rt->curfreq;
282 return 0; 286 return 0;
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 8daf809eb01a..c22311393624 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -254,6 +254,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
254{ 254{
255 struct aztech *az = video_drvdata(file); 255 struct aztech *az = video_drvdata(file);
256 256
257 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
258 return -EINVAL;
257 az_setfreq(az, f->frequency); 259 az_setfreq(az, f->frequency);
258 return 0; 260 return 0;
259} 261}
@@ -263,6 +265,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
263{ 265{
264 struct aztech *az = video_drvdata(file); 266 struct aztech *az = video_drvdata(file);
265 267
268 if (f->tuner != 0)
269 return -EINVAL;
266 f->type = V4L2_TUNER_RADIO; 270 f->type = V4L2_TUNER_RADIO;
267 f->frequency = az->curfreq; 271 f->frequency = az->curfreq;
268 return 0; 272 return 0;
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index c6cf11661868..000f4d34087c 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -240,6 +240,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
240{ 240{
241 struct gemtek_pci *card = video_drvdata(file); 241 struct gemtek_pci *card = video_drvdata(file);
242 242
243 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
244 return -EINVAL;
243 if (f->frequency < GEMTEK_PCI_RANGE_LOW || 245 if (f->frequency < GEMTEK_PCI_RANGE_LOW ||
244 f->frequency > GEMTEK_PCI_RANGE_HIGH) 246 f->frequency > GEMTEK_PCI_RANGE_HIGH)
245 return -EINVAL; 247 return -EINVAL;
@@ -253,6 +255,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
253{ 255{
254 struct gemtek_pci *card = video_drvdata(file); 256 struct gemtek_pci *card = video_drvdata(file);
255 257
258 if (f->tuner != 0)
259 return -EINVAL;
256 f->type = V4L2_TUNER_RADIO; 260 f->type = V4L2_TUNER_RADIO;
257 f->frequency = card->current_frequency; 261 f->frequency = card->current_frequency;
258 return 0; 262 return 0;
diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c
index 64d737c35acf..f8213b7c8ddc 100644
--- a/drivers/media/radio/radio-maestro.c
+++ b/drivers/media/radio/radio-maestro.c
@@ -200,6 +200,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
200{ 200{
201 struct maestro *dev = video_drvdata(file); 201 struct maestro *dev = video_drvdata(file);
202 202
203 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
204 return -EINVAL;
203 if (f->frequency < FREQ_LO || f->frequency > FREQ_HI) 205 if (f->frequency < FREQ_LO || f->frequency > FREQ_HI)
204 return -EINVAL; 206 return -EINVAL;
205 mutex_lock(&dev->lock); 207 mutex_lock(&dev->lock);
@@ -213,6 +215,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
213{ 215{
214 struct maestro *dev = video_drvdata(file); 216 struct maestro *dev = video_drvdata(file);
215 217
218 if (f->tuner != 0)
219 return -EINVAL;
216 f->type = V4L2_TUNER_RADIO; 220 f->type = V4L2_TUNER_RADIO;
217 mutex_lock(&dev->lock); 221 mutex_lock(&dev->lock);
218 f->frequency = BITS2FREQ(radio_bits_get(dev)); 222 f->frequency = BITS2FREQ(radio_bits_get(dev));
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 3da51fe8fb93..44b4dbedb322 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -262,6 +262,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
262{ 262{
263 struct maxiradio *dev = video_drvdata(file); 263 struct maxiradio *dev = video_drvdata(file);
264 264
265 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
266 return -EINVAL;
265 if (f->frequency < FREQ_LO || f->frequency > FREQ_HI) { 267 if (f->frequency < FREQ_LO || f->frequency > FREQ_HI) {
266 dprintk(dev, 1, "radio freq (%d.%02d MHz) out of range (%d-%d)\n", 268 dprintk(dev, 1, "radio freq (%d.%02d MHz) out of range (%d-%d)\n",
267 f->frequency / 16000, 269 f->frequency / 16000,
@@ -285,6 +287,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
285{ 287{
286 struct maxiradio *dev = video_drvdata(file); 288 struct maxiradio *dev = video_drvdata(file);
287 289
290 if (f->tuner != 0)
291 return -EINVAL;
288 f->type = V4L2_TUNER_RADIO; 292 f->type = V4L2_TUNER_RADIO;
289 f->frequency = dev->freq; 293 f->frequency = dev->freq;
290 294
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 949f60513d9e..02a9cefc9a00 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -374,6 +374,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
374{ 374{
375 struct amradio_device *radio = file->private_data; 375 struct amradio_device *radio = file->private_data;
376 376
377 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
378 return -EINVAL;
377 return amradio_setfreq(radio, f->frequency); 379 return amradio_setfreq(radio, f->frequency);
378} 380}
379 381
@@ -383,6 +385,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
383{ 385{
384 struct amradio_device *radio = file->private_data; 386 struct amradio_device *radio = file->private_data;
385 387
388 if (f->tuner != 0)
389 return -EINVAL;
386 f->type = V4L2_TUNER_RADIO; 390 f->type = V4L2_TUNER_RADIO;
387 f->frequency = radio->curfreq; 391 f->frequency = radio->curfreq;
388 392
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 9cb193fa6e33..a79296aac9a9 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -167,6 +167,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
167{ 167{
168 struct rtrack2 *rt = video_drvdata(file); 168 struct rtrack2 *rt = video_drvdata(file);
169 169
170 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
171 return -EINVAL;
170 rt_setfreq(rt, f->frequency); 172 rt_setfreq(rt, f->frequency);
171 return 0; 173 return 0;
172} 174}
@@ -176,6 +178,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
176{ 178{
177 struct rtrack2 *rt = video_drvdata(file); 179 struct rtrack2 *rt = video_drvdata(file);
178 180
181 if (f->tuner != 0)
182 return -EINVAL;
179 f->type = V4L2_TUNER_RADIO; 183 f->type = V4L2_TUNER_RADIO;
180 f->frequency = rt->curfreq; 184 f->frequency = rt->curfreq;
181 return 0; 185 return 0;
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 49c4aab95dab..985359d18aa5 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -1,4 +1,4 @@
1/* SF16FMI radio driver for Linux radio support 1/* SF16-FMI and SF16-FMP radio driver for Linux radio support
2 * heavily based on rtrack driver... 2 * heavily based on rtrack driver...
3 * (c) 1997 M. Kirkwood 3 * (c) 1997 M. Kirkwood
4 * (c) 1998 Petr Vandrovec, vandrove@vc.cvut.cz 4 * (c) 1998 Petr Vandrovec, vandrove@vc.cvut.cz
@@ -11,7 +11,7 @@
11 * 11 *
12 * Frequency control is done digitally -- ie out(port,encodefreq(95.8)); 12 * Frequency control is done digitally -- ie out(port,encodefreq(95.8));
13 * No volume control - only mute/unmute - you have to use line volume 13 * No volume control - only mute/unmute - you have to use line volume
14 * control on SB-part of SF16FMI 14 * control on SB-part of SF16-FMI/SF16-FMP
15 * 15 *
16 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> 16 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
17 */ 17 */
@@ -30,14 +30,14 @@
30#include <media/v4l2-ioctl.h> 30#include <media/v4l2-ioctl.h>
31 31
32MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood"); 32MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
33MODULE_DESCRIPTION("A driver for the SF16MI radio."); 33MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio.");
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35 35
36static int io = -1; 36static int io = -1;
37static int radio_nr = -1; 37static int radio_nr = -1;
38 38
39module_param(io, int, 0); 39module_param(io, int, 0);
40MODULE_PARM_DESC(io, "I/O address of the SF16MI card (0x284 or 0x384)"); 40MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)");
41module_param(radio_nr, int, 0); 41module_param(radio_nr, int, 0);
42 42
43#define RADIO_VERSION KERNEL_VERSION(0, 0, 2) 43#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
@@ -47,13 +47,14 @@ struct fmi
47 struct v4l2_device v4l2_dev; 47 struct v4l2_device v4l2_dev;
48 struct video_device vdev; 48 struct video_device vdev;
49 int io; 49 int io;
50 int curvol; /* 1 or 0 */ 50 bool mute;
51 unsigned long curfreq; /* freq in kHz */ 51 unsigned long curfreq; /* freq in kHz */
52 struct mutex lock; 52 struct mutex lock;
53}; 53};
54 54
55static struct fmi fmi_card; 55static struct fmi fmi_card;
56static struct pnp_dev *dev; 56static struct pnp_dev *dev;
57static bool pnp_attached;
57 58
58/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */ 59/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */
59/* It is only useful to give freq in interval of 800 (=0.05Mhz), 60/* It is only useful to give freq in interval of 800 (=0.05Mhz),
@@ -105,7 +106,7 @@ static inline int fmi_setfreq(struct fmi *fmi, unsigned long freq)
105 outbits(8, 0xC0, fmi->io); 106 outbits(8, 0xC0, fmi->io);
106 msleep(143); /* was schedule_timeout(HZ/7) */ 107 msleep(143); /* was schedule_timeout(HZ/7) */
107 mutex_unlock(&fmi->lock); 108 mutex_unlock(&fmi->lock);
108 if (fmi->curvol) 109 if (!fmi->mute)
109 fmi_unmute(fmi); 110 fmi_unmute(fmi);
110 return 0; 111 return 0;
111} 112}
@@ -116,7 +117,7 @@ static inline int fmi_getsigstr(struct fmi *fmi)
116 int res; 117 int res;
117 118
118 mutex_lock(&fmi->lock); 119 mutex_lock(&fmi->lock);
119 val = fmi->curvol ? 0x08 : 0x00; /* unmute/mute */ 120 val = fmi->mute ? 0x00 : 0x08; /* mute/unmute */
120 outb(val, fmi->io); 121 outb(val, fmi->io);
121 outb(val | 0x10, fmi->io); 122 outb(val | 0x10, fmi->io);
122 msleep(143); /* was schedule_timeout(HZ/7) */ 123 msleep(143); /* was schedule_timeout(HZ/7) */
@@ -168,6 +169,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
168{ 169{
169 struct fmi *fmi = video_drvdata(file); 170 struct fmi *fmi = video_drvdata(file);
170 171
172 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
173 return -EINVAL;
171 if (f->frequency < RSF16_MINFREQ || 174 if (f->frequency < RSF16_MINFREQ ||
172 f->frequency > RSF16_MAXFREQ) 175 f->frequency > RSF16_MAXFREQ)
173 return -EINVAL; 176 return -EINVAL;
@@ -182,6 +185,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
182{ 185{
183 struct fmi *fmi = video_drvdata(file); 186 struct fmi *fmi = video_drvdata(file);
184 187
188 if (f->tuner != 0)
189 return -EINVAL;
185 f->type = V4L2_TUNER_RADIO; 190 f->type = V4L2_TUNER_RADIO;
186 f->frequency = fmi->curfreq; 191 f->frequency = fmi->curfreq;
187 return 0; 192 return 0;
@@ -204,7 +209,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
204 209
205 switch (ctrl->id) { 210 switch (ctrl->id) {
206 case V4L2_CID_AUDIO_MUTE: 211 case V4L2_CID_AUDIO_MUTE:
207 ctrl->value = fmi->curvol; 212 ctrl->value = fmi->mute;
208 return 0; 213 return 0;
209 } 214 }
210 return -EINVAL; 215 return -EINVAL;
@@ -221,7 +226,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
221 fmi_mute(fmi); 226 fmi_mute(fmi);
222 else 227 else
223 fmi_unmute(fmi); 228 fmi_unmute(fmi);
224 fmi->curvol = ctrl->value; 229 fmi->mute = ctrl->value;
225 return 0; 230 return 0;
226 } 231 }
227 return -EINVAL; 232 return -EINVAL;
@@ -316,26 +321,54 @@ static int __init fmi_init(void)
316{ 321{
317 struct fmi *fmi = &fmi_card; 322 struct fmi *fmi = &fmi_card;
318 struct v4l2_device *v4l2_dev = &fmi->v4l2_dev; 323 struct v4l2_device *v4l2_dev = &fmi->v4l2_dev;
319 int res; 324 int res, i;
325 int probe_ports[] = { 0, 0x284, 0x384 };
326
327 if (io < 0) {
328 for (i = 0; i < ARRAY_SIZE(probe_ports); i++) {
329 io = probe_ports[i];
330 if (io == 0) {
331 io = isapnp_fmi_probe();
332 if (io < 0)
333 continue;
334 pnp_attached = 1;
335 }
336 if (!request_region(io, 2, "radio-sf16fmi")) {
337 if (pnp_attached)
338 pnp_device_detach(dev);
339 io = -1;
340 continue;
341 }
342 if (pnp_attached ||
343 ((inb(io) & 0xf9) == 0xf9 && (inb(io) & 0x4) == 0))
344 break;
345 release_region(io, 2);
346 io = -1;
347 }
348 } else {
349 if (!request_region(io, 2, "radio-sf16fmi")) {
350 printk(KERN_ERR "radio-sf16fmi: port %#x already in use\n", io);
351 return -EBUSY;
352 }
353 if (inb(io) == 0xff) {
354 printk(KERN_ERR "radio-sf16fmi: card not present at %#x\n", io);
355 release_region(io, 2);
356 return -ENODEV;
357 }
358 }
359 if (io < 0) {
360 printk(KERN_ERR "radio-sf16fmi: no cards found\n");
361 return -ENODEV;
362 }
320 363
321 if (io < 0)
322 io = isapnp_fmi_probe();
323 strlcpy(v4l2_dev->name, "sf16fmi", sizeof(v4l2_dev->name)); 364 strlcpy(v4l2_dev->name, "sf16fmi", sizeof(v4l2_dev->name));
324 fmi->io = io; 365 fmi->io = io;
325 if (fmi->io < 0) {
326 v4l2_err(v4l2_dev, "No PnP card found.\n");
327 return fmi->io;
328 }
329 if (!request_region(io, 2, "radio-sf16fmi")) {
330 v4l2_err(v4l2_dev, "port 0x%x already in use\n", fmi->io);
331 pnp_device_detach(dev);
332 return -EBUSY;
333 }
334 366
335 res = v4l2_device_register(NULL, v4l2_dev); 367 res = v4l2_device_register(NULL, v4l2_dev);
336 if (res < 0) { 368 if (res < 0) {
337 release_region(fmi->io, 2); 369 release_region(fmi->io, 2);
338 pnp_device_detach(dev); 370 if (pnp_attached)
371 pnp_device_detach(dev);
339 v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); 372 v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
340 return res; 373 return res;
341 } 374 }
@@ -352,7 +385,8 @@ static int __init fmi_init(void)
352 if (video_register_device(&fmi->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { 385 if (video_register_device(&fmi->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
353 v4l2_device_unregister(v4l2_dev); 386 v4l2_device_unregister(v4l2_dev);
354 release_region(fmi->io, 2); 387 release_region(fmi->io, 2);
355 pnp_device_detach(dev); 388 if (pnp_attached)
389 pnp_device_detach(dev);
356 return -EINVAL; 390 return -EINVAL;
357 } 391 }
358 392
@@ -369,7 +403,7 @@ static void __exit fmi_exit(void)
369 video_unregister_device(&fmi->vdev); 403 video_unregister_device(&fmi->vdev);
370 v4l2_device_unregister(&fmi->v4l2_dev); 404 v4l2_device_unregister(&fmi->v4l2_dev);
371 release_region(fmi->io, 2); 405 release_region(fmi->io, 2);
372 if (dev) 406 if (dev && pnp_attached)
373 pnp_device_detach(dev); 407 pnp_device_detach(dev);
374} 408}
375 409
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index a11414f648d4..52c7bbb32b8b 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -251,6 +251,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
251{ 251{
252 struct fmr2 *fmr2 = video_drvdata(file); 252 struct fmr2 *fmr2 = video_drvdata(file);
253 253
254 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
255 return -EINVAL;
254 if (f->frequency < RSF16_MINFREQ || 256 if (f->frequency < RSF16_MINFREQ ||
255 f->frequency > RSF16_MAXFREQ) 257 f->frequency > RSF16_MAXFREQ)
256 return -EINVAL; 258 return -EINVAL;
@@ -272,6 +274,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
272{ 274{
273 struct fmr2 *fmr2 = video_drvdata(file); 275 struct fmr2 *fmr2 = video_drvdata(file);
274 276
277 if (f->tuner != 0)
278 return -EINVAL;
275 f->type = V4L2_TUNER_RADIO; 279 f->type = V4L2_TUNER_RADIO;
276 f->frequency = fmr2->curfreq; 280 f->frequency = fmr2->curfreq;
277 return 0; 281 return 0;
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 3cd76dddb6aa..8e718bfcdad3 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -314,7 +314,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
314 if (v->index > 0) 314 if (v->index > 0)
315 return -EINVAL; 315 return -EINVAL;
316 316
317 memset(v, 0, sizeof(v)); 317 memset(v, 0, sizeof(*v));
318 strcpy(v->name, "FM"); 318 strcpy(v->name, "FM");
319 v->type = V4L2_TUNER_RADIO; 319 v->type = V4L2_TUNER_RADIO;
320 tea5764_i2c_read(radio); 320 tea5764_i2c_read(radio);
@@ -349,7 +349,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
349{ 349{
350 struct tea5764_device *radio = video_drvdata(file); 350 struct tea5764_device *radio = video_drvdata(file);
351 351
352 if (f->tuner != 0) 352 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
353 return -EINVAL; 353 return -EINVAL;
354 if (f->frequency == 0) { 354 if (f->frequency == 0) {
355 /* We special case this as a power down control. */ 355 /* We special case this as a power down control. */
@@ -370,8 +370,10 @@ static int vidioc_g_frequency(struct file *file, void *priv,
370 struct tea5764_device *radio = video_drvdata(file); 370 struct tea5764_device *radio = video_drvdata(file);
371 struct tea5764_regs *r = &radio->regs; 371 struct tea5764_regs *r = &radio->regs;
372 372
373 if (f->tuner != 0)
374 return -EINVAL;
373 tea5764_i2c_read(radio); 375 tea5764_i2c_read(radio);
374 memset(f, 0, sizeof(f)); 376 memset(f, 0, sizeof(*f));
375 f->type = V4L2_TUNER_RADIO; 377 f->type = V4L2_TUNER_RADIO;
376 if (r->tnctrl & TEA5764_TNCTRL_PUPD0) 378 if (r->tnctrl & TEA5764_TNCTRL_PUPD0)
377 f->frequency = (tea5764_get_freq(radio) * 2) / 125; 379 f->frequency = (tea5764_get_freq(radio) * 2) / 125;
@@ -458,12 +460,8 @@ static int vidioc_s_audio(struct file *file, void *priv,
458static int tea5764_open(struct file *file) 460static int tea5764_open(struct file *file)
459{ 461{
460 /* Currently we support only one device */ 462 /* Currently we support only one device */
461 int minor = video_devdata(file)->minor;
462 struct tea5764_device *radio = video_drvdata(file); 463 struct tea5764_device *radio = video_drvdata(file);
463 464
464 if (radio->videodev->minor != minor)
465 return -ENODEV;
466
467 mutex_lock(&radio->mutex); 465 mutex_lock(&radio->mutex);
468 /* Only exclusive access */ 466 /* Only exclusive access */
469 if (radio->users) { 467 if (radio->users) {
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index 699db9acaaf7..fc1c860fd438 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -240,6 +240,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
240{ 240{
241 struct terratec *tt = video_drvdata(file); 241 struct terratec *tt = video_drvdata(file);
242 242
243 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
244 return -EINVAL;
243 tt_setfreq(tt, f->frequency); 245 tt_setfreq(tt, f->frequency);
244 return 0; 246 return 0;
245} 247}
@@ -249,6 +251,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
249{ 251{
250 struct terratec *tt = video_drvdata(file); 252 struct terratec *tt = video_drvdata(file);
251 253
254 if (f->tuner != 0)
255 return -EINVAL;
252 f->type = V4L2_TUNER_RADIO; 256 f->type = V4L2_TUNER_RADIO;
253 f->frequency = tt->curfreq; 257 f->frequency = tt->curfreq;
254 return 0; 258 return 0;
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 6f9ecc359356..9d6dcf8af5b0 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -239,6 +239,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
239{ 239{
240 struct trust *tr = video_drvdata(file); 240 struct trust *tr = video_drvdata(file);
241 241
242 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
243 return -EINVAL;
242 tr_setfreq(tr, f->frequency); 244 tr_setfreq(tr, f->frequency);
243 return 0; 245 return 0;
244} 246}
@@ -248,6 +250,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
248{ 250{
249 struct trust *tr = video_drvdata(file); 251 struct trust *tr = video_drvdata(file);
250 252
253 if (f->tuner != 0)
254 return -EINVAL;
251 f->type = V4L2_TUNER_RADIO; 255 f->type = V4L2_TUNER_RADIO;
252 f->frequency = tr->curfreq; 256 f->frequency = tr->curfreq;
253 return 0; 257 return 0;
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 3a98f1399495..03439282dfce 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -207,6 +207,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
207{ 207{
208 struct typhoon *dev = video_drvdata(file); 208 struct typhoon *dev = video_drvdata(file);
209 209
210 if (f->tuner != 0)
211 return -EINVAL;
210 f->type = V4L2_TUNER_RADIO; 212 f->type = V4L2_TUNER_RADIO;
211 f->frequency = dev->curfreq; 213 f->frequency = dev->curfreq;
212 return 0; 214 return 0;
@@ -217,6 +219,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
217{ 219{
218 struct typhoon *dev = video_drvdata(file); 220 struct typhoon *dev = video_drvdata(file);
219 221
222 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
223 return -EINVAL;
220 dev->curfreq = f->frequency; 224 dev->curfreq = f->frequency;
221 typhoon_setfreq(dev, dev->curfreq); 225 typhoon_setfreq(dev, dev->curfreq);
222 return 0; 226 return 0;
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 80e98b6422fe..f31eab99c943 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -266,6 +266,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
266{ 266{
267 struct zoltrix *zol = video_drvdata(file); 267 struct zoltrix *zol = video_drvdata(file);
268 268
269 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
270 return -EINVAL;
269 if (zol_setfreq(zol, f->frequency) != 0) 271 if (zol_setfreq(zol, f->frequency) != 0)
270 return -EINVAL; 272 return -EINVAL;
271 return 0; 273 return 0;
@@ -276,6 +278,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
276{ 278{
277 struct zoltrix *zol = video_drvdata(file); 279 struct zoltrix *zol = video_drvdata(file);
278 280
281 if (f->tuner != 0)
282 return -EINVAL;
279 f->type = V4L2_TUNER_RADIO; 283 f->type = V4L2_TUNER_RADIO;
280 f->frequency = zol->curfreq; 284 f->frequency = zol->curfreq;
281 return 0; 285 return 0;
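
The hunks above add the same pair of guards to each of these legacy radio drivers: VIDIOC_S_FREQUENCY now rejects a request whose tuner index is non-zero or whose type is not V4L2_TUNER_RADIO, and VIDIOC_G_FREQUENCY rejects a non-zero tuner index; the tea5764 hunks additionally fix memset() calls that were sized to the pointer instead of the structure. A minimal userspace sketch of a call that passes the new checks follows; the device path and frequency value are examples only, and the unit of f.frequency depends on the tuner's V4L2_TUNER_CAP_LOW flag.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency f;
	int fd = open("/dev/radio0", O_RDONLY);	/* example node */

	if (fd < 0)
		return 1;

	memset(&f, 0, sizeof(f));	/* note: sizeof(f), not a pointer */
	f.tuner = 0;			/* only tuner 0 is accepted */
	f.type = V4L2_TUNER_RADIO;	/* any other type is now -EINVAL */
	f.frequency = 95 * 16000;	/* example value */
	if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0)
		perror("VIDIOC_S_FREQUENCY");
	close(fd);
	return 0;
}
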
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index f33315f2c543..4da0f150c6e2 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -426,6 +426,104 @@ int si470x_rds_on(struct si470x_device *radio)
426 426
427 427
428/************************************************************************** 428/**************************************************************************
429 * File Operations Interface
430 **************************************************************************/
431
432/*
433 * si470x_fops_read - read RDS data
434 */
435static ssize_t si470x_fops_read(struct file *file, char __user *buf,
436 size_t count, loff_t *ppos)
437{
438 struct si470x_device *radio = video_drvdata(file);
439 int retval = 0;
440 unsigned int block_count = 0;
441
442 /* switch on rds reception */
443 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
444 si470x_rds_on(radio);
445
446 /* block if no new data available */
447 while (radio->wr_index == radio->rd_index) {
448 if (file->f_flags & O_NONBLOCK) {
449 retval = -EWOULDBLOCK;
450 goto done;
451 }
452 if (wait_event_interruptible(radio->read_queue,
453 radio->wr_index != radio->rd_index) < 0) {
454 retval = -EINTR;
455 goto done;
456 }
457 }
458
459 /* calculate block count from byte count */
460 count /= 3;
461
462 /* copy RDS block out of internal buffer and to user buffer */
463 mutex_lock(&radio->lock);
464 while (block_count < count) {
465 if (radio->rd_index == radio->wr_index)
466 break;
467
468 /* always transfer rds complete blocks */
469 if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
470 /* retval = -EFAULT; */
471 break;
472
473 /* increment and wrap read pointer */
474 radio->rd_index += 3;
475 if (radio->rd_index >= radio->buf_size)
476 radio->rd_index = 0;
477
478 /* increment counters */
479 block_count++;
480 buf += 3;
481 retval += 3;
482 }
483 mutex_unlock(&radio->lock);
484
485done:
486 return retval;
487}
488
489
490/*
491 * si470x_fops_poll - poll RDS data
492 */
493static unsigned int si470x_fops_poll(struct file *file,
494 struct poll_table_struct *pts)
495{
496 struct si470x_device *radio = video_drvdata(file);
497 int retval = 0;
498
499 /* switch on rds reception */
500 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
501 si470x_rds_on(radio);
502
503 poll_wait(file, &radio->read_queue, pts);
504
505 if (radio->rd_index != radio->wr_index)
506 retval = POLLIN | POLLRDNORM;
507
508 return retval;
509}
510
511
512/*
513 * si470x_fops - file operations interface
514 */
515static const struct v4l2_file_operations si470x_fops = {
516 .owner = THIS_MODULE,
517 .read = si470x_fops_read,
518 .poll = si470x_fops_poll,
519 .ioctl = video_ioctl2,
520 .open = si470x_fops_open,
521 .release = si470x_fops_release,
522};
523
524
525
526/**************************************************************************
429 * Video4Linux Interface 527 * Video4Linux Interface
430 **************************************************************************/ 528 **************************************************************************/
431 529
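
Moving these read() and poll() handlers into radio-si470x-common.c means the USB and I2C front ends now export RDS data in the same format: read() returns whole 3-byte blocks, bytes 0-1 holding the 16-bit RDS block little-endian and byte 2 holding the block offset in its low bits plus error flags (0x40 corrected, 0x80 uncorrectable). A hypothetical consumer, using an example device path, could look like this:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[3 * 16];	/* room for a few whole blocks */
	int fd = open("/dev/radio0", O_RDONLY);	/* example node */
	ssize_t n, i;

	if (fd < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		for (i = 0; i + 3 <= n; i += 3) {
			uint16_t block = buf[i] | (buf[i + 1] << 8);
			unsigned char status = buf[i + 2];

			if (status & 0x80)
				continue;	/* uncorrectable, skip */
			printf("offset %u: 0x%04x%s\n",
			       (unsigned int)(status & 0x07), block,
			       (status & 0x40) ? " (corrected)" : "");
		}
	}
	close(fd);
	return 0;
}
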
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 2d53b6a9409b..5466015346a1 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -22,22 +22,17 @@
22 */ 22 */
23 23
24 24
25/*
26 * ToDo:
27 * - RDS support
28 */
29
30
31/* driver definitions */ 25/* driver definitions */
32#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>"; 26#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
33#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 0) 27#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 1)
34#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" 28#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
35#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers" 29#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
36#define DRIVER_VERSION "1.0.0" 30#define DRIVER_VERSION "1.0.1"
37 31
38/* kernel includes */ 32/* kernel includes */
39#include <linux/i2c.h> 33#include <linux/i2c.h>
40#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/interrupt.h>
41 36
42#include "radio-si470x.h" 37#include "radio-si470x.h"
43 38
@@ -62,6 +57,20 @@ static int radio_nr = -1;
62module_param(radio_nr, int, 0444); 57module_param(radio_nr, int, 0444);
63MODULE_PARM_DESC(radio_nr, "Radio Nr"); 58MODULE_PARM_DESC(radio_nr, "Radio Nr");
64 59
60/* RDS buffer blocks */
61static unsigned int rds_buf = 100;
62module_param(rds_buf, uint, 0444);
63MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*");
64
65/* RDS maximum block errors */
66static unsigned short max_rds_errors = 1;
67/* 0 means 0 errors requiring correction */
68/* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */
69/* 2 means 3-5 errors requiring correction */
70/* 3 means 6+ errors or errors in checkword, correction not possible */
71module_param(max_rds_errors, ushort, 0644);
72MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
73
65 74
66 75
67/************************************************************************** 76/**************************************************************************
@@ -173,7 +182,7 @@ int si470x_disconnect_check(struct si470x_device *radio)
173/* 182/*
174 * si470x_fops_open - file open 183 * si470x_fops_open - file open
175 */ 184 */
176static int si470x_fops_open(struct file *file) 185int si470x_fops_open(struct file *file)
177{ 186{
178 struct si470x_device *radio = video_drvdata(file); 187 struct si470x_device *radio = video_drvdata(file);
179 int retval = 0; 188 int retval = 0;
@@ -181,12 +190,21 @@ static int si470x_fops_open(struct file *file)
181 mutex_lock(&radio->lock); 190 mutex_lock(&radio->lock);
182 radio->users++; 191 radio->users++;
183 192
184 if (radio->users == 1) 193 if (radio->users == 1) {
185 /* start radio */ 194 /* start radio */
186 retval = si470x_start(radio); 195 retval = si470x_start(radio);
196 if (retval < 0)
197 goto done;
198
199 /* enable RDS interrupt */
200 radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDSIEN;
201 radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_GPIO2;
202 radio->registers[SYSCONFIG1] |= 0x1 << 2;
203 retval = si470x_set_register(radio, SYSCONFIG1);
204 }
187 205
206done:
188 mutex_unlock(&radio->lock); 207 mutex_unlock(&radio->lock);
189
190 return retval; 208 return retval;
191} 209}
192 210
@@ -194,7 +212,7 @@ static int si470x_fops_open(struct file *file)
194/* 212/*
195 * si470x_fops_release - file release 213 * si470x_fops_release - file release
196 */ 214 */
197static int si470x_fops_release(struct file *file) 215int si470x_fops_release(struct file *file)
198{ 216{
199 struct si470x_device *radio = video_drvdata(file); 217 struct si470x_device *radio = video_drvdata(file);
200 int retval = 0; 218 int retval = 0;
@@ -215,17 +233,6 @@ static int si470x_fops_release(struct file *file)
215} 233}
216 234
217 235
218/*
219 * si470x_fops - file operations interface
220 */
221const struct v4l2_file_operations si470x_fops = {
222 .owner = THIS_MODULE,
223 .ioctl = video_ioctl2,
224 .open = si470x_fops_open,
225 .release = si470x_fops_release,
226};
227
228
229 236
230/************************************************************************** 237/**************************************************************************
231 * Video4Linux Interface 238 * Video4Linux Interface
@@ -253,6 +260,105 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
253 **************************************************************************/ 260 **************************************************************************/
254 261
255/* 262/*
263 * si470x_i2c_interrupt_work - rds processing function
264 */
265static void si470x_i2c_interrupt_work(struct work_struct *work)
266{
267 struct si470x_device *radio = container_of(work,
268 struct si470x_device, radio_work);
269 unsigned char regnr;
270 unsigned char blocknum;
271 unsigned short bler; /* rds block errors */
272 unsigned short rds;
273 unsigned char tmpbuf[3];
274 int retval = 0;
275
276 /* safety checks */
277 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
278 return;
279
280 /* Update RDS registers */
281 for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++) {
282 retval = si470x_get_register(radio, STATUSRSSI + regnr);
283 if (retval < 0)
284 return;
285 }
286
287 /* get rds blocks */
288 if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0)
289 /* No RDS group ready, better luck next time */
290 return;
291
292 for (blocknum = 0; blocknum < 4; blocknum++) {
293 switch (blocknum) {
294 default:
295 bler = (radio->registers[STATUSRSSI] &
296 STATUSRSSI_BLERA) >> 9;
297 rds = radio->registers[RDSA];
298 break;
299 case 1:
300 bler = (radio->registers[READCHAN] &
301 READCHAN_BLERB) >> 14;
302 rds = radio->registers[RDSB];
303 break;
304 case 2:
305 bler = (radio->registers[READCHAN] &
306 READCHAN_BLERC) >> 12;
307 rds = radio->registers[RDSC];
308 break;
309 case 3:
310 bler = (radio->registers[READCHAN] &
311 READCHAN_BLERD) >> 10;
312 rds = radio->registers[RDSD];
313 break;
314 };
315
316 /* Fill the V4L2 RDS buffer */
317 put_unaligned_le16(rds, &tmpbuf);
318 tmpbuf[2] = blocknum; /* offset name */
319 tmpbuf[2] |= blocknum << 3; /* received offset */
320 if (bler > max_rds_errors)
321 tmpbuf[2] |= 0x80; /* uncorrectable errors */
322 else if (bler > 0)
323 tmpbuf[2] |= 0x40; /* corrected error(s) */
324
325 /* copy RDS block to internal buffer */
326 memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3);
327 radio->wr_index += 3;
328
329 /* wrap write pointer */
330 if (radio->wr_index >= radio->buf_size)
331 radio->wr_index = 0;
332
333 /* check for overflow */
334 if (radio->wr_index == radio->rd_index) {
335 /* increment and wrap read pointer */
336 radio->rd_index += 3;
337 if (radio->rd_index >= radio->buf_size)
338 radio->rd_index = 0;
339 }
340 }
341
342 if (radio->wr_index != radio->rd_index)
343 wake_up_interruptible(&radio->read_queue);
344}
345
346
347/*
348 * si470x_i2c_interrupt - interrupt handler
349 */
350static irqreturn_t si470x_i2c_interrupt(int irq, void *dev_id)
351{
352 struct si470x_device *radio = dev_id;
353
354 if (!work_pending(&radio->radio_work))
355 schedule_work(&radio->radio_work);
356
357 return IRQ_HANDLED;
358}
359
360
361/*
256 * si470x_i2c_probe - probe for the device 362 * si470x_i2c_probe - probe for the device
257 */ 363 */
258static int __devinit si470x_i2c_probe(struct i2c_client *client, 364static int __devinit si470x_i2c_probe(struct i2c_client *client,
@@ -268,6 +374,8 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
268 retval = -ENOMEM; 374 retval = -ENOMEM;
269 goto err_initial; 375 goto err_initial;
270 } 376 }
377
378 INIT_WORK(&radio->radio_work, si470x_i2c_interrupt_work);
271 radio->users = 0; 379 radio->users = 0;
272 radio->client = client; 380 radio->client = client;
273 mutex_init(&radio->lock); 381 mutex_init(&radio->lock);
@@ -319,6 +427,26 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
319 /* set initial frequency */ 427 /* set initial frequency */
320 si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ 428 si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
321 429
430 /* rds buffer allocation */
431 radio->buf_size = rds_buf * 3;
432 radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
433 if (!radio->buffer) {
434 retval = -EIO;
435 goto err_video;
436 }
437
438 /* rds buffer configuration */
439 radio->wr_index = 0;
440 radio->rd_index = 0;
441 init_waitqueue_head(&radio->read_queue);
442
443 retval = request_irq(client->irq, si470x_i2c_interrupt,
444 IRQF_TRIGGER_FALLING, DRIVER_NAME, radio);
445 if (retval) {
446 dev_err(&client->dev, "Failed to register interrupt\n");
447 goto err_rds;
448 }
449
322 /* register video device */ 450 /* register video device */
323 retval = video_register_device(radio->videodev, VFL_TYPE_RADIO, 451 retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
324 radio_nr); 452 radio_nr);
@@ -330,6 +458,9 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
330 458
331 return 0; 459 return 0;
332err_all: 460err_all:
461 free_irq(client->irq, radio);
462err_rds:
463 kfree(radio->buffer);
333err_video: 464err_video:
334 video_device_release(radio->videodev); 465 video_device_release(radio->videodev);
335err_radio: 466err_radio:
@@ -346,6 +477,8 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
346{ 477{
347 struct si470x_device *radio = i2c_get_clientdata(client); 478 struct si470x_device *radio = i2c_get_clientdata(client);
348 479
480 free_irq(client->irq, radio);
481 cancel_work_sync(&radio->radio_work);
349 video_unregister_device(radio->videodev); 482 video_unregister_device(radio->videodev);
350 kfree(radio); 483 kfree(radio);
351 i2c_set_clientdata(client, NULL); 484 i2c_set_clientdata(client, NULL);
@@ -354,6 +487,44 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
354} 487}
355 488
356 489
490#ifdef CONFIG_PM
491/*
492 * si470x_i2c_suspend - suspend the device
493 */
494static int si470x_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
495{
496 struct si470x_device *radio = i2c_get_clientdata(client);
497
498 /* power down */
499 radio->registers[POWERCFG] |= POWERCFG_DISABLE;
500 if (si470x_set_register(radio, POWERCFG) < 0)
501 return -EIO;
502
503 return 0;
504}
505
506
507/*
508 * si470x_i2c_resume - resume the device
509 */
510static int si470x_i2c_resume(struct i2c_client *client)
511{
512 struct si470x_device *radio = i2c_get_clientdata(client);
513
514 /* power up : need 110ms */
515 radio->registers[POWERCFG] |= POWERCFG_ENABLE;
516 if (si470x_set_register(radio, POWERCFG) < 0)
517 return -EIO;
518 msleep(110);
519
520 return 0;
521}
522#else
523#define si470x_i2c_suspend NULL
524#define si470x_i2c_resume NULL
525#endif
526
527
357/* 528/*
358 * si470x_i2c_driver - i2c driver interface 529 * si470x_i2c_driver - i2c driver interface
359 */ 530 */
@@ -364,6 +535,8 @@ static struct i2c_driver si470x_i2c_driver = {
364 }, 535 },
365 .probe = si470x_i2c_probe, 536 .probe = si470x_i2c_probe,
366 .remove = __devexit_p(si470x_i2c_remove), 537 .remove = __devexit_p(si470x_i2c_remove),
538 .suspend = si470x_i2c_suspend,
539 .resume = si470x_i2c_resume,
367 .id_table = si470x_i2c_id, 540 .id_table = si470x_i2c_id,
368}; 541};
369 542
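
The I2C front end now uses the usual interrupt/bottom-half split: si470x_i2c_interrupt() merely schedules a work item, and si470x_i2c_interrupt_work() performs the sleeping I2C register reads and fills the RDS ring buffer. A stripped-down sketch of that hand-off, with illustrative names only, is:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_radio {
	struct work_struct work;
	/* chip registers, locking, RDS ring buffer ... */
};

static void demo_work(struct work_struct *work)
{
	struct demo_radio *radio = container_of(work, struct demo_radio, work);

	/* sleeping I2C transfers and RDS buffering belong here */
	(void)radio;
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_radio *radio = dev_id;

	if (!work_pending(&radio->work))
		schedule_work(&radio->work);
	return IRQ_HANDLED;
}

/*
 * probe:  INIT_WORK(&radio->work, demo_work);
 *         request_irq(client->irq, demo_irq, IRQF_TRIGGER_FALLING,
 *                     "demo_radio", radio);
 * remove: free_irq(client->irq, radio);
 *         cancel_work_sync(&radio->work);
 */
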
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index f2d0e1ddb301..a96e1b9dd646 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -509,89 +509,9 @@ resubmit:
509 **************************************************************************/ 509 **************************************************************************/
510 510
511/* 511/*
512 * si470x_fops_read - read RDS data
513 */
514static ssize_t si470x_fops_read(struct file *file, char __user *buf,
515 size_t count, loff_t *ppos)
516{
517 struct si470x_device *radio = video_drvdata(file);
518 int retval = 0;
519 unsigned int block_count = 0;
520
521 /* switch on rds reception */
522 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
523 si470x_rds_on(radio);
524
525 /* block if no new data available */
526 while (radio->wr_index == radio->rd_index) {
527 if (file->f_flags & O_NONBLOCK) {
528 retval = -EWOULDBLOCK;
529 goto done;
530 }
531 if (wait_event_interruptible(radio->read_queue,
532 radio->wr_index != radio->rd_index) < 0) {
533 retval = -EINTR;
534 goto done;
535 }
536 }
537
538 /* calculate block count from byte count */
539 count /= 3;
540
541 /* copy RDS block out of internal buffer and to user buffer */
542 mutex_lock(&radio->lock);
543 while (block_count < count) {
544 if (radio->rd_index == radio->wr_index)
545 break;
546
547 /* always transfer rds complete blocks */
548 if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
549 /* retval = -EFAULT; */
550 break;
551
552 /* increment and wrap read pointer */
553 radio->rd_index += 3;
554 if (radio->rd_index >= radio->buf_size)
555 radio->rd_index = 0;
556
557 /* increment counters */
558 block_count++;
559 buf += 3;
560 retval += 3;
561 }
562 mutex_unlock(&radio->lock);
563
564done:
565 return retval;
566}
567
568
569/*
570 * si470x_fops_poll - poll RDS data
571 */
572static unsigned int si470x_fops_poll(struct file *file,
573 struct poll_table_struct *pts)
574{
575 struct si470x_device *radio = video_drvdata(file);
576 int retval = 0;
577
578 /* switch on rds reception */
579 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
580 si470x_rds_on(radio);
581
582 poll_wait(file, &radio->read_queue, pts);
583
584 if (radio->rd_index != radio->wr_index)
585 retval = POLLIN | POLLRDNORM;
586
587 return retval;
588}
589
590
591/*
592 * si470x_fops_open - file open 512 * si470x_fops_open - file open
593 */ 513 */
594static int si470x_fops_open(struct file *file) 514int si470x_fops_open(struct file *file)
595{ 515{
596 struct si470x_device *radio = video_drvdata(file); 516 struct si470x_device *radio = video_drvdata(file);
597 int retval; 517 int retval;
@@ -645,7 +565,7 @@ done:
645/* 565/*
646 * si470x_fops_release - file release 566 * si470x_fops_release - file release
647 */ 567 */
648static int si470x_fops_release(struct file *file) 568int si470x_fops_release(struct file *file)
649{ 569{
650 struct si470x_device *radio = video_drvdata(file); 570 struct si470x_device *radio = video_drvdata(file);
651 int retval = 0; 571 int retval = 0;
@@ -688,19 +608,6 @@ done:
688} 608}
689 609
690 610
691/*
692 * si470x_fops - file operations interface
693 */
694const struct v4l2_file_operations si470x_fops = {
695 .owner = THIS_MODULE,
696 .read = si470x_fops_read,
697 .poll = si470x_fops_poll,
698 .ioctl = video_ioctl2,
699 .open = si470x_fops_open,
700 .release = si470x_fops_release,
701};
702
703
704 611
705/************************************************************************** 612/**************************************************************************
706 * Video4Linux Interface 613 * Video4Linux Interface
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index d0af194d194c..3cd0a29cd6e7 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/sched.h>
32#include <linux/slab.h> 33#include <linux/slab.h>
33#include <linux/smp_lock.h> 34#include <linux/smp_lock.h>
34#include <linux/input.h> 35#include <linux/input.h>
@@ -181,6 +182,7 @@ struct si470x_device {
181 182
182#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE) 183#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
183 struct i2c_client *client; 184 struct i2c_client *client;
185 struct work_struct radio_work;
184#endif 186#endif
185}; 187};
186 188
@@ -212,7 +214,6 @@ struct si470x_device {
212/************************************************************************** 214/**************************************************************************
213 * Common Functions 215 * Common Functions
214 **************************************************************************/ 216 **************************************************************************/
215extern const struct v4l2_file_operations si470x_fops;
216extern struct video_device si470x_viddev_template; 217extern struct video_device si470x_viddev_template;
217int si470x_get_register(struct si470x_device *radio, int regnr); 218int si470x_get_register(struct si470x_device *radio, int regnr);
218int si470x_set_register(struct si470x_device *radio, int regnr); 219int si470x_set_register(struct si470x_device *radio, int regnr);
@@ -221,5 +222,7 @@ int si470x_set_freq(struct si470x_device *radio, unsigned int freq);
221int si470x_start(struct si470x_device *radio); 222int si470x_start(struct si470x_device *radio);
222int si470x_stop(struct si470x_device *radio); 223int si470x_stop(struct si470x_device *radio);
223int si470x_rds_on(struct si470x_device *radio); 224int si470x_rds_on(struct si470x_device *radio);
225int si470x_fops_open(struct file *file);
226int si470x_fops_release(struct file *file);
224int si470x_vidioc_querycap(struct file *file, void *priv, 227int si470x_vidioc_querycap(struct file *file, void *priv,
225 struct v4l2_capability *capability); 228 struct v4l2_capability *capability);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9dc74c93bf24..2f83be766d9f 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -37,10 +37,6 @@ config VIDEO_BTCX
37 depends on PCI 37 depends on PCI
38 tristate 38 tristate
39 39
40config VIDEO_IR
41 tristate
42 depends on INPUT
43
44config VIDEO_TVEEPROM 40config VIDEO_TVEEPROM
45 tristate 41 tristate
46 depends on I2C 42 depends on I2C
@@ -840,6 +836,12 @@ config SOC_CAMERA_MT9T031
840 help 836 help
841 This driver supports MT9T031 cameras from Micron. 837 This driver supports MT9T031 cameras from Micron.
842 838
839config SOC_CAMERA_MT9T112
840 tristate "mt9t112 support"
841 depends on SOC_CAMERA && I2C
842 help
843 This driver supports MT9T112 cameras from Aptina.
844
843config SOC_CAMERA_MT9V022 845config SOC_CAMERA_MT9V022
844 tristate "mt9v022 support" 846 tristate "mt9v022 support"
845 depends on SOC_CAMERA && I2C 847 depends on SOC_CAMERA && I2C
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 7a2dcc34111c..2af68ee84122 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
75obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o 75obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
76obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o 76obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
77obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o 77obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
78obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
78obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o 79obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
79obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o 80obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
80obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o 81obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
@@ -149,7 +150,7 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o
149obj-$(CONFIG_VIDEO_CX23885) += cx23885/ 150obj-$(CONFIG_VIDEO_CX23885) += cx23885/
150 151
151obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o 152obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
152obj-$(CONFIG_SOC_CAMERA) += soc_camera.o 153obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
153obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o 154obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
154# soc-camera host drivers have to be linked after camera drivers 155# soc-camera host drivers have to be linked after camera drivers
155obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o 156obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index d137bac84511..a356d6bd3131 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -767,7 +767,6 @@ static struct video_device ar_template = {
767 .name = "Colour AR VGA", 767 .name = "Colour AR VGA",
768 .fops = &ar_fops, 768 .fops = &ar_fops,
769 .release = ar_release, 769 .release = ar_release,
770 .minor = -1,
771}; 770};
772 771
773#define ALIGN4(x) ((((int)(x)) & 0x3) == 0) 772#define ALIGN4(x) ((((int)(x)) & 0x3) == 0)
@@ -860,8 +859,8 @@ static int __init ar_init(void)
860 goto out_dev; 859 goto out_dev;
861 } 860 }
862 861
863 printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n", 862 printk("%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
864 ar->vdev->num, M32R_IRQ_INT3, freq); 863 video_device_node_name(ar->vdev), M32R_IRQ_INT3, freq);
865 864
866 return 0; 865 return 0;
867 866
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 1485aee18d58..dc67bc40f36f 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -40,7 +40,6 @@
40#include "au0828.h" 40#include "au0828.h"
41#include "au0828-reg.h" 41#include "au0828-reg.h"
42 42
43static LIST_HEAD(au0828_devlist);
44static DEFINE_MUTEX(au0828_sysfs_lock); 43static DEFINE_MUTEX(au0828_sysfs_lock);
45 44
46#define AU0828_VERSION_CODE KERNEL_VERSION(0, 0, 1) 45#define AU0828_VERSION_CODE KERNEL_VERSION(0, 0, 1)
@@ -693,10 +692,8 @@ void au0828_analog_unregister(struct au0828_dev *dev)
693 dprintk(1, "au0828_release_resources called\n"); 692 dprintk(1, "au0828_release_resources called\n");
694 mutex_lock(&au0828_sysfs_lock); 693 mutex_lock(&au0828_sysfs_lock);
695 694
696 if (dev->vdev) { 695 if (dev->vdev)
697 list_del(&dev->au0828list);
698 video_unregister_device(dev->vdev); 696 video_unregister_device(dev->vdev);
699 }
700 if (dev->vbi_dev) 697 if (dev->vbi_dev)
701 video_unregister_device(dev->vbi_dev); 698 video_unregister_device(dev->vbi_dev);
702 699
@@ -737,29 +734,15 @@ static void res_free(struct au0828_fh *fh)
737 734
738static int au0828_v4l2_open(struct file *filp) 735static int au0828_v4l2_open(struct file *filp)
739{ 736{
740 int minor = video_devdata(filp)->minor;
741 int ret = 0; 737 int ret = 0;
742 struct au0828_dev *h, *dev = NULL; 738 struct au0828_dev *dev = video_drvdata(filp);
743 struct au0828_fh *fh; 739 struct au0828_fh *fh;
744 int type = 0; 740 int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
745 struct list_head *list; 741
746
747 list_for_each(list, &au0828_devlist) {
748 h = list_entry(list, struct au0828_dev, au0828list);
749 if (h->vdev->minor == minor) {
750 dev = h;
751 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
752 }
753#ifdef VBI_IS_WORKING 742#ifdef VBI_IS_WORKING
754 if (h->vbi_dev->minor == minor) { 743 if (video_devdata(filp)->vfl_type == VFL_TYPE_GRABBER)
755 dev = h; 744 type = V4L2_BUF_TYPE_VBI_CAPTURE;
756 type = V4L2_BUF_TYPE_VBI_CAPTURE;
757 }
758#endif 745#endif
759 }
760
761 if (NULL == dev)
762 return -ENODEV;
763 746
764 fh = kzalloc(sizeof(struct au0828_fh), GFP_KERNEL); 747 fh = kzalloc(sizeof(struct au0828_fh), GFP_KERNEL);
765 if (NULL == fh) { 748 if (NULL == fh) {
@@ -1587,7 +1570,6 @@ static const struct video_device au0828_video_template = {
1587 .fops = &au0828_v4l_fops, 1570 .fops = &au0828_v4l_fops,
1588 .release = video_device_release, 1571 .release = video_device_release,
1589 .ioctl_ops = &video_ioctl_ops, 1572 .ioctl_ops = &video_ioctl_ops,
1590 .minor = -1,
1591 .tvnorms = V4L2_STD_NTSC_M, 1573 .tvnorms = V4L2_STD_NTSC_M,
1592 .current_norm = V4L2_STD_NTSC_M, 1574 .current_norm = V4L2_STD_NTSC_M,
1593}; 1575};
@@ -1676,25 +1658,23 @@ int au0828_analog_register(struct au0828_dev *dev,
1676 strcpy(dev->vbi_dev->name, "au0828a vbi"); 1658 strcpy(dev->vbi_dev->name, "au0828a vbi");
1677#endif 1659#endif
1678 1660
1679 list_add_tail(&dev->au0828list, &au0828_devlist);
1680
1681 /* Register the v4l2 device */ 1661 /* Register the v4l2 device */
1662 video_set_drvdata(dev->vdev, dev);
1682 retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1); 1663 retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
1683 if (retval != 0) { 1664 if (retval != 0) {
1684 dprintk(1, "unable to register video device (error = %d).\n", 1665 dprintk(1, "unable to register video device (error = %d).\n",
1685 retval); 1666 retval);
1686 list_del(&dev->au0828list);
1687 video_device_release(dev->vdev); 1667 video_device_release(dev->vdev);
1688 return -ENODEV; 1668 return -ENODEV;
1689 } 1669 }
1690 1670
1691#ifdef VBI_IS_WORKING 1671#ifdef VBI_IS_WORKING
1692 /* Register the vbi device */ 1672 /* Register the vbi device */
1673 video_set_drvdata(dev->vbi_dev, dev);
1693 retval = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, -1); 1674 retval = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, -1);
1694 if (retval != 0) { 1675 if (retval != 0) {
1695 dprintk(1, "unable to register vbi device (error = %d).\n", 1676 dprintk(1, "unable to register vbi device (error = %d).\n",
1696 retval); 1677 retval);
1697 list_del(&dev->au0828list);
1698 video_device_release(dev->vbi_dev); 1678 video_device_release(dev->vbi_dev);
1699 video_device_release(dev->vdev); 1679 video_device_release(dev->vdev);
1700 return -ENODEV; 1680 return -ENODEV;
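
Rather than walking a global au0828_devlist and comparing minor numbers in open(), the driver now hangs its state off the video_device with video_set_drvdata() before registration and retrieves it in the file operations with video_drvdata(); the node kind comes from vdev->vfl_type. The same idiom, with illustrative names, looks like:

#include <linux/fs.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

struct demo_dev {
	struct video_device *vdev;
	/* driver state ... */
};

static int demo_register(struct demo_dev *dev)
{
	/* make the state reachable from the node before it goes live */
	video_set_drvdata(dev->vdev, dev);
	return video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
}

static int demo_open(struct file *file)
{
	struct demo_dev *dev = video_drvdata(file);
	struct video_device *vdev = video_devdata(file);
	enum v4l2_buf_type type = (vdev->vfl_type == VFL_TYPE_VBI) ?
			V4L2_BUF_TYPE_VBI_CAPTURE :
			V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* dev and type replace the old list walk and minor comparison */
	(void)dev;
	(void)type;
	return 0;
}
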
diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
index b977915efbd0..207f32dec6a6 100644
--- a/drivers/media/video/au0828/au0828.h
+++ b/drivers/media/video/au0828/au0828.h
@@ -192,7 +192,6 @@ struct au0828_dev {
192 struct au0828_dvb dvb; 192 struct au0828_dvb dvb;
193 193
194 /* Analog */ 194 /* Analog */
195 struct list_head au0828list;
196 struct v4l2_device v4l2_dev; 195 struct v4l2_device v4l2_dev;
197 int users; 196 int users;
198 unsigned int stream_on:1; /* Locks streams */ 197 unsigned int stream_on:1; /* Locks streams */
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a6724019c66f..3182a406bdd1 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3206,24 +3206,24 @@ err:
3206 3206
3207static int bttv_open(struct file *file) 3207static int bttv_open(struct file *file)
3208{ 3208{
3209 int minor = video_devdata(file)->minor; 3209 struct video_device *vdev = video_devdata(file);
3210 struct bttv *btv = video_drvdata(file); 3210 struct bttv *btv = video_drvdata(file);
3211 struct bttv_fh *fh; 3211 struct bttv_fh *fh;
3212 enum v4l2_buf_type type = 0; 3212 enum v4l2_buf_type type = 0;
3213 3213
3214 dprintk(KERN_DEBUG "bttv: open minor=%d\n",minor); 3214 dprintk(KERN_DEBUG "bttv: open dev=%s\n", video_device_node_name(vdev));
3215 3215
3216 lock_kernel(); 3216 if (vdev->vfl_type == VFL_TYPE_GRABBER) {
3217 if (btv->video_dev->minor == minor) {
3218 type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 3217 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
3219 } else if (btv->vbi_dev->minor == minor) { 3218 } else if (vdev->vfl_type == VFL_TYPE_VBI) {
3220 type = V4L2_BUF_TYPE_VBI_CAPTURE; 3219 type = V4L2_BUF_TYPE_VBI_CAPTURE;
3221 } else { 3220 } else {
3222 WARN_ON(1); 3221 WARN_ON(1);
3223 unlock_kernel();
3224 return -ENODEV; 3222 return -ENODEV;
3225 } 3223 }
3226 3224
3225 lock_kernel();
3226
3227 dprintk(KERN_DEBUG "bttv%d: open called (type=%s)\n", 3227 dprintk(KERN_DEBUG "bttv%d: open called (type=%s)\n",
3228 btv->c.nr,v4l2_type_names[type]); 3228 btv->c.nr,v4l2_type_names[type]);
3229 3229
@@ -3397,7 +3397,6 @@ static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
3397 3397
3398static struct video_device bttv_video_template = { 3398static struct video_device bttv_video_template = {
3399 .fops = &bttv_fops, 3399 .fops = &bttv_fops,
3400 .minor = -1,
3401 .ioctl_ops = &bttv_ioctl_ops, 3400 .ioctl_ops = &bttv_ioctl_ops,
3402 .tvnorms = BTTV_NORMS, 3401 .tvnorms = BTTV_NORMS,
3403 .current_norm = V4L2_STD_PAL, 3402 .current_norm = V4L2_STD_PAL,
@@ -3408,18 +3407,13 @@ static struct video_device bttv_video_template = {
3408 3407
3409static int radio_open(struct file *file) 3408static int radio_open(struct file *file)
3410{ 3409{
3411 int minor = video_devdata(file)->minor; 3410 struct video_device *vdev = video_devdata(file);
3412 struct bttv *btv = video_drvdata(file); 3411 struct bttv *btv = video_drvdata(file);
3413 struct bttv_fh *fh; 3412 struct bttv_fh *fh;
3414 3413
3415 dprintk("bttv: open minor=%d\n",minor); 3414 dprintk("bttv: open dev=%s\n", video_device_node_name(vdev));
3416 3415
3417 lock_kernel(); 3416 lock_kernel();
3418 WARN_ON(btv->radio_dev && btv->radio_dev->minor != minor);
3419 if (!btv->radio_dev || btv->radio_dev->minor != minor) {
3420 unlock_kernel();
3421 return -ENODEV;
3422 }
3423 3417
3424 dprintk("bttv%d: open called (radio)\n",btv->c.nr); 3418 dprintk("bttv%d: open called (radio)\n",btv->c.nr);
3425 3419
@@ -3640,7 +3634,6 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
3640 3634
3641static struct video_device radio_template = { 3635static struct video_device radio_template = {
3642 .fops = &radio_fops, 3636 .fops = &radio_fops,
3643 .minor = -1,
3644 .ioctl_ops = &radio_ioctl_ops, 3637 .ioctl_ops = &radio_ioctl_ops,
3645}; 3638};
3646 3639
@@ -4208,21 +4201,21 @@ static struct video_device *vdev_init(struct bttv *btv,
4208static void bttv_unregister_video(struct bttv *btv) 4201static void bttv_unregister_video(struct bttv *btv)
4209{ 4202{
4210 if (btv->video_dev) { 4203 if (btv->video_dev) {
4211 if (-1 != btv->video_dev->minor) 4204 if (video_is_registered(btv->video_dev))
4212 video_unregister_device(btv->video_dev); 4205 video_unregister_device(btv->video_dev);
4213 else 4206 else
4214 video_device_release(btv->video_dev); 4207 video_device_release(btv->video_dev);
4215 btv->video_dev = NULL; 4208 btv->video_dev = NULL;
4216 } 4209 }
4217 if (btv->vbi_dev) { 4210 if (btv->vbi_dev) {
4218 if (-1 != btv->vbi_dev->minor) 4211 if (video_is_registered(btv->vbi_dev))
4219 video_unregister_device(btv->vbi_dev); 4212 video_unregister_device(btv->vbi_dev);
4220 else 4213 else
4221 video_device_release(btv->vbi_dev); 4214 video_device_release(btv->vbi_dev);
4222 btv->vbi_dev = NULL; 4215 btv->vbi_dev = NULL;
4223 } 4216 }
4224 if (btv->radio_dev) { 4217 if (btv->radio_dev) {
4225 if (-1 != btv->radio_dev->minor) 4218 if (video_is_registered(btv->radio_dev))
4226 video_unregister_device(btv->radio_dev); 4219 video_unregister_device(btv->radio_dev);
4227 else 4220 else
4228 video_device_release(btv->radio_dev); 4221 video_device_release(btv->radio_dev);
@@ -4244,8 +4237,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
4244 if (video_register_device(btv->video_dev, VFL_TYPE_GRABBER, 4237 if (video_register_device(btv->video_dev, VFL_TYPE_GRABBER,
4245 video_nr[btv->c.nr]) < 0) 4238 video_nr[btv->c.nr]) < 0)
4246 goto err; 4239 goto err;
4247 printk(KERN_INFO "bttv%d: registered device video%d\n", 4240 printk(KERN_INFO "bttv%d: registered device %s\n",
4248 btv->c.nr, btv->video_dev->num); 4241 btv->c.nr, video_device_node_name(btv->video_dev));
4249 if (device_create_file(&btv->video_dev->dev, 4242 if (device_create_file(&btv->video_dev->dev,
4250 &dev_attr_card)<0) { 4243 &dev_attr_card)<0) {
4251 printk(KERN_ERR "bttv%d: device_create_file 'card' " 4244 printk(KERN_ERR "bttv%d: device_create_file 'card' "
@@ -4261,8 +4254,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
4261 if (video_register_device(btv->vbi_dev, VFL_TYPE_VBI, 4254 if (video_register_device(btv->vbi_dev, VFL_TYPE_VBI,
4262 vbi_nr[btv->c.nr]) < 0) 4255 vbi_nr[btv->c.nr]) < 0)
4263 goto err; 4256 goto err;
4264 printk(KERN_INFO "bttv%d: registered device vbi%d\n", 4257 printk(KERN_INFO "bttv%d: registered device %s\n",
4265 btv->c.nr, btv->vbi_dev->num); 4258 btv->c.nr, video_device_node_name(btv->vbi_dev));
4266 4259
4267 if (!btv->has_radio) 4260 if (!btv->has_radio)
4268 return 0; 4261 return 0;
@@ -4273,8 +4266,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
4273 if (video_register_device(btv->radio_dev, VFL_TYPE_RADIO, 4266 if (video_register_device(btv->radio_dev, VFL_TYPE_RADIO,
4274 radio_nr[btv->c.nr]) < 0) 4267 radio_nr[btv->c.nr]) < 0)
4275 goto err; 4268 goto err;
4276 printk(KERN_INFO "bttv%d: registered device radio%d\n", 4269 printk(KERN_INFO "bttv%d: registered device %s\n",
4277 btv->c.nr, btv->radio_dev->num); 4270 btv->c.nr, video_device_node_name(btv->radio_dev));
4278 4271
4279 /* all done */ 4272 /* all done */
4280 return 0; 4273 return 0;
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index beda363418b0..63aa31a041e8 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -40,7 +40,7 @@ static int i2c_debug;
40static int i2c_hw; 40static int i2c_hw;
41static int i2c_scan; 41static int i2c_scan;
42module_param(i2c_debug, int, 0644); 42module_param(i2c_debug, int, 0644);
43MODULE_PARM_DESC(i2c_hw,"configure i2c debug level"); 43MODULE_PARM_DESC(i2c_debug, "configure i2c debug level");
44module_param(i2c_hw, int, 0444); 44module_param(i2c_hw, int, 0444);
45MODULE_PARM_DESC(i2c_hw,"force use of hardware i2c support, " 45MODULE_PARM_DESC(i2c_hw,"force use of hardware i2c support, "
46 "instead of software bitbang"); 46 "instead of software bitbang");
@@ -400,7 +400,7 @@ int __devinit init_bttv_i2c(struct bttv *btv)
400 That's why we probe 0x1a (~0x34) first. CB 400 That's why we probe 0x1a (~0x34) first. CB
401 */ 401 */
402 const unsigned short addr_list[] = { 402 const unsigned short addr_list[] = {
403 0x1a, 0x18, 0x4b, 0x64, 0x30, 403 0x1a, 0x18, 0x4b, 0x64, 0x30, 0x71,
404 I2C_CLIENT_END 404 I2C_CLIENT_END
405 }; 405 };
406 406
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 84a957e52c4b..277a092e1214 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -368,7 +368,7 @@ int bttv_input_init(struct bttv *btv)
368 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", 368 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
369 pci_name(btv->c.pci)); 369 pci_name(btv->c.pci));
370 370
371 err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); 371 err = ir_input_init(input_dev, &ir->ir, ir_type);
372 if (err < 0) 372 if (err < 0)
373 goto err_out_free; 373 goto err_out_free;
374 374
@@ -389,7 +389,7 @@ int bttv_input_init(struct bttv *btv)
389 bttv_ir_start(btv, ir); 389 bttv_ir_start(btv, ir);
390 390
391 /* all done */ 391 /* all done */
392 err = input_register_device(btv->remote->dev); 392 err = ir_input_register(btv->remote->dev, ir_codes);
393 if (err) 393 if (err)
394 goto err_out_stop; 394 goto err_out_stop;
395 395
@@ -403,8 +403,6 @@ int bttv_input_init(struct bttv *btv)
403 bttv_ir_stop(btv); 403 bttv_ir_stop(btv);
404 btv->remote = NULL; 404 btv->remote = NULL;
405 err_out_free: 405 err_out_free:
406 ir_input_free(input_dev);
407 input_free_device(input_dev);
408 kfree(ir); 406 kfree(ir);
409 return err; 407 return err;
410} 408}
@@ -415,8 +413,7 @@ void bttv_input_fini(struct bttv *btv)
415 return; 413 return;
416 414
417 bttv_ir_stop(btv); 415 bttv_ir_stop(btv);
418 ir_input_free(btv->remote->dev); 416 ir_input_unregister(btv->remote->dev);
419 input_unregister_device(btv->remote->dev);
420 kfree(btv->remote); 417 kfree(btv->remote);
421 btv->remote = NULL; 418 btv->remote = NULL;
422} 419}
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 85cf1778827a..e2cbebab959b 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -809,8 +809,8 @@ static int init_cqcam(struct parport *port)
809 return -ENODEV; 809 return -ENODEV;
810 } 810 }
811 811
812 printk(KERN_INFO "video%d: Colour QuickCam found on %s\n", 812 printk(KERN_INFO "%s: Colour QuickCam found on %s\n",
813 qcam->vdev.num, qcam->pport->name); 813 video_device_node_name(&qcam->vdev), qcam->pport->name);
814 814
815 qcams[num_cams++] = qcam; 815 qcams[num_cams++] = qcam;
816 816
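
The arv, bttv and c-qcam hunks above share two small API moves: kernel messages print video_device_node_name() instead of hand-built "video%d" strings (so vbi and radio nodes are named correctly too), and bttv's teardown asks video_is_registered() rather than testing for a minor of -1 to decide between unregistering and merely releasing a node. In sketch form:

#include <linux/kernel.h>
#include <media/v4l2-dev.h>

static void demo_announce(struct video_device *vdev)
{
	pr_info("registered device %s\n", video_device_node_name(vdev));
}

static void demo_destroy(struct video_device *vdev)
{
	if (!vdev)
		return;
	if (video_is_registered(vdev))
		video_unregister_device(vdev);	/* node reached userspace */
	else
		video_device_release(vdev);	/* allocated, never registered */
}
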
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 10230cb3d210..7bb9c1ec7819 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -1723,7 +1723,6 @@ static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
1723 1723
1724static struct video_device cafe_v4l_template = { 1724static struct video_device cafe_v4l_template = {
1725 .name = "cafe", 1725 .name = "cafe",
1726 .minor = -1, /* Get one dynamically */
1727 .tvnorms = V4L2_STD_NTSC_M, 1726 .tvnorms = V4L2_STD_NTSC_M,
1728 .current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */ 1727 .current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */
1729 1728
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 2377313c041a..551ddf216a4b 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -32,6 +32,7 @@
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/sched.h> 34#include <linux/sched.h>
35#include <linux/seq_file.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
37#include <linux/ctype.h> 38#include <linux/ctype.h>
@@ -244,72 +245,67 @@ static void rvfree(void *mem, unsigned long size)
244#ifdef CONFIG_PROC_FS 245#ifdef CONFIG_PROC_FS
245static struct proc_dir_entry *cpia_proc_root=NULL; 246static struct proc_dir_entry *cpia_proc_root=NULL;
246 247
247static int cpia_read_proc(char *page, char **start, off_t off, 248static int cpia_proc_show(struct seq_file *m, void *v)
248 int count, int *eof, void *data)
249{ 249{
250 char *out = page; 250 struct cam_data *cam = m->private;
251 int len, tmp; 251 int tmp;
252 struct cam_data *cam = data;
253 char tmpstr[29]; 252 char tmpstr[29];
254 253
255 /* IMPORTANT: This output MUST be kept under PAGE_SIZE 254 seq_printf(m, "read-only\n-----------------------\n");
256 * or we need to get more sophisticated. */ 255 seq_printf(m, "V4L Driver version: %d.%d.%d\n",
257
258 out += sprintf(out, "read-only\n-----------------------\n");
259 out += sprintf(out, "V4L Driver version: %d.%d.%d\n",
260 CPIA_MAJ_VER, CPIA_MIN_VER, CPIA_PATCH_VER); 256 CPIA_MAJ_VER, CPIA_MIN_VER, CPIA_PATCH_VER);
261 out += sprintf(out, "CPIA Version: %d.%02d (%d.%d)\n", 257 seq_printf(m, "CPIA Version: %d.%02d (%d.%d)\n",
262 cam->params.version.firmwareVersion, 258 cam->params.version.firmwareVersion,
263 cam->params.version.firmwareRevision, 259 cam->params.version.firmwareRevision,
264 cam->params.version.vcVersion, 260 cam->params.version.vcVersion,
265 cam->params.version.vcRevision); 261 cam->params.version.vcRevision);
266 out += sprintf(out, "CPIA PnP-ID: %04x:%04x:%04x\n", 262 seq_printf(m, "CPIA PnP-ID: %04x:%04x:%04x\n",
267 cam->params.pnpID.vendor, cam->params.pnpID.product, 263 cam->params.pnpID.vendor, cam->params.pnpID.product,
268 cam->params.pnpID.deviceRevision); 264 cam->params.pnpID.deviceRevision);
269 out += sprintf(out, "VP-Version: %d.%d %04x\n", 265 seq_printf(m, "VP-Version: %d.%d %04x\n",
270 cam->params.vpVersion.vpVersion, 266 cam->params.vpVersion.vpVersion,
271 cam->params.vpVersion.vpRevision, 267 cam->params.vpVersion.vpRevision,
272 cam->params.vpVersion.cameraHeadID); 268 cam->params.vpVersion.cameraHeadID);
273 269
274 out += sprintf(out, "system_state: %#04x\n", 270 seq_printf(m, "system_state: %#04x\n",
275 cam->params.status.systemState); 271 cam->params.status.systemState);
276 out += sprintf(out, "grab_state: %#04x\n", 272 seq_printf(m, "grab_state: %#04x\n",
277 cam->params.status.grabState); 273 cam->params.status.grabState);
278 out += sprintf(out, "stream_state: %#04x\n", 274 seq_printf(m, "stream_state: %#04x\n",
279 cam->params.status.streamState); 275 cam->params.status.streamState);
280 out += sprintf(out, "fatal_error: %#04x\n", 276 seq_printf(m, "fatal_error: %#04x\n",
281 cam->params.status.fatalError); 277 cam->params.status.fatalError);
282 out += sprintf(out, "cmd_error: %#04x\n", 278 seq_printf(m, "cmd_error: %#04x\n",
283 cam->params.status.cmdError); 279 cam->params.status.cmdError);
284 out += sprintf(out, "debug_flags: %#04x\n", 280 seq_printf(m, "debug_flags: %#04x\n",
285 cam->params.status.debugFlags); 281 cam->params.status.debugFlags);
286 out += sprintf(out, "vp_status: %#04x\n", 282 seq_printf(m, "vp_status: %#04x\n",
287 cam->params.status.vpStatus); 283 cam->params.status.vpStatus);
288 out += sprintf(out, "error_code: %#04x\n", 284 seq_printf(m, "error_code: %#04x\n",
289 cam->params.status.errorCode); 285 cam->params.status.errorCode);
290 /* QX3 specific entries */ 286 /* QX3 specific entries */
291 if (cam->params.qx3.qx3_detected) { 287 if (cam->params.qx3.qx3_detected) {
292 out += sprintf(out, "button: %4d\n", 288 seq_printf(m, "button: %4d\n",
293 cam->params.qx3.button); 289 cam->params.qx3.button);
294 out += sprintf(out, "cradled: %4d\n", 290 seq_printf(m, "cradled: %4d\n",
295 cam->params.qx3.cradled); 291 cam->params.qx3.cradled);
296 } 292 }
297 out += sprintf(out, "video_size: %s\n", 293 seq_printf(m, "video_size: %s\n",
298 cam->params.format.videoSize == VIDEOSIZE_CIF ? 294 cam->params.format.videoSize == VIDEOSIZE_CIF ?
299 "CIF " : "QCIF"); 295 "CIF " : "QCIF");
300 out += sprintf(out, "roi: (%3d, %3d) to (%3d, %3d)\n", 296 seq_printf(m, "roi: (%3d, %3d) to (%3d, %3d)\n",
301 cam->params.roi.colStart*8, 297 cam->params.roi.colStart*8,
302 cam->params.roi.rowStart*4, 298 cam->params.roi.rowStart*4,
303 cam->params.roi.colEnd*8, 299 cam->params.roi.colEnd*8,
304 cam->params.roi.rowEnd*4); 300 cam->params.roi.rowEnd*4);
305 out += sprintf(out, "actual_fps: %3d\n", cam->fps); 301 seq_printf(m, "actual_fps: %3d\n", cam->fps);
306 out += sprintf(out, "transfer_rate: %4dkB/s\n", 302 seq_printf(m, "transfer_rate: %4dkB/s\n",
307 cam->transfer_rate); 303 cam->transfer_rate);
308 304
309 out += sprintf(out, "\nread-write\n"); 305 seq_printf(m, "\nread-write\n");
310 out += sprintf(out, "----------------------- current min" 306 seq_printf(m, "----------------------- current min"
311 " max default comment\n"); 307 " max default comment\n");
312 out += sprintf(out, "brightness: %8d %8d %8d %8d\n", 308 seq_printf(m, "brightness: %8d %8d %8d %8d\n",
313 cam->params.colourParams.brightness, 0, 100, 50); 309 cam->params.colourParams.brightness, 0, 100, 50);
314 if (cam->params.version.firmwareVersion == 1 && 310 if (cam->params.version.firmwareVersion == 1 &&
315 cam->params.version.firmwareRevision == 2) 311 cam->params.version.firmwareRevision == 2)
@@ -318,26 +314,26 @@ static int cpia_read_proc(char *page, char **start, off_t off,
318 else 314 else
319 tmp = 96; 315 tmp = 96;
320 316
321 out += sprintf(out, "contrast: %8d %8d %8d %8d" 317 seq_printf(m, "contrast: %8d %8d %8d %8d"
322 " steps of 8\n", 318 " steps of 8\n",
323 cam->params.colourParams.contrast, 0, tmp, 48); 319 cam->params.colourParams.contrast, 0, tmp, 48);
324 out += sprintf(out, "saturation: %8d %8d %8d %8d\n", 320 seq_printf(m, "saturation: %8d %8d %8d %8d\n",
325 cam->params.colourParams.saturation, 0, 100, 50); 321 cam->params.colourParams.saturation, 0, 100, 50);
326 tmp = (25000+5000*cam->params.sensorFps.baserate)/ 322 tmp = (25000+5000*cam->params.sensorFps.baserate)/
327 (1<<cam->params.sensorFps.divisor); 323 (1<<cam->params.sensorFps.divisor);
328 out += sprintf(out, "sensor_fps: %4d.%03d %8d %8d %8d\n", 324 seq_printf(m, "sensor_fps: %4d.%03d %8d %8d %8d\n",
329 tmp/1000, tmp%1000, 3, 30, 15); 325 tmp/1000, tmp%1000, 3, 30, 15);
330 out += sprintf(out, "stream_start_line: %8d %8d %8d %8d\n", 326 seq_printf(m, "stream_start_line: %8d %8d %8d %8d\n",
331 2*cam->params.streamStartLine, 0, 327 2*cam->params.streamStartLine, 0,
332 cam->params.format.videoSize == VIDEOSIZE_CIF ? 288:144, 328 cam->params.format.videoSize == VIDEOSIZE_CIF ? 288:144,
333 cam->params.format.videoSize == VIDEOSIZE_CIF ? 240:120); 329 cam->params.format.videoSize == VIDEOSIZE_CIF ? 240:120);
334 out += sprintf(out, "sub_sample: %8s %8s %8s %8s\n", 330 seq_printf(m, "sub_sample: %8s %8s %8s %8s\n",
335 cam->params.format.subSample == SUBSAMPLE_420 ? 331 cam->params.format.subSample == SUBSAMPLE_420 ?
336 "420" : "422", "420", "422", "422"); 332 "420" : "422", "420", "422", "422");
337 out += sprintf(out, "yuv_order: %8s %8s %8s %8s\n", 333 seq_printf(m, "yuv_order: %8s %8s %8s %8s\n",
338 cam->params.format.yuvOrder == YUVORDER_YUYV ? 334 cam->params.format.yuvOrder == YUVORDER_YUYV ?
339 "YUYV" : "UYVY", "YUYV" , "UYVY", "YUYV"); 335 "YUYV" : "UYVY", "YUYV" , "UYVY", "YUYV");
340 out += sprintf(out, "ecp_timing: %8s %8s %8s %8s\n", 336 seq_printf(m, "ecp_timing: %8s %8s %8s %8s\n",
341 cam->params.ecpTiming ? "slow" : "normal", "slow", 337 cam->params.ecpTiming ? "slow" : "normal", "slow",
342 "normal", "normal"); 338 "normal", "normal");
343 339
@@ -346,13 +342,13 @@ static int cpia_read_proc(char *page, char **start, off_t off,
346 } else { 342 } else {
347 sprintf(tmpstr, "manual"); 343 sprintf(tmpstr, "manual");
348 } 344 }
349 out += sprintf(out, "color_balance_mode: %8s %8s %8s" 345 seq_printf(m, "color_balance_mode: %8s %8s %8s"
350 " %8s\n", tmpstr, "manual", "auto", "auto"); 346 " %8s\n", tmpstr, "manual", "auto", "auto");
351 out += sprintf(out, "red_gain: %8d %8d %8d %8d\n", 347 seq_printf(m, "red_gain: %8d %8d %8d %8d\n",
352 cam->params.colourBalance.redGain, 0, 212, 32); 348 cam->params.colourBalance.redGain, 0, 212, 32);
353 out += sprintf(out, "green_gain: %8d %8d %8d %8d\n", 349 seq_printf(m, "green_gain: %8d %8d %8d %8d\n",
354 cam->params.colourBalance.greenGain, 0, 212, 6); 350 cam->params.colourBalance.greenGain, 0, 212, 6);
355 out += sprintf(out, "blue_gain: %8d %8d %8d %8d\n", 351 seq_printf(m, "blue_gain: %8d %8d %8d %8d\n",
356 cam->params.colourBalance.blueGain, 0, 212, 92); 352 cam->params.colourBalance.blueGain, 0, 212, 92);
357 353
358 if (cam->params.version.firmwareVersion == 1 && 354 if (cam->params.version.firmwareVersion == 1 &&
@@ -363,10 +359,10 @@ static int cpia_read_proc(char *page, char **start, off_t off,
363 sprintf(tmpstr, "%8d %8d %8d", 1, 8, 2); 359 sprintf(tmpstr, "%8d %8d %8d", 1, 8, 2);
364 360
365 if (cam->params.exposure.gainMode == 0) 361 if (cam->params.exposure.gainMode == 0)
366 out += sprintf(out, "max_gain: unknown %28s" 362 seq_printf(m, "max_gain: unknown %28s"
367 " powers of 2\n", tmpstr); 363 " powers of 2\n", tmpstr);
368 else 364 else
369 out += sprintf(out, "max_gain: %8d %28s" 365 seq_printf(m, "max_gain: %8d %28s"
370 " 1,2,4 or 8 \n", 366 " 1,2,4 or 8 \n",
371 1<<(cam->params.exposure.gainMode-1), tmpstr); 367 1<<(cam->params.exposure.gainMode-1), tmpstr);
372 368
@@ -382,12 +378,12 @@ static int cpia_read_proc(char *page, char **start, off_t off,
382 sprintf(tmpstr, "unknown"); 378 sprintf(tmpstr, "unknown");
383 break; 379 break;
384 } 380 }
385 out += sprintf(out, "exposure_mode: %8s %8s %8s" 381 seq_printf(m, "exposure_mode: %8s %8s %8s"
386 " %8s\n", tmpstr, "manual", "auto", "auto"); 382 " %8s\n", tmpstr, "manual", "auto", "auto");
387 out += sprintf(out, "centre_weight: %8s %8s %8s %8s\n", 383 seq_printf(m, "centre_weight: %8s %8s %8s %8s\n",
388 (2-cam->params.exposure.centreWeight) ? "on" : "off", 384 (2-cam->params.exposure.centreWeight) ? "on" : "off",
389 "off", "on", "on"); 385 "off", "on", "on");
390 out += sprintf(out, "gain: %8d %8d max_gain %8d 1,2,4,8 possible\n", 386 seq_printf(m, "gain: %8d %8d max_gain %8d 1,2,4,8 possible\n",
391 1<<cam->params.exposure.gain, 1, 1); 387 1<<cam->params.exposure.gain, 1, 1);
392 if (cam->params.version.firmwareVersion == 1 && 388 if (cam->params.version.firmwareVersion == 1 &&
393 cam->params.version.firmwareRevision == 2) 389 cam->params.version.firmwareRevision == 2)
@@ -396,7 +392,7 @@ static int cpia_read_proc(char *page, char **start, off_t off,
396 else 392 else
397 tmp = 510; 393 tmp = 510;
398 394
399 out += sprintf(out, "fine_exp: %8d %8d %8d %8d\n", 395 seq_printf(m, "fine_exp: %8d %8d %8d %8d\n",
400 cam->params.exposure.fineExp*2, 0, tmp, 0); 396 cam->params.exposure.fineExp*2, 0, tmp, 0);
401 if (cam->params.version.firmwareVersion == 1 && 397 if (cam->params.version.firmwareVersion == 1 &&
402 cam->params.version.firmwareRevision == 2) 398 cam->params.version.firmwareRevision == 2)
@@ -405,127 +401,122 @@ static int cpia_read_proc(char *page, char **start, off_t off,
405 else 401 else
406 tmp = MAX_EXP; 402 tmp = MAX_EXP;
407 403
408 out += sprintf(out, "coarse_exp: %8d %8d %8d" 404 seq_printf(m, "coarse_exp: %8d %8d %8d"
409 " %8d\n", cam->params.exposure.coarseExpLo+ 405 " %8d\n", cam->params.exposure.coarseExpLo+
410 256*cam->params.exposure.coarseExpHi, 0, tmp, 185); 406 256*cam->params.exposure.coarseExpHi, 0, tmp, 185);
411 out += sprintf(out, "red_comp: %8d %8d %8d %8d\n", 407 seq_printf(m, "red_comp: %8d %8d %8d %8d\n",
412 cam->params.exposure.redComp, COMP_RED, 255, COMP_RED); 408 cam->params.exposure.redComp, COMP_RED, 255, COMP_RED);
413 out += sprintf(out, "green1_comp: %8d %8d %8d %8d\n", 409 seq_printf(m, "green1_comp: %8d %8d %8d %8d\n",
414 cam->params.exposure.green1Comp, COMP_GREEN1, 255, 410 cam->params.exposure.green1Comp, COMP_GREEN1, 255,
415 COMP_GREEN1); 411 COMP_GREEN1);
416 out += sprintf(out, "green2_comp: %8d %8d %8d %8d\n", 412 seq_printf(m, "green2_comp: %8d %8d %8d %8d\n",
417 cam->params.exposure.green2Comp, COMP_GREEN2, 255, 413 cam->params.exposure.green2Comp, COMP_GREEN2, 255,
418 COMP_GREEN2); 414 COMP_GREEN2);
419 out += sprintf(out, "blue_comp: %8d %8d %8d %8d\n", 415 seq_printf(m, "blue_comp: %8d %8d %8d %8d\n",
420 cam->params.exposure.blueComp, COMP_BLUE, 255, COMP_BLUE); 416 cam->params.exposure.blueComp, COMP_BLUE, 255, COMP_BLUE);
421 417
422 out += sprintf(out, "apcor_gain1: %#8x %#8x %#8x %#8x\n", 418 seq_printf(m, "apcor_gain1: %#8x %#8x %#8x %#8x\n",
423 cam->params.apcor.gain1, 0, 0xff, 0x1c); 419 cam->params.apcor.gain1, 0, 0xff, 0x1c);
424 out += sprintf(out, "apcor_gain2: %#8x %#8x %#8x %#8x\n", 420 seq_printf(m, "apcor_gain2: %#8x %#8x %#8x %#8x\n",
425 cam->params.apcor.gain2, 0, 0xff, 0x1a); 421 cam->params.apcor.gain2, 0, 0xff, 0x1a);
426 out += sprintf(out, "apcor_gain4: %#8x %#8x %#8x %#8x\n", 422 seq_printf(m, "apcor_gain4: %#8x %#8x %#8x %#8x\n",
427 cam->params.apcor.gain4, 0, 0xff, 0x2d); 423 cam->params.apcor.gain4, 0, 0xff, 0x2d);
428 out += sprintf(out, "apcor_gain8: %#8x %#8x %#8x %#8x\n", 424 seq_printf(m, "apcor_gain8: %#8x %#8x %#8x %#8x\n",
429 cam->params.apcor.gain8, 0, 0xff, 0x2a); 425 cam->params.apcor.gain8, 0, 0xff, 0x2a);
430 out += sprintf(out, "vl_offset_gain1: %8d %8d %8d %8d\n", 426 seq_printf(m, "vl_offset_gain1: %8d %8d %8d %8d\n",
431 cam->params.vlOffset.gain1, 0, 255, 24); 427 cam->params.vlOffset.gain1, 0, 255, 24);
432 out += sprintf(out, "vl_offset_gain2: %8d %8d %8d %8d\n", 428 seq_printf(m, "vl_offset_gain2: %8d %8d %8d %8d\n",
433 cam->params.vlOffset.gain2, 0, 255, 28); 429 cam->params.vlOffset.gain2, 0, 255, 28);
434 out += sprintf(out, "vl_offset_gain4: %8d %8d %8d %8d\n", 430 seq_printf(m, "vl_offset_gain4: %8d %8d %8d %8d\n",
435 cam->params.vlOffset.gain4, 0, 255, 30); 431 cam->params.vlOffset.gain4, 0, 255, 30);
436 out += sprintf(out, "vl_offset_gain8: %8d %8d %8d %8d\n", 432 seq_printf(m, "vl_offset_gain8: %8d %8d %8d %8d\n",
437 cam->params.vlOffset.gain8, 0, 255, 30); 433 cam->params.vlOffset.gain8, 0, 255, 30);
438 out += sprintf(out, "flicker_control: %8s %8s %8s %8s\n", 434 seq_printf(m, "flicker_control: %8s %8s %8s %8s\n",
439 cam->params.flickerControl.flickerMode ? "on" : "off", 435 cam->params.flickerControl.flickerMode ? "on" : "off",
440 "off", "on", "off"); 436 "off", "on", "off");
441 out += sprintf(out, "mains_frequency: %8d %8d %8d %8d" 437 seq_printf(m, "mains_frequency: %8d %8d %8d %8d"
442 " only 50/60\n", 438 " only 50/60\n",
443 cam->mainsFreq ? 60 : 50, 50, 60, 50); 439 cam->mainsFreq ? 60 : 50, 50, 60, 50);
444 if(cam->params.flickerControl.allowableOverExposure < 0) 440 if(cam->params.flickerControl.allowableOverExposure < 0)
445 out += sprintf(out, "allowable_overexposure: %4dauto auto %8d auto\n", 441 seq_printf(m, "allowable_overexposure: %4dauto auto %8d auto\n",
446 -cam->params.flickerControl.allowableOverExposure, 442 -cam->params.flickerControl.allowableOverExposure,
447 255); 443 255);
448 else 444 else
449 out += sprintf(out, "allowable_overexposure: %8d auto %8d auto\n", 445 seq_printf(m, "allowable_overexposure: %8d auto %8d auto\n",
450 cam->params.flickerControl.allowableOverExposure, 446 cam->params.flickerControl.allowableOverExposure,
451 255); 447 255);
452 out += sprintf(out, "compression_mode: "); 448 seq_printf(m, "compression_mode: ");
453 switch(cam->params.compression.mode) { 449 switch(cam->params.compression.mode) {
454 case CPIA_COMPRESSION_NONE: 450 case CPIA_COMPRESSION_NONE:
455 out += sprintf(out, "%8s", "none"); 451 seq_printf(m, "%8s", "none");
456 break; 452 break;
457 case CPIA_COMPRESSION_AUTO: 453 case CPIA_COMPRESSION_AUTO:
458 out += sprintf(out, "%8s", "auto"); 454 seq_printf(m, "%8s", "auto");
459 break; 455 break;
460 case CPIA_COMPRESSION_MANUAL: 456 case CPIA_COMPRESSION_MANUAL:
461 out += sprintf(out, "%8s", "manual"); 457 seq_printf(m, "%8s", "manual");
462 break; 458 break;
463 default: 459 default:
464 out += sprintf(out, "%8s", "unknown"); 460 seq_printf(m, "%8s", "unknown");
465 break; 461 break;
466 } 462 }
467 out += sprintf(out, " none,auto,manual auto\n"); 463 seq_printf(m, " none,auto,manual auto\n");
468 out += sprintf(out, "decimation_enable: %8s %8s %8s %8s\n", 464 seq_printf(m, "decimation_enable: %8s %8s %8s %8s\n",
469 cam->params.compression.decimation == 465 cam->params.compression.decimation ==
470 DECIMATION_ENAB ? "on":"off", "off", "on", 466 DECIMATION_ENAB ? "on":"off", "off", "on",
471 "off"); 467 "off");
472 out += sprintf(out, "compression_target: %9s %9s %9s %9s\n", 468 seq_printf(m, "compression_target: %9s %9s %9s %9s\n",
473 cam->params.compressionTarget.frTargeting == 469 cam->params.compressionTarget.frTargeting ==
474 CPIA_COMPRESSION_TARGET_FRAMERATE ? 470 CPIA_COMPRESSION_TARGET_FRAMERATE ?
475 "framerate":"quality", 471 "framerate":"quality",
476 "framerate", "quality", "quality"); 472 "framerate", "quality", "quality");
477 out += sprintf(out, "target_framerate: %8d %8d %8d %8d\n", 473 seq_printf(m, "target_framerate: %8d %8d %8d %8d\n",
478 cam->params.compressionTarget.targetFR, 1, 30, 15); 474 cam->params.compressionTarget.targetFR, 1, 30, 15);
479 out += sprintf(out, "target_quality: %8d %8d %8d %8d\n", 475 seq_printf(m, "target_quality: %8d %8d %8d %8d\n",
480 cam->params.compressionTarget.targetQ, 1, 64, 5); 476 cam->params.compressionTarget.targetQ, 1, 64, 5);
481 out += sprintf(out, "y_threshold: %8d %8d %8d %8d\n", 477 seq_printf(m, "y_threshold: %8d %8d %8d %8d\n",
482 cam->params.yuvThreshold.yThreshold, 0, 31, 6); 478 cam->params.yuvThreshold.yThreshold, 0, 31, 6);
483 out += sprintf(out, "uv_threshold: %8d %8d %8d %8d\n", 479 seq_printf(m, "uv_threshold: %8d %8d %8d %8d\n",
484 cam->params.yuvThreshold.uvThreshold, 0, 31, 6); 480 cam->params.yuvThreshold.uvThreshold, 0, 31, 6);
485 out += sprintf(out, "hysteresis: %8d %8d %8d %8d\n", 481 seq_printf(m, "hysteresis: %8d %8d %8d %8d\n",
486 cam->params.compressionParams.hysteresis, 0, 255, 3); 482 cam->params.compressionParams.hysteresis, 0, 255, 3);
487 out += sprintf(out, "threshold_max: %8d %8d %8d %8d\n", 483 seq_printf(m, "threshold_max: %8d %8d %8d %8d\n",
488 cam->params.compressionParams.threshMax, 0, 255, 11); 484 cam->params.compressionParams.threshMax, 0, 255, 11);
489 out += sprintf(out, "small_step: %8d %8d %8d %8d\n", 485 seq_printf(m, "small_step: %8d %8d %8d %8d\n",
490 cam->params.compressionParams.smallStep, 0, 255, 1); 486 cam->params.compressionParams.smallStep, 0, 255, 1);
491 out += sprintf(out, "large_step: %8d %8d %8d %8d\n", 487 seq_printf(m, "large_step: %8d %8d %8d %8d\n",
492 cam->params.compressionParams.largeStep, 0, 255, 3); 488 cam->params.compressionParams.largeStep, 0, 255, 3);
493 out += sprintf(out, "decimation_hysteresis: %8d %8d %8d %8d\n", 489 seq_printf(m, "decimation_hysteresis: %8d %8d %8d %8d\n",
494 cam->params.compressionParams.decimationHysteresis, 490 cam->params.compressionParams.decimationHysteresis,
495 0, 255, 2); 491 0, 255, 2);
496 out += sprintf(out, "fr_diff_step_thresh: %8d %8d %8d %8d\n", 492 seq_printf(m, "fr_diff_step_thresh: %8d %8d %8d %8d\n",
497 cam->params.compressionParams.frDiffStepThresh, 493 cam->params.compressionParams.frDiffStepThresh,
498 0, 255, 5); 494 0, 255, 5);
499 out += sprintf(out, "q_diff_step_thresh: %8d %8d %8d %8d\n", 495 seq_printf(m, "q_diff_step_thresh: %8d %8d %8d %8d\n",
500 cam->params.compressionParams.qDiffStepThresh, 496 cam->params.compressionParams.qDiffStepThresh,
501 0, 255, 3); 497 0, 255, 3);
502 out += sprintf(out, "decimation_thresh_mod: %8d %8d %8d %8d\n", 498 seq_printf(m, "decimation_thresh_mod: %8d %8d %8d %8d\n",
503 cam->params.compressionParams.decimationThreshMod, 499 cam->params.compressionParams.decimationThreshMod,
504 0, 255, 2); 500 0, 255, 2);
505 /* QX3 specific entries */ 501 /* QX3 specific entries */
506 if (cam->params.qx3.qx3_detected) { 502 if (cam->params.qx3.qx3_detected) {
507 out += sprintf(out, "toplight: %8s %8s %8s %8s\n", 503 seq_printf(m, "toplight: %8s %8s %8s %8s\n",
508 cam->params.qx3.toplight ? "on" : "off", 504 cam->params.qx3.toplight ? "on" : "off",
509 "off", "on", "off"); 505 "off", "on", "off");
510 out += sprintf(out, "bottomlight: %8s %8s %8s %8s\n", 506 seq_printf(m, "bottomlight: %8s %8s %8s %8s\n",
511 cam->params.qx3.bottomlight ? "on" : "off", 507 cam->params.qx3.bottomlight ? "on" : "off",
512 "off", "on", "off"); 508 "off", "on", "off");
513 } 509 }
514 510
515 len = out - page; 511 return 0;
516 len -= off;
517 if (len < count) {
518 *eof = 1;
519 if (len <= 0) return 0;
520 } else
521 len = count;
522
523 *start = page + off;
524 return len;
525} 512}
526 513
514static int cpia_proc_open(struct inode *inode, struct file *file)
515{
516 return single_open(file, cpia_proc_show, PDE(inode)->data);
517}
527 518
528static int match(char *checkstr, char **buffer, unsigned long *count, 519static int match(char *checkstr, char **buffer, size_t *count,
529 int *find_colon, int *err) 520 int *find_colon, int *err)
530{ 521{
531 int ret, colon_found = 1; 522 int ret, colon_found = 1;
@@ -551,7 +542,7 @@ static int match(char *checkstr, char **buffer, unsigned long *count,
551 return ret; 542 return ret;
552} 543}
553 544
554static unsigned long int value(char **buffer, unsigned long *count, int *err) 545static unsigned long int value(char **buffer, size_t *count, int *err)
555{ 546{
556 char *p; 547 char *p;
557 unsigned long int ret; 548 unsigned long int ret;
@@ -565,10 +556,10 @@ static unsigned long int value(char **buffer, unsigned long *count, int *err)
565 return ret; 556 return ret;
566} 557}
567 558
568static int cpia_write_proc(struct file *file, const char __user *buf, 559static ssize_t cpia_proc_write(struct file *file, const char __user *buf,
569 unsigned long count, void *data) 560 size_t count, loff_t *pos)
570{ 561{
571 struct cam_data *cam = data; 562 struct cam_data *cam = PDE(file->f_path.dentry->d_inode)->data;
572 struct cam_params new_params; 563 struct cam_params new_params;
573 char *page, *buffer; 564 char *page, *buffer;
574 int retval, find_colon; 565 int retval, find_colon;
@@ -582,7 +573,7 @@ static int cpia_write_proc(struct file *file, const char __user *buf,
582 * from the comx driver 573 * from the comx driver
583 */ 574 */
584 if (count > PAGE_SIZE) { 575 if (count > PAGE_SIZE) {
585 printk(KERN_ERR "count is %lu > %d!!!\n", count, (int)PAGE_SIZE); 576 printk(KERN_ERR "count is %zu > %d!!!\n", count, (int)PAGE_SIZE);
586 return -ENOSPC; 577 return -ENOSPC;
587 } 578 }
588 579
@@ -1340,23 +1331,28 @@ out:
1340 return retval; 1331 return retval;
1341} 1332}
1342 1333
1334static const struct file_operations cpia_proc_fops = {
1335 .owner = THIS_MODULE,
1336 .open = cpia_proc_open,
1337 .read = seq_read,
1338 .llseek = seq_lseek,
1339 .release = single_release,
1340 .write = cpia_proc_write,
1341};
1342
1343static void create_proc_cpia_cam(struct cam_data *cam) 1343static void create_proc_cpia_cam(struct cam_data *cam)
1344{ 1344{
1345 char name[5 + 1 + 10 + 1];
1346 struct proc_dir_entry *ent; 1345 struct proc_dir_entry *ent;
1347 1346
1348 if (!cpia_proc_root || !cam) 1347 if (!cpia_proc_root || !cam)
1349 return; 1348 return;
1350 1349
1351 snprintf(name, sizeof(name), "video%d", cam->vdev.num); 1350 ent = proc_create_data(video_device_node_name(&cam->vdev),
1352 1351 S_IRUGO|S_IWUSR, cpia_proc_root,
1353 ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root); 1352 &cpia_proc_fops, cam);
1354 if (!ent) 1353 if (!ent)
1355 return; 1354 return;
1356 1355
1357 ent->data = cam;
1358 ent->read_proc = cpia_read_proc;
1359 ent->write_proc = cpia_write_proc;
1360 /* 1356 /*
1361 size of the proc entry is 3736 bytes for the standard webcam; 1357 size of the proc entry is 3736 bytes for the standard webcam;
1362 the extra features of the QX3 microscope add 189 bytes. 1358 the extra features of the QX3 microscope add 189 bytes.
@@ -1368,13 +1364,10 @@ static void create_proc_cpia_cam(struct cam_data *cam)
1368 1364
1369static void destroy_proc_cpia_cam(struct cam_data *cam) 1365static void destroy_proc_cpia_cam(struct cam_data *cam)
1370{ 1366{
1371 char name[5 + 1 + 10 + 1];
1372
1373 if (!cam || !cam->proc_entry) 1367 if (!cam || !cam->proc_entry)
1374 return; 1368 return;
1375 1369
1376 snprintf(name, sizeof(name), "video%d", cam->vdev.num); 1370 remove_proc_entry(video_device_node_name(&cam->vdev), cpia_proc_root);
1377 remove_proc_entry(name, cpia_proc_root);
1378 cam->proc_entry = NULL; 1371 cam->proc_entry = NULL;
1379} 1372}
1380 1373
@@ -3999,7 +3992,7 @@ void cpia_unregister_camera(struct cam_data *cam)
3999 } 3992 }
4000 3993
4001#ifdef CONFIG_PROC_FS 3994#ifdef CONFIG_PROC_FS
4002 DBG("destroying /proc/cpia/video%d\n", cam->vdev.num); 3995 DBG("destroying /proc/cpia/%s\n", video_device_node_name(&cam->vdev));
4003 destroy_proc_cpia_cam(cam); 3996 destroy_proc_cpia_cam(cam);
4004#endif 3997#endif
4005 if (!cam->open_count) { 3998 if (!cam->open_count) {
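The cpia hunks above replace the old read_proc/write_proc callbacks with a seq_file show routine exposed through proc_create_data(). A minimal sketch of that pattern follows; the foo_* names and the single integer field are illustrative, not taken from the patch:

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	struct foo_data {
		int gain;
	};

	/* seq_file "show" callback: emit the whole entry with seq_printf() */
	static int foo_proc_show(struct seq_file *m, void *v)
	{
		struct foo_data *foo = m->private;	/* set by single_open() */

		seq_printf(m, "gain: %d\n", foo->gain);
		return 0;
	}

	static int foo_proc_open(struct inode *inode, struct file *file)
	{
		/* PDE(inode)->data is the pointer passed to proc_create_data() */
		return single_open(file, foo_proc_show, PDE(inode)->data);
	}

	static const struct file_operations foo_proc_fops = {
		.owner   = THIS_MODULE,
		.open    = foo_proc_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	static struct foo_data foo;

	static int __init foo_init(void)
	{
		proc_create_data("foo", S_IRUGO, NULL, &foo_proc_fops, &foo);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		remove_proc_entry("foo", NULL);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");

The write side stays an ordinary ->write file operation, as in cpia_proc_write() above, and recovers the per-device pointer the same way, via PDE() on the inode behind the open file.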
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 0b4a8f309cfa..6f91415eb7b4 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -38,17 +38,12 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/videodev.h> 40#include <linux/videodev.h>
41#include <linux/stringify.h>
41#include <media/v4l2-ioctl.h> 42#include <media/v4l2-ioctl.h>
42 43
43#include "cpia2.h" 44#include "cpia2.h"
44#include "cpia2dev.h" 45#include "cpia2dev.h"
45 46
46
47//#define _CPIA2_DEBUG_
48
49#define MAKE_STRING_1(x) #x
50#define MAKE_STRING(x) MAKE_STRING_1(x)
51
52static int video_nr = -1; 47static int video_nr = -1;
53module_param(video_nr, int, 0); 48module_param(video_nr, int, 0);
54MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)"); 49MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)");
@@ -60,26 +55,26 @@ MODULE_PARM_DESC(buffer_size, "Size for each frame buffer in bytes (default 68k)
60static int num_buffers = 3; 55static int num_buffers = 3;
61module_param(num_buffers, int, 0); 56module_param(num_buffers, int, 0);
62MODULE_PARM_DESC(num_buffers, "Number of frame buffers (1-" 57MODULE_PARM_DESC(num_buffers, "Number of frame buffers (1-"
63 MAKE_STRING(VIDEO_MAX_FRAME) ", default 3)"); 58 __stringify(VIDEO_MAX_FRAME) ", default 3)");
64 59
65static int alternate = DEFAULT_ALT; 60static int alternate = DEFAULT_ALT;
66module_param(alternate, int, 0); 61module_param(alternate, int, 0);
67MODULE_PARM_DESC(alternate, "USB Alternate (" MAKE_STRING(USBIF_ISO_1) "-" 62MODULE_PARM_DESC(alternate, "USB Alternate (" __stringify(USBIF_ISO_1) "-"
68 MAKE_STRING(USBIF_ISO_6) ", default " 63 __stringify(USBIF_ISO_6) ", default "
69 MAKE_STRING(DEFAULT_ALT) ")"); 64 __stringify(DEFAULT_ALT) ")");
70 65
71static int flicker_freq = 60; 66static int flicker_freq = 60;
72module_param(flicker_freq, int, 0); 67module_param(flicker_freq, int, 0);
73MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" MAKE_STRING(50) "or" 68MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" __stringify(50) "or"
74 MAKE_STRING(60) ", default " 69 __stringify(60) ", default "
75 MAKE_STRING(60) ")"); 70 __stringify(60) ")");
76 71
77static int flicker_mode = NEVER_FLICKER; 72static int flicker_mode = NEVER_FLICKER;
78module_param(flicker_mode, int, 0); 73module_param(flicker_mode, int, 0);
79MODULE_PARM_DESC(flicker_mode, 74MODULE_PARM_DESC(flicker_mode,
80 "Flicker supression (" MAKE_STRING(NEVER_FLICKER) "or" 75 "Flicker supression (" __stringify(NEVER_FLICKER) "or"
81 MAKE_STRING(ANTI_FLICKER_ON) ", default " 76 __stringify(ANTI_FLICKER_ON) ", default "
82 MAKE_STRING(NEVER_FLICKER) ")"); 77 __stringify(NEVER_FLICKER) ")");
83 78
84MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>"); 79MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
85MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras"); 80MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
@@ -1926,7 +1921,6 @@ static const struct v4l2_file_operations fops_template = {
1926static struct video_device cpia2_template = { 1921static struct video_device cpia2_template = {
1927 /* I could not find any place for the old .initialize initializer?? */ 1922 /* I could not find any place for the old .initialize initializer?? */
1928 .name= "CPiA2 Camera", 1923 .name= "CPiA2 Camera",
1929 .minor= -1,
1930 .fops= &fops_template, 1924 .fops= &fops_template,
1931 .release= video_device_release, 1925 .release= video_device_release,
1932}; 1926};
@@ -1967,9 +1961,9 @@ void cpia2_unregister_camera(struct camera_data *cam)
1967 if (!cam->open_count) { 1961 if (!cam->open_count) {
1968 video_unregister_device(cam->vdev); 1962 video_unregister_device(cam->vdev);
1969 } else { 1963 } else {
1970 LOG("/dev/video%d removed while open, " 1964 LOG("%s removed while open, deferring "
1971 "deferring video_unregister_device\n", 1965 "video_unregister_device\n",
1972 cam->vdev->num); 1966 video_device_node_name(cam->vdev));
1973 } 1967 }
1974} 1968}
1975 1969
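The cpia2 hunk drops the driver-local MAKE_STRING macro in favour of __stringify() from <linux/stringify.h>, which performs the same expand-then-stringize trick. A small illustration with a hypothetical module parameter:

	#include <linux/module.h>
	#include <linux/stringify.h>

	#define FOO_DEFAULT 3	/* hypothetical default value */

	static int foo = FOO_DEFAULT;
	module_param(foo, int, 0);
	/* __stringify(FOO_DEFAULT) expands the macro first, so the string
	 * becomes "3" rather than "FOO_DEFAULT" */
	MODULE_PARM_DESC(foo, "foo level (default " __stringify(FOO_DEFAULT) ")");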
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 4e278db31cc9..c0885c69fd89 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -758,8 +758,8 @@ int cx18_v4l2_open(struct file *filp)
758 758
759 mutex_lock(&cx->serialize_lock); 759 mutex_lock(&cx->serialize_lock);
760 if (cx18_init_on_first_open(cx)) { 760 if (cx18_init_on_first_open(cx)) {
761 CX18_ERR("Failed to initialize on minor %d\n", 761 CX18_ERR("Failed to initialize on %s\n",
762 video_dev->minor); 762 video_device_node_name(video_dev));
763 mutex_unlock(&cx->serialize_lock); 763 mutex_unlock(&cx->serialize_lock);
764 return -ENXIO; 764 return -ENXIO;
765 } 765 }
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index c398651dd74c..987a9308d938 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -219,6 +219,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
219{ 219{
220 struct cx18_stream *s = &cx->streams[type]; 220 struct cx18_stream *s = &cx->streams[type];
221 int vfl_type = cx18_stream_info[type].vfl_type; 221 int vfl_type = cx18_stream_info[type].vfl_type;
222 const char *name;
222 int num, ret; 223 int num, ret;
223 224
224 /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something? 225 /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
@@ -258,31 +259,30 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
258 s->video_dev = NULL; 259 s->video_dev = NULL;
259 return ret; 260 return ret;
260 } 261 }
261 num = s->video_dev->num; 262
263 name = video_device_node_name(s->video_dev);
262 264
263 switch (vfl_type) { 265 switch (vfl_type) {
264 case VFL_TYPE_GRABBER: 266 case VFL_TYPE_GRABBER:
265 CX18_INFO("Registered device video%d for %s " 267 CX18_INFO("Registered device %s for %s (%d x %d.%02d kB)\n",
266 "(%d x %d.%02d kB)\n", 268 name, s->name, cx->stream_buffers[type],
267 num, s->name, cx->stream_buffers[type],
268 cx->stream_buf_size[type] / 1024, 269 cx->stream_buf_size[type] / 1024,
269 (cx->stream_buf_size[type] * 100 / 1024) % 100); 270 (cx->stream_buf_size[type] * 100 / 1024) % 100);
270 break; 271 break;
271 272
272 case VFL_TYPE_RADIO: 273 case VFL_TYPE_RADIO:
273 CX18_INFO("Registered device radio%d for %s\n", 274 CX18_INFO("Registered device %s for %s\n", name, s->name);
274 num, s->name);
275 break; 275 break;
276 276
277 case VFL_TYPE_VBI: 277 case VFL_TYPE_VBI:
278 if (cx->stream_buffers[type]) 278 if (cx->stream_buffers[type])
279 CX18_INFO("Registered device vbi%d for %s " 279 CX18_INFO("Registered device %s for %s "
280 "(%d x %d bytes)\n", 280 "(%d x %d bytes)\n",
281 num, s->name, cx->stream_buffers[type], 281 name, s->name, cx->stream_buffers[type],
282 cx->stream_buf_size[type]); 282 cx->stream_buf_size[type]);
283 else 283 else
284 CX18_INFO("Registered device vbi%d for %s\n", 284 CX18_INFO("Registered device %s for %s\n",
285 num, s->name); 285 name, s->name);
286 break; 286 break;
287 } 287 }
288 288
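Both cx18 files switch their log messages from hand-built "video%d"/"radio%d"/"vbi%d" strings to video_device_node_name(), which returns the real node name whatever the device type is. The reporting idiom, reduced to a sketch for an already registered device:

	#include <media/v4l2-dev.h>

	/* Log the node name of a registered video_device.
	 * video_device_node_name() covers video, vbi and radio nodes alike,
	 * so callers no longer switch on the VFL type themselves. */
	static void report_registration(struct video_device *vdev)
	{
		printk(KERN_INFO "registered device %s\n",
		       video_device_node_name(vdev));
	}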
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 319c459459e0..a54908235009 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -68,19 +68,19 @@ struct cx231xx_board cx231xx_boards[] = {
68 .type = CX231XX_VMUX_TELEVISION, 68 .type = CX231XX_VMUX_TELEVISION,
69 .vmux = CX231XX_VIN_3_1, 69 .vmux = CX231XX_VIN_3_1,
70 .amux = CX231XX_AMUX_VIDEO, 70 .amux = CX231XX_AMUX_VIDEO,
71 .gpio = 0, 71 .gpio = NULL,
72 }, { 72 }, {
73 .type = CX231XX_VMUX_COMPOSITE1, 73 .type = CX231XX_VMUX_COMPOSITE1,
74 .vmux = CX231XX_VIN_2_1, 74 .vmux = CX231XX_VIN_2_1,
75 .amux = CX231XX_AMUX_LINE_IN, 75 .amux = CX231XX_AMUX_LINE_IN,
76 .gpio = 0, 76 .gpio = NULL,
77 }, { 77 }, {
78 .type = CX231XX_VMUX_SVIDEO, 78 .type = CX231XX_VMUX_SVIDEO,
79 .vmux = CX231XX_VIN_1_1 | 79 .vmux = CX231XX_VIN_1_1 |
80 (CX231XX_VIN_1_2 << 8) | 80 (CX231XX_VIN_1_2 << 8) |
81 CX25840_SVIDEO_ON, 81 CX25840_SVIDEO_ON,
82 .amux = CX231XX_AMUX_LINE_IN, 82 .amux = CX231XX_AMUX_LINE_IN,
83 .gpio = 0, 83 .gpio = NULL,
84 } 84 }
85 }, 85 },
86 }, 86 },
@@ -107,19 +107,19 @@ struct cx231xx_board cx231xx_boards[] = {
107 .type = CX231XX_VMUX_TELEVISION, 107 .type = CX231XX_VMUX_TELEVISION,
108 .vmux = CX231XX_VIN_3_1, 108 .vmux = CX231XX_VIN_3_1,
109 .amux = CX231XX_AMUX_VIDEO, 109 .amux = CX231XX_AMUX_VIDEO,
110 .gpio = 0, 110 .gpio = NULL,
111 }, { 111 }, {
112 .type = CX231XX_VMUX_COMPOSITE1, 112 .type = CX231XX_VMUX_COMPOSITE1,
113 .vmux = CX231XX_VIN_2_1, 113 .vmux = CX231XX_VIN_2_1,
114 .amux = CX231XX_AMUX_LINE_IN, 114 .amux = CX231XX_AMUX_LINE_IN,
115 .gpio = 0, 115 .gpio = NULL,
116 }, { 116 }, {
117 .type = CX231XX_VMUX_SVIDEO, 117 .type = CX231XX_VMUX_SVIDEO,
118 .vmux = CX231XX_VIN_1_1 | 118 .vmux = CX231XX_VIN_1_1 |
119 (CX231XX_VIN_1_2 << 8) | 119 (CX231XX_VIN_1_2 << 8) |
120 CX25840_SVIDEO_ON, 120 CX25840_SVIDEO_ON,
121 .amux = CX231XX_AMUX_LINE_IN, 121 .amux = CX231XX_AMUX_LINE_IN,
122 .gpio = 0, 122 .gpio = NULL,
123 } 123 }
124 }, 124 },
125 }, 125 },
@@ -147,19 +147,19 @@ struct cx231xx_board cx231xx_boards[] = {
147 .type = CX231XX_VMUX_TELEVISION, 147 .type = CX231XX_VMUX_TELEVISION,
148 .vmux = CX231XX_VIN_3_1, 148 .vmux = CX231XX_VIN_3_1,
149 .amux = CX231XX_AMUX_VIDEO, 149 .amux = CX231XX_AMUX_VIDEO,
150 .gpio = 0, 150 .gpio = NULL,
151 }, { 151 }, {
152 .type = CX231XX_VMUX_COMPOSITE1, 152 .type = CX231XX_VMUX_COMPOSITE1,
153 .vmux = CX231XX_VIN_2_1, 153 .vmux = CX231XX_VIN_2_1,
154 .amux = CX231XX_AMUX_LINE_IN, 154 .amux = CX231XX_AMUX_LINE_IN,
155 .gpio = 0, 155 .gpio = NULL,
156 }, { 156 }, {
157 .type = CX231XX_VMUX_SVIDEO, 157 .type = CX231XX_VMUX_SVIDEO,
158 .vmux = CX231XX_VIN_1_1 | 158 .vmux = CX231XX_VIN_1_1 |
159 (CX231XX_VIN_1_2 << 8) | 159 (CX231XX_VIN_1_2 << 8) |
160 CX25840_SVIDEO_ON, 160 CX25840_SVIDEO_ON,
161 .amux = CX231XX_AMUX_LINE_IN, 161 .amux = CX231XX_AMUX_LINE_IN,
162 .gpio = 0, 162 .gpio = NULL,
163 } 163 }
164 }, 164 },
165 }, 165 },
@@ -856,8 +856,9 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
856 856
857 if (dev->users) { 857 if (dev->users) {
858 cx231xx_warn 858 cx231xx_warn
859 ("device /dev/video%d is open! Deregistration and memory " 859 ("device %s is open! Deregistration and memory "
860 "deallocation are deferred on close.\n", dev->vdev->num); 860 "deallocation are deferred on close.\n",
861 video_device_node_name(dev->vdev));
861 862
862 dev->state |= DEV_MISCONFIGURED; 863 dev->state |= DEV_MISCONFIGURED;
863 cx231xx_uninit_isoc(dev); 864 cx231xx_uninit_isoc(dev);
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index 0d333e679f70..4a60dfbc347d 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -66,32 +66,6 @@ MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
66static LIST_HEAD(cx231xx_devlist); 66static LIST_HEAD(cx231xx_devlist);
67static DEFINE_MUTEX(cx231xx_devlist_mutex); 67static DEFINE_MUTEX(cx231xx_devlist_mutex);
68 68
69struct cx231xx *cx231xx_get_device(int minor,
70 enum v4l2_buf_type *fh_type, int *has_radio)
71{
72 struct cx231xx *h, *dev = NULL;
73
74 *fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
75 *has_radio = 0;
76
77 mutex_lock(&cx231xx_devlist_mutex);
78 list_for_each_entry(h, &cx231xx_devlist, devlist) {
79 if (h->vdev->minor == minor)
80 dev = h;
81 if (h->vbi_dev->minor == minor) {
82 dev = h;
83 *fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
84 }
85 if (h->radio_dev && h->radio_dev->minor == minor) {
86 dev = h;
87 *has_radio = 1;
88 }
89 }
90 mutex_unlock(&cx231xx_devlist_mutex);
91
92 return dev;
93}
94
95/* 69/*
96 * cx231xx_realease_resources() 70 * cx231xx_realease_resources()
97 * unregisters the v4l2,i2c and usb devices 71 * unregisters the v4l2,i2c and usb devices
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index cd135f01b9c1..15826f98b688 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -197,8 +197,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
197 usb_make_path(dev->udev, ir->phys, sizeof(ir->phys)); 197 usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
198 strlcat(ir->phys, "/input0", sizeof(ir->phys)); 198 strlcat(ir->phys, "/input0", sizeof(ir->phys));
199 199
200 err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER, 200 err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
201 dev->board.ir_codes);
202 if (err < 0) 201 if (err < 0)
203 goto err_out_free; 202 goto err_out_free;
204 203
@@ -217,7 +216,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
217 cx231xx_ir_start(ir); 216 cx231xx_ir_start(ir);
218 217
219 /* all done */ 218 /* all done */
220 err = input_register_device(ir->input); 219 err = ir_input_register(ir->input, dev->board.ir_codes);
221 if (err) 220 if (err)
222 goto err_out_stop; 221 goto err_out_stop;
223 222
@@ -226,8 +225,6 @@ err_out_stop:
226 cx231xx_ir_stop(ir); 225 cx231xx_ir_stop(ir);
227 dev->ir = NULL; 226 dev->ir = NULL;
228err_out_free: 227err_out_free:
229 ir_input_free(input_dev);
230 input_free_device(input_dev);
231 kfree(ir); 228 kfree(ir);
232 return err; 229 return err;
233} 230}
@@ -241,8 +238,7 @@ int cx231xx_ir_fini(struct cx231xx *dev)
241 return 0; 238 return 0;
242 239
243 cx231xx_ir_stop(ir); 240 cx231xx_ir_stop(ir);
244 ir_input_free(ir->input); 241 ir_input_unregister(ir->input);
245 input_unregister_device(ir->input);
246 kfree(ir); 242 kfree(ir);
247 243
248 /* done */ 244 /* done */
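The cx231xx IR hunks move the scancode table out of ir_input_init() and into ir_input_register()/ir_input_unregister(), which also take over the input-device teardown. A condensed sketch of the resulting flow, using only the calls and signatures visible in the hunks above (the foo_* names are hypothetical and error handling is trimmed):

	#include <linux/input.h>
	#include <media/ir-common.h>

	struct foo_ir {
		struct input_dev *input;
		struct ir_input_state ir;
	};

	static int foo_ir_init(struct foo_ir *ir, struct ir_scancode_table *ir_codes)
	{
		int err;

		ir->input = input_allocate_device();
		if (!ir->input)
			return -ENOMEM;

		/* The keytable is no longer passed to ir_input_init()... */
		err = ir_input_init(ir->input, &ir->ir, IR_TYPE_OTHER);
		if (err < 0)
			return err;

		/* ...it now goes to ir_input_register(), which replaces the
		 * direct input_register_device() call. */
		return ir_input_register(ir->input, ir_codes);
	}

	static void foo_ir_fini(struct foo_ir *ir)
	{
		/* Replaces the old ir_input_free() + input_unregister_device() pair. */
		ir_input_unregister(ir->input);
	}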
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index d095aa0d6d19..d4f546f11d74 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -1916,20 +1916,29 @@ static int radio_queryctrl(struct file *file, void *priv,
1916 */ 1916 */
1917static int cx231xx_v4l2_open(struct file *filp) 1917static int cx231xx_v4l2_open(struct file *filp)
1918{ 1918{
1919 int minor = video_devdata(filp)->minor;
1920 int errCode = 0, radio = 0; 1919 int errCode = 0, radio = 0;
1921 struct cx231xx *dev = NULL; 1920 struct video_device *vdev = video_devdata(filp);
1921 struct cx231xx *dev = video_drvdata(filp);
1922 struct cx231xx_fh *fh; 1922 struct cx231xx_fh *fh;
1923 enum v4l2_buf_type fh_type = 0; 1923 enum v4l2_buf_type fh_type = 0;
1924 1924
1925 dev = cx231xx_get_device(minor, &fh_type, &radio); 1925 switch (vdev->vfl_type) {
1926 if (NULL == dev) 1926 case VFL_TYPE_GRABBER:
1927 return -ENODEV; 1927 fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1928 break;
1929 case VFL_TYPE_VBI:
1930 fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
1931 break;
1932 case VFL_TYPE_RADIO:
1933 radio = 1;
1934 break;
1935 }
1928 1936
1929 mutex_lock(&dev->lock); 1937 mutex_lock(&dev->lock);
1930 1938
1931 cx231xx_videodbg("open minor=%d type=%s users=%d\n", 1939 cx231xx_videodbg("open dev=%s type=%s users=%d\n",
1932 minor, v4l2_type_names[fh_type], dev->users); 1940 video_device_node_name(vdev), v4l2_type_names[fh_type],
1941 dev->users);
1933 1942
1934#if 0 1943#if 0
1935 errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); 1944 errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
@@ -2020,25 +2029,25 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
2020 /*FIXME: I2C IR should be disconnected */ 2029 /*FIXME: I2C IR should be disconnected */
2021 2030
2022 if (dev->radio_dev) { 2031 if (dev->radio_dev) {
2023 if (-1 != dev->radio_dev->minor) 2032 if (video_is_registered(dev->radio_dev))
2024 video_unregister_device(dev->radio_dev); 2033 video_unregister_device(dev->radio_dev);
2025 else 2034 else
2026 video_device_release(dev->radio_dev); 2035 video_device_release(dev->radio_dev);
2027 dev->radio_dev = NULL; 2036 dev->radio_dev = NULL;
2028 } 2037 }
2029 if (dev->vbi_dev) { 2038 if (dev->vbi_dev) {
2030 cx231xx_info("V4L2 device /dev/vbi%d deregistered\n", 2039 cx231xx_info("V4L2 device %s deregistered\n",
2031 dev->vbi_dev->num); 2040 video_device_node_name(dev->vbi_dev));
2032 if (-1 != dev->vbi_dev->minor) 2041 if (video_is_registered(dev->vbi_dev))
2033 video_unregister_device(dev->vbi_dev); 2042 video_unregister_device(dev->vbi_dev);
2034 else 2043 else
2035 video_device_release(dev->vbi_dev); 2044 video_device_release(dev->vbi_dev);
2036 dev->vbi_dev = NULL; 2045 dev->vbi_dev = NULL;
2037 } 2046 }
2038 if (dev->vdev) { 2047 if (dev->vdev) {
2039 cx231xx_info("V4L2 device /dev/video%d deregistered\n", 2048 cx231xx_info("V4L2 device %s deregistered\n",
2040 dev->vdev->num); 2049 video_device_node_name(dev->vdev));
2041 if (-1 != dev->vdev->minor) 2050 if (video_is_registered(dev->vdev))
2042 video_unregister_device(dev->vdev); 2051 video_unregister_device(dev->vdev);
2043 else 2052 else
2044 video_device_release(dev->vdev); 2053 video_device_release(dev->vdev);
@@ -2268,7 +2277,6 @@ static const struct video_device cx231xx_video_template = {
2268 .fops = &cx231xx_v4l_fops, 2277 .fops = &cx231xx_v4l_fops,
2269 .release = video_device_release, 2278 .release = video_device_release,
2270 .ioctl_ops = &video_ioctl_ops, 2279 .ioctl_ops = &video_ioctl_ops,
2271 .minor = -1,
2272 .tvnorms = V4L2_STD_ALL, 2280 .tvnorms = V4L2_STD_ALL,
2273 .current_norm = V4L2_STD_PAL, 2281 .current_norm = V4L2_STD_PAL,
2274}; 2282};
@@ -2303,7 +2311,6 @@ static struct video_device cx231xx_radio_template = {
2303 .name = "cx231xx-radio", 2311 .name = "cx231xx-radio",
2304 .fops = &radio_fops, 2312 .fops = &radio_fops,
2305 .ioctl_ops = &radio_ioctl_ops, 2313 .ioctl_ops = &radio_ioctl_ops,
2306 .minor = -1,
2307}; 2314};
2308 2315
2309/******************************** usb interface ******************************/ 2316/******************************** usb interface ******************************/
@@ -2319,13 +2326,13 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
2319 return NULL; 2326 return NULL;
2320 2327
2321 *vfd = *template; 2328 *vfd = *template;
2322 vfd->minor = -1;
2323 vfd->v4l2_dev = &dev->v4l2_dev; 2329 vfd->v4l2_dev = &dev->v4l2_dev;
2324 vfd->release = video_device_release; 2330 vfd->release = video_device_release;
2325 vfd->debug = video_debug; 2331 vfd->debug = video_debug;
2326 2332
2327 snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name); 2333 snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
2328 2334
2335 video_set_drvdata(vfd, dev);
2329 return vfd; 2336 return vfd;
2330} 2337}
2331 2338
@@ -2374,8 +2381,8 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2374 return ret; 2381 return ret;
2375 } 2382 }
2376 2383
2377 cx231xx_info("%s/0: registered device video%d [v4l2]\n", 2384 cx231xx_info("%s/0: registered device %s [v4l2]\n",
2378 dev->name, dev->vdev->num); 2385 dev->name, video_device_node_name(dev->vdev));
2379 2386
2380 /* Initialize VBI template */ 2387 /* Initialize VBI template */
2381 memcpy(&cx231xx_vbi_template, &cx231xx_video_template, 2388 memcpy(&cx231xx_vbi_template, &cx231xx_video_template,
@@ -2393,8 +2400,8 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2393 return ret; 2400 return ret;
2394 } 2401 }
2395 2402
2396 cx231xx_info("%s/0: registered device vbi%d\n", 2403 cx231xx_info("%s/0: registered device %s\n",
2397 dev->name, dev->vbi_dev->num); 2404 dev->name, video_device_node_name(dev->vbi_dev));
2398 2405
2399 if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) { 2406 if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) {
2400 dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template, 2407 dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template,
@@ -2409,12 +2416,13 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2409 cx231xx_errdev("can't register radio device\n"); 2416 cx231xx_errdev("can't register radio device\n");
2410 return ret; 2417 return ret;
2411 } 2418 }
2412 cx231xx_info("Registered radio device as /dev/radio%d\n", 2419 cx231xx_info("Registered radio device as %s\n",
2413 dev->radio_dev->num); 2420 video_device_node_name(dev->radio_dev));
2414 } 2421 }
2415 2422
2416 cx231xx_info("V4L2 device registered as /dev/video%d and /dev/vbi%d\n", 2423 cx231xx_info("V4L2 device registered as %s and %s\n",
2417 dev->vdev->num, dev->vbi_dev->num); 2424 video_device_node_name(dev->vdev),
2425 video_device_node_name(dev->vbi_dev));
2418 2426
2419 return 0; 2427 return 0;
2420} 2428}
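cx231xx_v4l2_open() no longer walks a global device list keyed by minor number: the device pointer is stored with video_set_drvdata() when the node is created and read back with video_drvdata(), while the buffer type comes from vdev->vfl_type. The same pattern, reduced to its essentials with placeholder foo_* names:

	#include <media/v4l2-dev.h>
	#include <media/v4l2-ioctl.h>

	struct foo_dev {
		int users;	/* driver-private state */
	};

	static int foo_open(struct file *filp)
	{
		struct video_device *vdev = video_devdata(filp);
		struct foo_dev *dev = video_drvdata(filp);	/* set at init time */
		enum v4l2_buf_type type = 0;
		int radio = 0;

		switch (vdev->vfl_type) {
		case VFL_TYPE_GRABBER:
			type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			break;
		case VFL_TYPE_VBI:
			type = V4L2_BUF_TYPE_VBI_CAPTURE;
			break;
		case VFL_TYPE_RADIO:
			radio = 1;
			break;
		}
		/* ...per-filehandle setup would use dev, type and radio here... */
		dev->users++;
		return 0;
	}

	/* At registration time the association is made once: */
	static void foo_attach(struct foo_dev *dev, struct video_device *vfd)
	{
		video_set_drvdata(vfd, dev);
		/* followed by video_register_device(vfd, VFL_TYPE_GRABBER, -1); */
	}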
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 64e2ddd3c401..17d4d1a800ce 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -689,8 +689,6 @@ void cx231xx_release_analog_resources(struct cx231xx *dev);
689int cx231xx_register_analog_devices(struct cx231xx *dev); 689int cx231xx_register_analog_devices(struct cx231xx *dev);
690void cx231xx_remove_from_devlist(struct cx231xx *dev); 690void cx231xx_remove_from_devlist(struct cx231xx *dev);
691void cx231xx_add_into_devlist(struct cx231xx *dev); 691void cx231xx_add_into_devlist(struct cx231xx *dev);
692struct cx231xx *cx231xx_get_device(int minor,
693 enum v4l2_buf_type *fh_type, int *has_radio);
694void cx231xx_init_extension(struct cx231xx *dev); 692void cx231xx_init_extension(struct cx231xx *dev);
695void cx231xx_close_extension(struct cx231xx *dev); 693void cx231xx_close_extension(struct cx231xx *dev);
696 694
diff --git a/drivers/media/video/cx23885/cimax2.c b/drivers/media/video/cx23885/cimax2.c
index c04222ffb286..d4a9d2c5947c 100644
--- a/drivers/media/video/cx23885/cimax2.c
+++ b/drivers/media/video/cx23885/cimax2.c
@@ -53,6 +53,8 @@
53#define NETUP_CI_CTL 0x04 53#define NETUP_CI_CTL 0x04
54#define NETUP_CI_RD 1 54#define NETUP_CI_RD 1
55 55
56#define NETUP_IRQ_DETAM 0x1
57#define NETUP_IRQ_IRQAM 0x4
56 58
57static unsigned int ci_dbg; 59static unsigned int ci_dbg;
58module_param(ci_dbg, int, 0644); 60module_param(ci_dbg, int, 0644);
@@ -73,6 +75,9 @@ struct netup_ci_state {
73 int status; 75 int status;
74 struct work_struct work; 76 struct work_struct work;
75 void *priv; 77 void *priv;
78 u8 current_irq_mode;
79 int current_ci_flag;
80 unsigned long next_status_checked_time;
76}; 81};
77 82
78 83
@@ -169,24 +174,26 @@ int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
169 if (0 != slot) 174 if (0 != slot)
170 return -EINVAL; 175 return -EINVAL;
171 176
172 ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 177 if (state->current_ci_flag != flag) {
173 0, &store, 1); 178 ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
174 if (ret != 0) 179 0, &store, 1);
175 return ret; 180 if (ret != 0)
181 return ret;
176 182
177 store &= ~0x0c; 183 store &= ~0x0c;
178 store |= flag; 184 store |= flag;
179 185
180 ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 186 ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
181 0, &store, 1); 187 0, &store, 1);
182 if (ret != 0) 188 if (ret != 0)
183 return ret; 189 return ret;
190 };
191 state->current_ci_flag = flag;
184 192
185 mutex_lock(&dev->gpio_lock); 193 mutex_lock(&dev->gpio_lock);
186 194
187 /* write addr */ 195 /* write addr */
188 cx_write(MC417_OEN, NETUP_EN_ALL); 196 cx_write(MC417_OEN, NETUP_EN_ALL);
189 msleep(2);
190 cx_write(MC417_RWD, NETUP_CTRL_OFF | 197 cx_write(MC417_RWD, NETUP_CTRL_OFF |
191 NETUP_ADLO | (0xff & addr)); 198 NETUP_ADLO | (0xff & addr));
192 cx_clear(MC417_RWD, NETUP_ADLO); 199 cx_clear(MC417_RWD, NETUP_ADLO);
@@ -196,7 +203,6 @@ int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
196 203
197 if (read) { /* data in */ 204 if (read) { /* data in */
198 cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA); 205 cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA);
199 msleep(2);
200 } else /* data out */ 206 } else /* data out */
201 cx_write(MC417_RWD, NETUP_CTRL_OFF | data); 207 cx_write(MC417_RWD, NETUP_CTRL_OFF | data);
202 208
@@ -213,8 +219,8 @@ int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
213 if (mem < 0) 219 if (mem < 0)
214 return -EREMOTEIO; 220 return -EREMOTEIO;
215 221
216 ci_dbg_print("%s: %s: addr=[0x%02x], %s=%x\n", __func__, 222 ci_dbg_print("%s: %s: chipaddr=[0x%x] addr=[0x%02x], %s=%x\n", __func__,
217 (read) ? "read" : "write", addr, 223 (read) ? "read" : "write", state->ci_i2c_addr, addr,
218 (flag == NETUP_CI_CTL) ? "ctl" : "mem", 224 (flag == NETUP_CI_CTL) ? "ctl" : "mem",
219 (read) ? mem : data); 225 (read) ? mem : data);
220 226
@@ -283,14 +289,39 @@ int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
283 return 0; 289 return 0;
284} 290}
285 291
292int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
293{
294 struct netup_ci_state *state = en50221->data;
295 int ret;
296
297 if (irq_mode == state->current_irq_mode)
298 return 0;
299
300 ci_dbg_print("%s: chipaddr=[0x%x] setting ci IRQ to [0x%x] \n",
301 __func__, state->ci_i2c_addr, irq_mode);
302 ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
303 0x1b, &irq_mode, 1);
304
305 if (ret != 0)
306 return ret;
307
308 state->current_irq_mode = irq_mode;
309
310 return 0;
311}
312
286int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot) 313int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
287{ 314{
288 struct netup_ci_state *state = en50221->data; 315 struct netup_ci_state *state = en50221->data;
289 u8 buf = 0x60; 316 u8 buf;
290 317
291 if (0 != slot) 318 if (0 != slot)
292 return -EINVAL; 319 return -EINVAL;
293 320
321 netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
322 0, &buf, 1);
323 buf |= 0x60;
324
294 return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 325 return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
295 0, &buf, 1); 326 0, &buf, 1);
296} 327}
@@ -303,21 +334,35 @@ static void netup_read_ci_status(struct work_struct *work)
303 u8 buf[33]; 334 u8 buf[33];
304 int ret; 335 int ret;
305 336
306 ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 337 /* CAM module IRQ processing. fast operation */
307 0, &buf[0], 33); 338 dvb_ca_en50221_frda_irq(&state->ca, 0);
308 339
309 if (ret != 0) 340 /* CAM module INSERT/REMOVE processing. slow operation because of i2c
310 return; 341 * transfers */
342 if (time_after(jiffies, state->next_status_checked_time)
343 || !state->status) {
344 ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
345 0, &buf[0], 33);
346
347 state->next_status_checked_time = jiffies
348 + msecs_to_jiffies(1000);
349
350 if (ret != 0)
351 return;
311 352
312 ci_dbg_print("%s: Slot Status Addr=[0x%04x], Reg=[0x%02x], data=%02x, " 353 ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
313 "TS config = %02x\n", __func__, state->ci_i2c_addr, 0, buf[0], 354 "Reg=[0x%02x], data=%02x, "
314 buf[32]); 355 "TS config = %02x\n", __func__,
356 state->ci_i2c_addr, 0, buf[0],
357 buf[0]);
315 358
316 if (buf[0] & 1) 359
317 state->status = DVB_CA_EN50221_POLL_CAM_PRESENT | 360 if (buf[0] & 1)
318 DVB_CA_EN50221_POLL_CAM_READY; 361 state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
319 else 362 DVB_CA_EN50221_POLL_CAM_READY;
320 state->status = 0; 363 else
364 state->status = 0;
365 };
321} 366}
322 367
323/* CI irq handler */ 368/* CI irq handler */
@@ -347,6 +392,9 @@ int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open
347 if (0 != slot) 392 if (0 != slot)
348 return -EINVAL; 393 return -EINVAL;
349 394
395 netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | NETUP_IRQ_IRQAM)
396 : NETUP_IRQ_DETAM);
397
350 return state->status; 398 return state->status;
351} 399}
352 400
@@ -381,8 +429,8 @@ int netup_ci_init(struct cx23885_tsport *port)
381 0x01, /* power on (use it like store place) */ 429 0x01, /* power on (use it like store place) */
382 0x00, /* RFU */ 430 0x00, /* RFU */
383 0x00, /* int status read only */ 431 0x00, /* int status read only */
384 0x01, /* all int unmasked */ 432 NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
385 0x04, /* int config */ 433 0x05, /* EXTINT=active-high, INT=push-pull */
386 0x00, /* USCG1 */ 434 0x00, /* USCG1 */
387 0x04, /* ack active low */ 435 0x04, /* ack active low */
388 0x00, /* LOCK = 0 */ 436 0x00, /* LOCK = 0 */
@@ -422,6 +470,7 @@ int netup_ci_init(struct cx23885_tsport *port)
422 state->ca.poll_slot_status = netup_poll_ci_slot_status; 470 state->ca.poll_slot_status = netup_poll_ci_slot_status;
423 state->ca.data = state; 471 state->ca.data = state;
424 state->priv = port; 472 state->priv = port;
473 state->current_irq_mode = NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM;
425 474
426 ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 475 ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
427 0, &cimax_init[0], 34); 476 0, &cimax_init[0], 34);
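The cimax2 rework splits CAM handling into a fast path, dvb_ca_en50221_frda_irq() on every interrupt, and a slow path that re-reads the module status over I2C at most once a second, tracked with a jiffies deadline. The rate-limiting idiom used there, shown in isolation with hypothetical names:

	#include <linux/jiffies.h>

	struct foo_state {
		unsigned long next_status_checked_time;	/* jiffies deadline */
		int status;				/* cached slot status */
	};

	/* Do the expensive status read only when the previous result has
	 * expired, or when nothing has been cached yet. */
	static void foo_poll_status(struct foo_state *state)
	{
		if (time_after(jiffies, state->next_status_checked_time) ||
		    !state->status) {
			/* ...slow bus transfer updating state->status... */
			state->next_status_checked_time =
				jiffies + msecs_to_jiffies(1000);
		}
	}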
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 0eed852c61e9..88c0d2481118 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1568,28 +1568,11 @@ static int vidioc_queryctrl(struct file *file, void *priv,
1568 1568
1569static int mpeg_open(struct file *file) 1569static int mpeg_open(struct file *file)
1570{ 1570{
1571 int minor = video_devdata(file)->minor; 1571 struct cx23885_dev *dev = video_drvdata(file);
1572 struct cx23885_dev *h, *dev = NULL;
1573 struct list_head *list;
1574 struct cx23885_fh *fh; 1572 struct cx23885_fh *fh;
1575 1573
1576 dprintk(2, "%s()\n", __func__); 1574 dprintk(2, "%s()\n", __func__);
1577 1575
1578 lock_kernel();
1579 list_for_each(list, &cx23885_devlist) {
1580 h = list_entry(list, struct cx23885_dev, devlist);
1581 if (h->v4l_device &&
1582 h->v4l_device->minor == minor) {
1583 dev = h;
1584 break;
1585 }
1586 }
1587
1588 if (dev == NULL) {
1589 unlock_kernel();
1590 return -ENODEV;
1591 }
1592
1593 /* allocate + initialize per filehandle data */ 1576 /* allocate + initialize per filehandle data */
1594 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 1577 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
1595 if (NULL == fh) { 1578 if (NULL == fh) {
@@ -1597,6 +1580,8 @@ static int mpeg_open(struct file *file)
1597 return -ENOMEM; 1580 return -ENOMEM;
1598 } 1581 }
1599 1582
1583 lock_kernel();
1584
1600 file->private_data = fh; 1585 file->private_data = fh;
1601 fh->dev = dev; 1586 fh->dev = dev;
1602 1587
@@ -1736,7 +1721,6 @@ static struct video_device cx23885_mpeg_template = {
1736 .name = "cx23885", 1721 .name = "cx23885",
1737 .fops = &mpeg_fops, 1722 .fops = &mpeg_fops,
1738 .ioctl_ops = &mpeg_ioctl_ops, 1723 .ioctl_ops = &mpeg_ioctl_ops,
1739 .minor = -1,
1740 .tvnorms = CX23885_NORMS, 1724 .tvnorms = CX23885_NORMS,
1741 .current_norm = V4L2_STD_NTSC_M, 1725 .current_norm = V4L2_STD_NTSC_M,
1742}; 1726};
@@ -1746,7 +1730,7 @@ void cx23885_417_unregister(struct cx23885_dev *dev)
1746 dprintk(1, "%s()\n", __func__); 1730 dprintk(1, "%s()\n", __func__);
1747 1731
1748 if (dev->v4l_device) { 1732 if (dev->v4l_device) {
1749 if (-1 != dev->v4l_device->minor) 1733 if (video_is_registered(dev->v4l_device))
1750 video_unregister_device(dev->v4l_device); 1734 video_unregister_device(dev->v4l_device);
1751 else 1735 else
1752 video_device_release(dev->v4l_device); 1736 video_device_release(dev->v4l_device);
@@ -1803,6 +1787,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
1803 /* Allocate and initialize V4L video device */ 1787 /* Allocate and initialize V4L video device */
1804 dev->v4l_device = cx23885_video_dev_alloc(tsport, 1788 dev->v4l_device = cx23885_video_dev_alloc(tsport,
1805 dev->pci, &cx23885_mpeg_template, "mpeg"); 1789 dev->pci, &cx23885_mpeg_template, "mpeg");
1790 video_set_drvdata(dev->v4l_device, dev);
1806 err = video_register_device(dev->v4l_device, 1791 err = video_register_device(dev->v4l_device,
1807 VFL_TYPE_GRABBER, -1); 1792 VFL_TYPE_GRABBER, -1);
1808 if (err < 0) { 1793 if (err < 0) {
@@ -1810,8 +1795,8 @@ int cx23885_417_register(struct cx23885_dev *dev)
1810 return err; 1795 return err;
1811 } 1796 }
1812 1797
1813 printk(KERN_INFO "%s: registered device video%d [mpeg]\n", 1798 printk(KERN_INFO "%s: registered device %s [mpeg]\n",
1814 dev->name, dev->v4l_device->num); 1799 dev->name, video_device_node_name(dev->v4l_device));
1815 1800
1816 return 0; 1801 return 0;
1817} 1802}
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 04b12d27bc13..0dde57e96d30 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -55,9 +55,6 @@ MODULE_PARM_DESC(card, "card type");
55 55
56static unsigned int cx23885_devcount; 56static unsigned int cx23885_devcount;
57 57
58static DEFINE_MUTEX(devlist);
59LIST_HEAD(cx23885_devlist);
60
61#define NO_SYNC_LINE (-1U) 58#define NO_SYNC_LINE (-1U)
62 59
63/* FIXME, these allocations will change when 60/* FIXME, these allocations will change when
@@ -785,10 +782,6 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
785 dev->nr = cx23885_devcount++; 782 dev->nr = cx23885_devcount++;
786 sprintf(dev->name, "cx23885[%d]", dev->nr); 783 sprintf(dev->name, "cx23885[%d]", dev->nr);
787 784
788 mutex_lock(&devlist);
789 list_add_tail(&dev->devlist, &cx23885_devlist);
790 mutex_unlock(&devlist);
791
792 /* Configure the internal memory */ 785 /* Configure the internal memory */
793 if (dev->pci->device == 0x8880) { 786 if (dev->pci->device == 0x8880) {
794 /* Could be 887 or 888, assume a default */ 787 /* Could be 887 or 888, assume a default */
@@ -2008,10 +2001,6 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
2008 /* unregister stuff */ 2001 /* unregister stuff */
2009 free_irq(pci_dev->irq, dev); 2002 free_irq(pci_dev->irq, dev);
2010 2003
2011 mutex_lock(&devlist);
2012 list_del(&dev->devlist);
2013 mutex_unlock(&devlist);
2014
2015 cx23885_dev_unregister(dev); 2004 cx23885_dev_unregister(dev);
2016 v4l2_device_unregister(v4l2_dev); 2005 v4l2_device_unregister(v4l2_dev);
2017 kfree(dev); 2006 kfree(dev);
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index 469e083dd5f8..768eec92ccf9 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -377,7 +377,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
377 cx23885_boards[dev->board].name); 377 cx23885_boards[dev->board].name);
378 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(dev->pci)); 378 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(dev->pci));
379 379
380 ret = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); 380 ret = ir_input_init(input_dev, &ir->ir, ir_type);
381 if (ret < 0) 381 if (ret < 0)
382 goto err_out_free; 382 goto err_out_free;
383 383
@@ -397,7 +397,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
397 dev->ir_input = ir; 397 dev->ir_input = ir;
398 cx23885_input_ir_start(dev); 398 cx23885_input_ir_start(dev);
399 399
400 ret = input_register_device(ir->dev); 400 ret = ir_input_register(ir->dev, ir_codes);
401 if (ret) 401 if (ret)
402 goto err_out_stop; 402 goto err_out_stop;
403 403
@@ -407,8 +407,6 @@ err_out_stop:
407 cx23885_input_ir_stop(dev); 407 cx23885_input_ir_stop(dev);
408 dev->ir_input = NULL; 408 dev->ir_input = NULL;
409err_out_free: 409err_out_free:
410 ir_input_free(input_dev);
411 input_free_device(input_dev);
412 kfree(ir); 410 kfree(ir);
413 return ret; 411 return ret;
414} 412}
@@ -420,8 +418,7 @@ void cx23885_input_fini(struct cx23885_dev *dev)
420 418
421 if (dev->ir_input == NULL) 419 if (dev->ir_input == NULL)
422 return; 420 return;
423 ir_input_free(dev->ir_input->dev); 421 ir_input_unregister(dev->ir_input->dev);
424 input_unregister_device(dev->ir_input->dev);
425 kfree(dev->ir_input); 422 kfree(dev->ir_input);
426 dev->ir_input = NULL; 423 dev->ir_input = NULL;
427} 424}
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 8b372b4f0de2..8934d61cf660 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -318,11 +318,11 @@ static struct video_device *cx23885_vdev_init(struct cx23885_dev *dev,
318 if (NULL == vfd) 318 if (NULL == vfd)
319 return NULL; 319 return NULL;
320 *vfd = *template; 320 *vfd = *template;
321 vfd->minor = -1;
322 vfd->v4l2_dev = &dev->v4l2_dev; 321 vfd->v4l2_dev = &dev->v4l2_dev;
323 vfd->release = video_device_release; 322 vfd->release = video_device_release;
324 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", 323 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
325 dev->name, type, cx23885_boards[dev->board].name); 324 dev->name, type, cx23885_boards[dev->board].name);
325 video_set_drvdata(vfd, dev);
326 return vfd; 326 return vfd;
327} 327}
328 328
@@ -716,46 +716,34 @@ static int get_resource(struct cx23885_fh *fh)
716 716
717static int video_open(struct file *file) 717static int video_open(struct file *file)
718{ 718{
719 int minor = video_devdata(file)->minor; 719 struct video_device *vdev = video_devdata(file);
720 struct cx23885_dev *h, *dev = NULL; 720 struct cx23885_dev *dev = video_drvdata(file);
721 struct cx23885_fh *fh; 721 struct cx23885_fh *fh;
722 struct list_head *list;
723 enum v4l2_buf_type type = 0; 722 enum v4l2_buf_type type = 0;
724 int radio = 0; 723 int radio = 0;
725 724
726 lock_kernel(); 725 switch (vdev->vfl_type) {
727 list_for_each(list, &cx23885_devlist) { 726 case VFL_TYPE_GRABBER:
728 h = list_entry(list, struct cx23885_dev, devlist); 727 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
729 if (h->video_dev && 728 break;
730 h->video_dev->minor == minor) { 729 case VFL_TYPE_VBI:
731 dev = h; 730 type = V4L2_BUF_TYPE_VBI_CAPTURE;
732 type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 731 break;
733 } 732 case VFL_TYPE_RADIO:
734 if (h->vbi_dev && 733 radio = 1;
735 h->vbi_dev->minor == minor) { 734 break;
736 dev = h;
737 type = V4L2_BUF_TYPE_VBI_CAPTURE;
738 }
739 if (h->radio_dev &&
740 h->radio_dev->minor == minor) {
741 radio = 1;
742 dev = h;
743 }
744 }
745 if (NULL == dev) {
746 unlock_kernel();
747 return -ENODEV;
748 } 735 }
749 736
750 dprintk(1, "open minor=%d radio=%d type=%s\n", 737 dprintk(1, "open dev=%s radio=%d type=%s\n",
751 minor, radio, v4l2_type_names[type]); 738 video_device_node_name(vdev), radio, v4l2_type_names[type]);
752 739
753 /* allocate + initialize per filehandle data */ 740 /* allocate + initialize per filehandle data */
754 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 741 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
755 if (NULL == fh) { 742 if (NULL == fh)
756 unlock_kernel();
757 return -ENOMEM; 743 return -ENOMEM;
758 } 744
745 lock_kernel();
746
759 file->private_data = fh; 747 file->private_data = fh;
760 fh->dev = dev; 748 fh->dev = dev;
761 fh->radio = radio; 749 fh->radio = radio;
@@ -1441,7 +1429,6 @@ static struct video_device cx23885_vbi_template;
1441static struct video_device cx23885_video_template = { 1429static struct video_device cx23885_video_template = {
1442 .name = "cx23885-video", 1430 .name = "cx23885-video",
1443 .fops = &video_fops, 1431 .fops = &video_fops,
1444 .minor = -1,
1445 .ioctl_ops = &video_ioctl_ops, 1432 .ioctl_ops = &video_ioctl_ops,
1446 .tvnorms = CX23885_NORMS, 1433 .tvnorms = CX23885_NORMS,
1447 .current_norm = V4L2_STD_NTSC_M, 1434 .current_norm = V4L2_STD_NTSC_M,
@@ -1461,7 +1448,7 @@ void cx23885_video_unregister(struct cx23885_dev *dev)
1461 cx_clear(PCI_INT_MSK, 1); 1448 cx_clear(PCI_INT_MSK, 1);
1462 1449
1463 if (dev->video_dev) { 1450 if (dev->video_dev) {
1464 if (-1 != dev->video_dev->minor) 1451 if (video_is_registered(dev->video_dev))
1465 video_unregister_device(dev->video_dev); 1452 video_unregister_device(dev->video_dev);
1466 else 1453 else
1467 video_device_release(dev->video_dev); 1454 video_device_release(dev->video_dev);
@@ -1532,8 +1519,8 @@ int cx23885_video_register(struct cx23885_dev *dev)
1532 dev->name); 1519 dev->name);
1533 goto fail_unreg; 1520 goto fail_unreg;
1534 } 1521 }
1535 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n", 1522 printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
1536 dev->name, dev->video_dev->num); 1523 dev->name, video_device_node_name(dev->video_dev));
1537 /* initial device configuration */ 1524 /* initial device configuration */
1538 mutex_lock(&dev->lock); 1525 mutex_lock(&dev->lock);
1539 cx23885_set_tvnorm(dev, dev->tvnorm); 1526 cx23885_set_tvnorm(dev, dev->tvnorm);
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index fa744764dc8b..08b3f6b136a0 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -303,7 +303,6 @@ struct cx23885_tsport {
303}; 303};
304 304
305struct cx23885_dev { 305struct cx23885_dev {
306 struct list_head devlist;
307 atomic_t refcount; 306 atomic_t refcount;
308 struct v4l2_device v4l2_dev; 307 struct v4l2_device v4l2_dev;
309 308
@@ -399,8 +398,6 @@ static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
399 398
400extern struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw); 399extern struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw);
401 400
402extern struct list_head cx23885_devlist;
403
404#define SRAM_CH01 0 /* Video A */ 401#define SRAM_CH01 0 /* Video A */
405#define SRAM_CH02 1 /* VBI A */ 402#define SRAM_CH02 1 /* VBI A */
406#define SRAM_CH03 2 /* Video B */ 403#define SRAM_CH03 2 /* Video B */
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index fbdc1cde56a6..6fe30e6c4262 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1048,21 +1048,15 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id)
1048 1048
1049static int mpeg_open(struct file *file) 1049static int mpeg_open(struct file *file)
1050{ 1050{
1051 int minor = video_devdata(file)->minor; 1051 struct video_device *vdev = video_devdata(file);
1052 struct cx8802_dev *dev = NULL; 1052 struct cx8802_dev *dev = video_drvdata(file);
1053 struct cx8802_fh *fh; 1053 struct cx8802_fh *fh;
1054 struct cx8802_driver *drv = NULL; 1054 struct cx8802_driver *drv = NULL;
1055 int err; 1055 int err;
1056 1056
1057 lock_kernel();
1058 dev = cx8802_get_device(minor);
1059
1060 dprintk( 1, "%s\n", __func__); 1057 dprintk( 1, "%s\n", __func__);
1061 1058
1062 if (dev == NULL) { 1059 lock_kernel();
1063 unlock_kernel();
1064 return -ENODEV;
1065 }
1066 1060
1067 /* Make sure we can acquire the hardware */ 1061 /* Make sure we can acquire the hardware */
1068 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD); 1062 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
@@ -1081,7 +1075,7 @@ static int mpeg_open(struct file *file)
1081 unlock_kernel(); 1075 unlock_kernel();
1082 return -EINVAL; 1076 return -EINVAL;
1083 } 1077 }
1084 dprintk(1,"open minor=%d\n",minor); 1078 dprintk(1, "open dev=%s\n", video_device_node_name(vdev));
1085 1079
1086 /* allocate + initialize per filehandle data */ 1080 /* allocate + initialize per filehandle data */
1087 fh = kzalloc(sizeof(*fh),GFP_KERNEL); 1081 fh = kzalloc(sizeof(*fh),GFP_KERNEL);
@@ -1129,10 +1123,6 @@ static int mpeg_release(struct file *file)
1129 kfree(fh); 1123 kfree(fh);
1130 1124
1131 /* Make sure we release the hardware */ 1125 /* Make sure we release the hardware */
1132 dev = cx8802_get_device(video_devdata(file)->minor);
1133 if (dev == NULL)
1134 return -ENODEV;
1135
1136 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD); 1126 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
1137 if (drv) 1127 if (drv)
1138 drv->request_release(drv); 1128 drv->request_release(drv);
@@ -1220,7 +1210,6 @@ static struct video_device cx8802_mpeg_template = {
1220 .name = "cx8802", 1210 .name = "cx8802",
1221 .fops = &mpeg_fops, 1211 .fops = &mpeg_fops,
1222 .ioctl_ops = &mpeg_ioctl_ops, 1212 .ioctl_ops = &mpeg_ioctl_ops,
1223 .minor = -1,
1224 .tvnorms = CX88_NORMS, 1213 .tvnorms = CX88_NORMS,
1225 .current_norm = V4L2_STD_NTSC_M, 1214 .current_norm = V4L2_STD_NTSC_M,
1226}; 1215};
@@ -1276,7 +1265,7 @@ static int cx8802_blackbird_advise_release(struct cx8802_driver *drv)
1276static void blackbird_unregister_video(struct cx8802_dev *dev) 1265static void blackbird_unregister_video(struct cx8802_dev *dev)
1277{ 1266{
1278 if (dev->mpeg_dev) { 1267 if (dev->mpeg_dev) {
1279 if (-1 != dev->mpeg_dev->minor) 1268 if (video_is_registered(dev->mpeg_dev))
1280 video_unregister_device(dev->mpeg_dev); 1269 video_unregister_device(dev->mpeg_dev);
1281 else 1270 else
1282 video_device_release(dev->mpeg_dev); 1271 video_device_release(dev->mpeg_dev);
@@ -1290,14 +1279,15 @@ static int blackbird_register_video(struct cx8802_dev *dev)
1290 1279
1291 dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci, 1280 dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci,
1292 &cx8802_mpeg_template,"mpeg"); 1281 &cx8802_mpeg_template,"mpeg");
1282 video_set_drvdata(dev->mpeg_dev, dev);
1293 err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1); 1283 err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1);
1294 if (err < 0) { 1284 if (err < 0) {
1295 printk(KERN_INFO "%s/2: can't register mpeg device\n", 1285 printk(KERN_INFO "%s/2: can't register mpeg device\n",
1296 dev->core->name); 1286 dev->core->name);
1297 return err; 1287 return err;
1298 } 1288 }
1299 printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n", 1289 printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
1300 dev->core->name, dev->mpeg_dev->num); 1290 dev->core->name, video_device_node_name(dev->mpeg_dev));
1301 return 0; 1291 return 0;
1302} 1292}
1303 1293
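The blackbird changes drop the minor-based cx8802_get_device() lookup: the device state is attached with video_set_drvdata() before registration, and open() recovers it with video_drvdata(). A minimal sketch of the registration side, assuming nothing beyond the v4l2-dev helpers (names are illustrative):

#include <linux/kernel.h>
#include <media/v4l2-dev.h>

/* Sketch: drvdata must be set before video_register_device(), because
 * open() may run as soon as the node exists and will look it up. */
static int sketch_register(struct video_device *vdev, void *state, int nr)
{
	int err;

	video_set_drvdata(vdev, state);
	err = video_register_device(vdev, VFL_TYPE_GRABBER, nr);
	if (err < 0)
		return err;

	pr_info("registered %s\n", video_device_node_name(vdev));
	return 0;
}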
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 92b8cdf9fb81..f9fda18b410c 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -360,7 +360,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
360 snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name); 360 snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name);
361 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci)); 361 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci));
362 362
363 err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); 363 err = ir_input_init(input_dev, &ir->ir, ir_type);
364 if (err < 0) 364 if (err < 0)
365 goto err_out_free; 365 goto err_out_free;
366 366
@@ -383,7 +383,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
383 cx88_ir_start(core, ir); 383 cx88_ir_start(core, ir);
384 384
385 /* all done */ 385 /* all done */
386 err = input_register_device(ir->input); 386 err = ir_input_register(ir->input, ir_codes);
387 if (err) 387 if (err)
388 goto err_out_stop; 388 goto err_out_stop;
389 389
@@ -393,8 +393,6 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
393 cx88_ir_stop(core, ir); 393 cx88_ir_stop(core, ir);
394 core->ir = NULL; 394 core->ir = NULL;
395 err_out_free: 395 err_out_free:
396 ir_input_free(input_dev);
397 input_free_device(input_dev);
398 kfree(ir); 396 kfree(ir);
399 return err; 397 return err;
400} 398}
@@ -408,8 +406,7 @@ int cx88_ir_fini(struct cx88_core *core)
408 return 0; 406 return 0;
409 407
410 cx88_ir_stop(core, ir); 408 cx88_ir_stop(core, ir);
411 ir_input_free(ir->input); 409 ir_input_unregister(ir->input);
412 input_unregister_device(ir->input);
413 kfree(ir); 410 kfree(ir);
414 411
415 /* done */ 412 /* done */
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index de9ff0fc741f..bb5104893411 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -580,21 +580,6 @@ static int cx8802_resume_common(struct pci_dev *pci_dev)
580 return 0; 580 return 0;
581} 581}
582 582
583#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
584 defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
585struct cx8802_dev *cx8802_get_device(int minor)
586{
587 struct cx8802_dev *dev;
588
589 list_for_each_entry(dev, &cx8802_devlist, devlist)
590 if (dev->mpeg_dev && dev->mpeg_dev->minor == minor)
591 return dev;
592
593 return NULL;
594}
595EXPORT_SYMBOL(cx8802_get_device);
596#endif
597
598struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype) 583struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
599{ 584{
600 struct cx8802_driver *d; 585 struct cx8802_driver *d;
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index d7e8fcee559c..48c450f4a85a 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -75,10 +75,6 @@ MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
75#define dprintk(level,fmt, arg...) if (video_debug >= level) \ 75#define dprintk(level,fmt, arg...) if (video_debug >= level) \
76 printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg) 76 printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
77 77
78/* ------------------------------------------------------------------ */
79
80static LIST_HEAD(cx8800_devlist);
81
82/* ------------------------------------------------------------------- */ 78/* ------------------------------------------------------------------- */
83/* static data */ 79/* static data */
84 80
@@ -753,38 +749,31 @@ static int get_ressource(struct cx8800_fh *fh)
753 749
754static int video_open(struct file *file) 750static int video_open(struct file *file)
755{ 751{
756 int minor = video_devdata(file)->minor; 752 struct video_device *vdev = video_devdata(file);
757 struct cx8800_dev *h,*dev = NULL; 753 struct cx8800_dev *dev = video_drvdata(file);
758 struct cx88_core *core; 754 struct cx88_core *core;
759 struct cx8800_fh *fh; 755 struct cx8800_fh *fh;
760 enum v4l2_buf_type type = 0; 756 enum v4l2_buf_type type = 0;
761 int radio = 0; 757 int radio = 0;
762 758
763 lock_kernel(); 759 switch (vdev->vfl_type) {
764 list_for_each_entry(h, &cx8800_devlist, devlist) { 760 case VFL_TYPE_GRABBER:
765 if (h->video_dev->minor == minor) { 761 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
766 dev = h; 762 break;
767 type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 763 case VFL_TYPE_VBI:
768 } 764 type = V4L2_BUF_TYPE_VBI_CAPTURE;
769 if (h->vbi_dev->minor == minor) { 765 break;
770 dev = h; 766 case VFL_TYPE_RADIO:
771 type = V4L2_BUF_TYPE_VBI_CAPTURE; 767 radio = 1;
772 } 768 break;
773 if (h->radio_dev &&
774 h->radio_dev->minor == minor) {
775 radio = 1;
776 dev = h;
777 }
778 }
779 if (NULL == dev) {
780 unlock_kernel();
781 return -ENODEV;
782 } 769 }
783 770
771 lock_kernel();
772
784 core = dev->core; 773 core = dev->core;
785 774
786 dprintk(1,"open minor=%d radio=%d type=%s\n", 775 dprintk(1, "open dev=%s radio=%d type=%s\n",
787 minor,radio,v4l2_type_names[type]); 776 video_device_node_name(vdev), radio, v4l2_type_names[type]);
788 777
789 /* allocate + initialize per filehandle data */ 778 /* allocate + initialize per filehandle data */
790 fh = kzalloc(sizeof(*fh),GFP_KERNEL); 779 fh = kzalloc(sizeof(*fh),GFP_KERNEL);
@@ -1733,7 +1722,6 @@ static struct video_device cx8800_vbi_template;
1733static struct video_device cx8800_video_template = { 1722static struct video_device cx8800_video_template = {
1734 .name = "cx8800-video", 1723 .name = "cx8800-video",
1735 .fops = &video_fops, 1724 .fops = &video_fops,
1736 .minor = -1,
1737 .ioctl_ops = &video_ioctl_ops, 1725 .ioctl_ops = &video_ioctl_ops,
1738 .tvnorms = CX88_NORMS, 1726 .tvnorms = CX88_NORMS,
1739 .current_norm = V4L2_STD_NTSC_M, 1727 .current_norm = V4L2_STD_NTSC_M,
@@ -1769,7 +1757,6 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
1769static struct video_device cx8800_radio_template = { 1757static struct video_device cx8800_radio_template = {
1770 .name = "cx8800-radio", 1758 .name = "cx8800-radio",
1771 .fops = &radio_fops, 1759 .fops = &radio_fops,
1772 .minor = -1,
1773 .ioctl_ops = &radio_ioctl_ops, 1760 .ioctl_ops = &radio_ioctl_ops,
1774}; 1761};
1775 1762
@@ -1778,21 +1765,21 @@ static struct video_device cx8800_radio_template = {
1778static void cx8800_unregister_video(struct cx8800_dev *dev) 1765static void cx8800_unregister_video(struct cx8800_dev *dev)
1779{ 1766{
1780 if (dev->radio_dev) { 1767 if (dev->radio_dev) {
1781 if (-1 != dev->radio_dev->minor) 1768 if (video_is_registered(dev->radio_dev))
1782 video_unregister_device(dev->radio_dev); 1769 video_unregister_device(dev->radio_dev);
1783 else 1770 else
1784 video_device_release(dev->radio_dev); 1771 video_device_release(dev->radio_dev);
1785 dev->radio_dev = NULL; 1772 dev->radio_dev = NULL;
1786 } 1773 }
1787 if (dev->vbi_dev) { 1774 if (dev->vbi_dev) {
1788 if (-1 != dev->vbi_dev->minor) 1775 if (video_is_registered(dev->vbi_dev))
1789 video_unregister_device(dev->vbi_dev); 1776 video_unregister_device(dev->vbi_dev);
1790 else 1777 else
1791 video_device_release(dev->vbi_dev); 1778 video_device_release(dev->vbi_dev);
1792 dev->vbi_dev = NULL; 1779 dev->vbi_dev = NULL;
1793 } 1780 }
1794 if (dev->video_dev) { 1781 if (dev->video_dev) {
1795 if (-1 != dev->video_dev->minor) 1782 if (video_is_registered(dev->video_dev))
1796 video_unregister_device(dev->video_dev); 1783 video_unregister_device(dev->video_dev);
1797 else 1784 else
1798 video_device_release(dev->video_dev); 1785 video_device_release(dev->video_dev);
@@ -1909,6 +1896,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1909 /* register v4l devices */ 1896 /* register v4l devices */
1910 dev->video_dev = cx88_vdev_init(core,dev->pci, 1897 dev->video_dev = cx88_vdev_init(core,dev->pci,
1911 &cx8800_video_template,"video"); 1898 &cx8800_video_template,"video");
1899 video_set_drvdata(dev->video_dev, dev);
1912 err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER, 1900 err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
1913 video_nr[core->nr]); 1901 video_nr[core->nr]);
1914 if (err < 0) { 1902 if (err < 0) {
@@ -1916,10 +1904,11 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1916 core->name); 1904 core->name);
1917 goto fail_unreg; 1905 goto fail_unreg;
1918 } 1906 }
1919 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n", 1907 printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
1920 core->name, dev->video_dev->num); 1908 core->name, video_device_node_name(dev->video_dev));
1921 1909
1922 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi"); 1910 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
1911 video_set_drvdata(dev->vbi_dev, dev);
1923 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, 1912 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
1924 vbi_nr[core->nr]); 1913 vbi_nr[core->nr]);
1925 if (err < 0) { 1914 if (err < 0) {
@@ -1927,12 +1916,13 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1927 core->name); 1916 core->name);
1928 goto fail_unreg; 1917 goto fail_unreg;
1929 } 1918 }
1930 printk(KERN_INFO "%s/0: registered device vbi%d\n", 1919 printk(KERN_INFO "%s/0: registered device %s\n",
1931 core->name, dev->vbi_dev->num); 1920 core->name, video_device_node_name(dev->vbi_dev));
1932 1921
1933 if (core->board.radio.type == CX88_RADIO) { 1922 if (core->board.radio.type == CX88_RADIO) {
1934 dev->radio_dev = cx88_vdev_init(core,dev->pci, 1923 dev->radio_dev = cx88_vdev_init(core,dev->pci,
1935 &cx8800_radio_template,"radio"); 1924 &cx8800_radio_template,"radio");
1925 video_set_drvdata(dev->radio_dev, dev);
1936 err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO, 1926 err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
1937 radio_nr[core->nr]); 1927 radio_nr[core->nr]);
1938 if (err < 0) { 1928 if (err < 0) {
@@ -1940,12 +1930,11 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1940 core->name); 1930 core->name);
1941 goto fail_unreg; 1931 goto fail_unreg;
1942 } 1932 }
1943 printk(KERN_INFO "%s/0: registered device radio%d\n", 1933 printk(KERN_INFO "%s/0: registered device %s\n",
1944 core->name, dev->radio_dev->num); 1934 core->name, video_device_node_name(dev->radio_dev));
1945 } 1935 }
1946 1936
1947 /* everything worked */ 1937 /* everything worked */
1948 list_add_tail(&dev->devlist,&cx8800_devlist);
1949 pci_set_drvdata(pci_dev,dev); 1938 pci_set_drvdata(pci_dev,dev);
1950 1939
1951 /* initial device configuration */ 1940 /* initial device configuration */
@@ -2001,7 +1990,6 @@ static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
2001 1990
2002 /* free memory */ 1991 /* free memory */
2003 btcx_riscmem_free(dev->pci,&dev->vidq.stopper); 1992 btcx_riscmem_free(dev->pci,&dev->vidq.stopper);
2004 list_del(&dev->devlist);
2005 cx88_core_put(core,dev->pci); 1993 cx88_core_put(core,dev->pci);
2006 kfree(dev); 1994 kfree(dev);
2007} 1995}
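The video_open() rewrite above keys the buffer type off vdev->vfl_type instead of walking a driver-private device list by minor number. A sketch of that dispatch, assuming the state was stored with video_set_drvdata() at registration time (function and variable names are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

/* Sketch: the node type chosen at video_register_device() time tells
 * open() which kind of node was hit; the device state comes from drvdata. */
static int sketch_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	void *state = video_drvdata(file);
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	int radio = 0;

	switch (vdev->vfl_type) {
	case VFL_TYPE_GRABBER:
		type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		break;
	case VFL_TYPE_VBI:
		type = V4L2_BUF_TYPE_VBI_CAPTURE;
		break;
	case VFL_TYPE_RADIO:
		radio = 1;
		break;
	}

	pr_debug("open %s type=%d radio=%d state=%p\n",
		 video_device_node_name(vdev), type, radio, state);
	return 0;
}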
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index e1c521710103..b1499bf604ea 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -423,7 +423,6 @@ struct cx8800_suspend_state {
423 423
424struct cx8800_dev { 424struct cx8800_dev {
425 struct cx88_core *core; 425 struct cx88_core *core;
426 struct list_head devlist;
427 spinlock_t slock; 426 spinlock_t slock;
428 427
429 /* various device info */ 428 /* various device info */
@@ -670,7 +669,6 @@ int cx88_audio_thread(void *data);
670 669
671int cx8802_register_driver(struct cx8802_driver *drv); 670int cx8802_register_driver(struct cx8802_driver *drv);
672int cx8802_unregister_driver(struct cx8802_driver *drv); 671int cx8802_unregister_driver(struct cx8802_driver *drv);
673struct cx8802_dev *cx8802_get_device(int minor);
674struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype); 672struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
675 673
676/* ----------------------------------------------------------- */ 674/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 12a1b3d7132d..de22bc9faf21 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -70,7 +70,6 @@
70#include <linux/init.h> 70#include <linux/init.h>
71#include <linux/platform_device.h> 71#include <linux/platform_device.h>
72#include <linux/interrupt.h> 72#include <linux/interrupt.h>
73#include <linux/version.h>
74#include <media/v4l2-common.h> 73#include <media/v4l2-common.h>
75#include <linux/io.h> 74#include <linux/io.h>
76#include <media/davinci/vpfe_capture.h> 75#include <media/davinci/vpfe_capture.h>
@@ -1967,7 +1966,6 @@ static __init int vpfe_probe(struct platform_device *pdev)
1967 vfd->release = video_device_release; 1966 vfd->release = video_device_release;
1968 vfd->fops = &vpfe_fops; 1967 vfd->fops = &vpfe_fops;
1969 vfd->ioctl_ops = &vpfe_ioctl_ops; 1968 vfd->ioctl_ops = &vpfe_ioctl_ops;
1970 vfd->minor = -1;
1971 vfd->tvnorms = 0; 1969 vfd->tvnorms = 0;
1972 vfd->current_norm = V4L2_STD_PAL; 1970 vfd->current_norm = V4L2_STD_PAL;
1973 vfd->v4l2_dev = &vpfe_dev->v4l2_dev; 1971 vfd->v4l2_dev = &vpfe_dev->v4l2_dev;
@@ -2071,7 +2069,7 @@ probe_out_video_unregister:
2071probe_out_v4l2_unregister: 2069probe_out_v4l2_unregister:
2072 v4l2_device_unregister(&vpfe_dev->v4l2_dev); 2070 v4l2_device_unregister(&vpfe_dev->v4l2_dev);
2073probe_out_video_release: 2071probe_out_video_release:
2074 if (vpfe_dev->video_dev->minor == -1) 2072 if (!video_is_registered(vpfe_dev->video_dev))
2075 video_device_release(vpfe_dev->video_dev); 2073 video_device_release(vpfe_dev->video_dev);
2076probe_out_release_irq: 2074probe_out_release_irq:
2077 free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); 2075 free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
@@ -2091,7 +2089,7 @@ probe_free_dev_mem:
2091/* 2089/*
2092 * vpfe_remove : It un-register device from V4L2 driver 2090 * vpfe_remove : It un-register device from V4L2 driver
2093 */ 2091 */
2094static int vpfe_remove(struct platform_device *pdev) 2092static int __devexit vpfe_remove(struct platform_device *pdev)
2095{ 2093{
2096 struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev); 2094 struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
2097 struct resource *res; 2095 struct resource *res;
@@ -2127,7 +2125,7 @@ vpfe_resume(struct device *dev)
2127 return -1; 2125 return -1;
2128} 2126}
2129 2127
2130static struct dev_pm_ops vpfe_dev_pm_ops = { 2128static const struct dev_pm_ops vpfe_dev_pm_ops = {
2131 .suspend = vpfe_suspend, 2129 .suspend = vpfe_suspend,
2132 .resume = vpfe_resume, 2130 .resume = vpfe_resume,
2133}; 2131};
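vpfe_remove() gains a __devexit annotation and the dev_pm_ops table becomes const; the same constification is applied to the vpif drivers below. A minimal sketch of a constified PM ops table, with placeholder callbacks standing in for the driver's real suspend/resume:

#include <linux/device.h>
#include <linux/pm.h>

/* Placeholder callbacks; the real ones are the driver's suspend/resume. */
static int sketch_suspend(struct device *dev)
{
	return 0;
}

static int sketch_resume(struct device *dev)
{
	return 0;
}

/* const: the table is never written at run time, so it can live in rodata. */
static const struct dev_pm_ops sketch_pm_ops = {
	.suspend = sketch_suspend,
	.resume  = sketch_resume,
};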
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 3b8eac31ecae..1f532e31cd49 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -266,7 +266,7 @@ fail:
266 return status; 266 return status;
267} 267}
268 268
269static int vpif_remove(struct platform_device *pdev) 269static int __devexit vpif_remove(struct platform_device *pdev)
270{ 270{
271 iounmap(vpif_base); 271 iounmap(vpif_base);
272 release_mem_region(res->start, res_len); 272 release_mem_region(res->start, res_len);
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index d947ee5e4eb4..78130721f578 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -2107,7 +2107,7 @@ vpif_resume(struct device *dev)
2107 return -1; 2107 return -1;
2108} 2108}
2109 2109
2110static struct dev_pm_ops vpif_dev_pm_ops = { 2110static const struct dev_pm_ops vpif_dev_pm_ops = {
2111 .suspend = vpif_suspend, 2111 .suspend = vpif_suspend,
2112 .resume = vpif_resume, 2112 .resume = vpif_resume,
2113}; 2113};
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index d14cfb200ed0..dfddef7228dd 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -1347,7 +1347,6 @@ static const struct v4l2_file_operations vpif_fops = {
1347static struct video_device vpif_video_template = { 1347static struct video_device vpif_video_template = {
1348 .name = "vpif", 1348 .name = "vpif",
1349 .fops = &vpif_fops, 1349 .fops = &vpif_fops,
1350 .minor = -1,
1351 .ioctl_ops = &vpif_ioctl_ops, 1350 .ioctl_ops = &vpif_ioctl_ops,
1352 .tvnorms = DM646X_V4L2_STD, 1351 .tvnorms = DM646X_V4L2_STD,
1353 .current_norm = V4L2_STD_625_50, 1352 .current_norm = V4L2_STD_625_50,
diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c
index 453236bd7559..7ee72ecd3d81 100644
--- a/drivers/media/video/davinci/vpss.c
+++ b/drivers/media/video/davinci/vpss.c
@@ -268,7 +268,7 @@ fail1:
268 return status; 268 return status;
269} 269}
270 270
271static int vpss_remove(struct platform_device *pdev) 271static int __devexit vpss_remove(struct platform_device *pdev)
272{ 272{
273 iounmap(oper_cfg.vpss_bl_regs_base); 273 iounmap(oper_cfg.vpss_bl_regs_base);
274 release_mem_region(oper_cfg.r1->start, oper_cfg.len1); 274 release_mem_region(oper_cfg.r1->start, oper_cfg.len1);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 82da205047be..25100001ffff 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2285,7 +2285,7 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
2285 dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)"; 2285 dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
2286 break; 2286 break;
2287 case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2: 2287 case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
2288 dev->init_data.ir_codes = &ir_codes_hauppauge_new_table; 2288 dev->init_data.ir_codes = &ir_codes_rc5_hauppauge_new_table;
2289 dev->init_data.get_key = em28xx_get_key_em_haup; 2289 dev->init_data.get_key = em28xx_get_key_em_haup;
2290 dev->init_data.name = "i2c IR (EM2840 Hauppauge)"; 2290 dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
2291 break; 2291 break;
@@ -2653,7 +2653,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
2653 INIT_LIST_HEAD(&dev->vbiq.active); 2653 INIT_LIST_HEAD(&dev->vbiq.active);
2654 INIT_LIST_HEAD(&dev->vbiq.queued); 2654 INIT_LIST_HEAD(&dev->vbiq.queued);
2655 2655
2656
2657 if (dev->board.has_msp34xx) { 2656 if (dev->board.has_msp34xx) {
2658 /* Send a reset to other chips via gpio */ 2657 /* Send a reset to other chips via gpio */
2659 errCode = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf7); 2658 errCode = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf7);
@@ -2923,9 +2922,9 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
2923 2922
2924 if (dev->users) { 2923 if (dev->users) {
2925 em28xx_warn 2924 em28xx_warn
2926 ("device /dev/video%d is open! Deregistration and memory " 2925 ("device %s is open! Deregistration and memory "
2927 "deallocation are deferred on close.\n", 2926 "deallocation are deferred on close.\n",
2928 dev->vdev->num); 2927 video_device_node_name(dev->vdev));
2929 2928
2930 dev->state |= DEV_MISCONFIGURED; 2929 dev->state |= DEV_MISCONFIGURED;
2931 em28xx_uninit_isoc(dev); 2930 em28xx_uninit_isoc(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 3f86d36dff2b..b311d4514bdf 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -216,7 +216,7 @@ int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
216 * sets only some bits (specified by bitmask) of a register, by first reading 216 * sets only some bits (specified by bitmask) of a register, by first reading
217 * the actual value 217 * the actual value
218 */ 218 */
219static int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val, 219int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
220 u8 bitmask) 220 u8 bitmask)
221{ 221{
222 int oldval; 222 int oldval;
@@ -1136,34 +1136,6 @@ void em28xx_wake_i2c(struct em28xx *dev)
1136static LIST_HEAD(em28xx_devlist); 1136static LIST_HEAD(em28xx_devlist);
1137static DEFINE_MUTEX(em28xx_devlist_mutex); 1137static DEFINE_MUTEX(em28xx_devlist_mutex);
1138 1138
1139struct em28xx *em28xx_get_device(int minor,
1140 enum v4l2_buf_type *fh_type,
1141 int *has_radio)
1142{
1143 struct em28xx *h, *dev = NULL;
1144
1145 *fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1146 *has_radio = 0;
1147
1148 mutex_lock(&em28xx_devlist_mutex);
1149 list_for_each_entry(h, &em28xx_devlist, devlist) {
1150 if (h->vdev->minor == minor)
1151 dev = h;
1152 if (h->vbi_dev && h->vbi_dev->minor == minor) {
1153 dev = h;
1154 *fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
1155 }
1156 if (h->radio_dev &&
1157 h->radio_dev->minor == minor) {
1158 dev = h;
1159 *has_radio = 1;
1160 }
1161 }
1162 mutex_unlock(&em28xx_devlist_mutex);
1163
1164 return dev;
1165}
1166
1167/* 1139/*
1168 * em28xx_realease_resources() 1140 * em28xx_realease_resources()
1169 * unregisters the v4l2,i2c and usb devices 1141 * unregisters the v4l2,i2c and usb devices
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index d96ec7c09dca..af0d935c29be 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -112,10 +112,13 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
112int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) 112int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
113{ 113{
114 unsigned char buf[2]; 114 unsigned char buf[2];
115 unsigned char code; 115 u16 code;
116 int size;
116 117
117 /* poll IR chip */ 118 /* poll IR chip */
118 if (2 != i2c_master_recv(ir->c, buf, 2)) 119 size = i2c_master_recv(ir->c, buf, sizeof(buf));
120
121 if (size != 2)
119 return -EIO; 122 return -EIO;
120 123
121 /* Does eliminate repeated parity code */ 124 /* Does eliminate repeated parity code */
@@ -124,16 +127,30 @@ int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
124 127
125 ir->old = buf[1]; 128 ir->old = buf[1];
126 129
127 /* Rearranges bits to the right order */ 130 /*
128 code = ((buf[0]&0x01)<<5) | /* 0010 0000 */ 131 * Rearranges bits to the right order.
129 ((buf[0]&0x02)<<3) | /* 0001 0000 */ 132 * The bit order was determined experimentally by using
130 ((buf[0]&0x04)<<1) | /* 0000 1000 */ 133 * the original Hauppauge Grey IR and another RC5 that uses addr=0x08
131 ((buf[0]&0x08)>>1) | /* 0000 0100 */ 134 * The RC5 code has 14 bits, but we've experimentally determined
132 ((buf[0]&0x10)>>3) | /* 0000 0010 */ 135 * the meaning for only 11 bits.
133 ((buf[0]&0x20)>>5); /* 0000 0001 */ 136 * So, the code translation is not complete. Yet, it is enough to
134 137 * work with the provided RC5 IR.
135 i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x)\n", 138 */
136 code, buf[0]); 139 code =
140 ((buf[0] & 0x01) ? 0x0020 : 0) | /* 0010 0000 */
141 ((buf[0] & 0x02) ? 0x0010 : 0) | /* 0001 0000 */
142 ((buf[0] & 0x04) ? 0x0008 : 0) | /* 0000 1000 */
143 ((buf[0] & 0x08) ? 0x0004 : 0) | /* 0000 0100 */
144 ((buf[0] & 0x10) ? 0x0002 : 0) | /* 0000 0010 */
145 ((buf[0] & 0x20) ? 0x0001 : 0) | /* 0000 0001 */
146 ((buf[1] & 0x08) ? 0x1000 : 0) | /* 0001 0000 */
147 ((buf[1] & 0x10) ? 0x0800 : 0) | /* 0000 1000 */
148 ((buf[1] & 0x20) ? 0x0400 : 0) | /* 0000 0100 */
149 ((buf[1] & 0x40) ? 0x0200 : 0) | /* 0000 0010 */
150 ((buf[1] & 0x80) ? 0x0100 : 0); /* 0000 0001 */
151
152 i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x%02x)\n",
153 code, buf[1], buf[0]);
137 154
138 /* return key */ 155 /* return key */
139 *ir_key = code; 156 *ir_key = code;
@@ -337,19 +354,28 @@ int em28xx_ir_init(struct em28xx *dev)
337 goto err_out_free; 354 goto err_out_free;
338 355
339 ir->input = input_dev; 356 ir->input = input_dev;
357 ir_config = EM2874_IR_RC5;
358
359 /* Adjust xclk based on IR table for RC5/NEC tables */
360 if (dev->board.ir_codes->ir_type == IR_TYPE_RC5) {
361 dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
362 ir->full_code = 1;
363 } else if (dev->board.ir_codes->ir_type == IR_TYPE_NEC) {
364 dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
365 ir_config = EM2874_IR_NEC;
366 ir->full_code = 1;
367 }
368 em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
369 EM28XX_XCLK_IR_RC5_MODE);
340 370
341 /* Setup the proper handler based on the chip */ 371 /* Setup the proper handler based on the chip */
342 switch (dev->chip_id) { 372 switch (dev->chip_id) {
343 case CHIP_ID_EM2860: 373 case CHIP_ID_EM2860:
344 case CHIP_ID_EM2883: 374 case CHIP_ID_EM2883:
345 if (dev->model == EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950)
346 ir->full_code = 1;
347 ir->get_key = default_polling_getkey; 375 ir->get_key = default_polling_getkey;
348 break; 376 break;
349 case CHIP_ID_EM2874: 377 case CHIP_ID_EM2874:
350 ir->get_key = em2874_polling_getkey; 378 ir->get_key = em2874_polling_getkey;
351 /* For now we only support RC5, so enable it */
352 ir_config = EM2874_IR_RC5;
353 em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1); 379 em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
354 break; 380 break;
355 default: 381 default:
@@ -367,8 +393,7 @@ int em28xx_ir_init(struct em28xx *dev)
367 usb_make_path(dev->udev, ir->phys, sizeof(ir->phys)); 393 usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
368 strlcat(ir->phys, "/input0", sizeof(ir->phys)); 394 strlcat(ir->phys, "/input0", sizeof(ir->phys));
369 395
370 err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER, 396 err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
371 dev->board.ir_codes);
372 if (err < 0) 397 if (err < 0)
373 goto err_out_free; 398 goto err_out_free;
374 399
@@ -387,7 +412,7 @@ int em28xx_ir_init(struct em28xx *dev)
387 em28xx_ir_start(ir); 412 em28xx_ir_start(ir);
388 413
389 /* all done */ 414 /* all done */
390 err = input_register_device(ir->input); 415 err = ir_input_register(ir->input, dev->board.ir_codes);
391 if (err) 416 if (err)
392 goto err_out_stop; 417 goto err_out_stop;
393 418
@@ -396,8 +421,6 @@ int em28xx_ir_init(struct em28xx *dev)
396 em28xx_ir_stop(ir); 421 em28xx_ir_stop(ir);
397 dev->ir = NULL; 422 dev->ir = NULL;
398 err_out_free: 423 err_out_free:
399 ir_input_free(input_dev);
400 input_free_device(input_dev);
401 kfree(ir); 424 kfree(ir);
402 return err; 425 return err;
403} 426}
@@ -411,8 +434,7 @@ int em28xx_ir_fini(struct em28xx *dev)
411 return 0; 434 return 0;
412 435
413 em28xx_ir_stop(ir); 436 em28xx_ir_stop(ir);
414 ir_input_free(ir->input); 437 ir_input_unregister(ir->input);
415 input_unregister_device(ir->input);
416 kfree(ir); 438 kfree(ir);
417 439
418 /* done */ 440 /* done */
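The get_key rework above widens the scancode to 16 bits and folds the address byte from buf[1] into it. A standalone sketch of the same bit shuffle, with the mapping copied from the table in the hunk (user-space C, purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Sketch: reassemble the 11 decoded RC5 bits from the two bytes the
 * chip returns, using the mapping shown in the hunk above. */
static uint16_t haup_rc5_code(uint8_t b0, uint8_t b1)
{
	uint16_t code = 0;

	code |= (b0 & 0x01) ? 0x0020 : 0;
	code |= (b0 & 0x02) ? 0x0010 : 0;
	code |= (b0 & 0x04) ? 0x0008 : 0;
	code |= (b0 & 0x08) ? 0x0004 : 0;
	code |= (b0 & 0x10) ? 0x0002 : 0;
	code |= (b0 & 0x20) ? 0x0001 : 0;
	code |= (b1 & 0x08) ? 0x1000 : 0;
	code |= (b1 & 0x10) ? 0x0800 : 0;
	code |= (b1 & 0x20) ? 0x0400 : 0;
	code |= (b1 & 0x40) ? 0x0200 : 0;
	code |= (b1 & 0x80) ? 0x0100 : 0;

	return code;
}

int main(void)
{
	/* example: buf[0]=0x3f, buf[1]=0xf8 -> every mapped bit set (0x1f3f) */
	printf("0x%04x\n", haup_rc5_code(0x3f, 0xf8));
	return 0;
}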
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 7ad65370f274..849b18c94037 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -2081,22 +2081,30 @@ static int radio_queryctrl(struct file *file, void *priv,
2081 */ 2081 */
2082static int em28xx_v4l2_open(struct file *filp) 2082static int em28xx_v4l2_open(struct file *filp)
2083{ 2083{
2084 int minor = video_devdata(filp)->minor; 2084 int errCode = 0, radio = 0;
2085 int errCode = 0, radio; 2085 struct video_device *vdev = video_devdata(filp);
2086 struct em28xx *dev; 2086 struct em28xx *dev = video_drvdata(filp);
2087 enum v4l2_buf_type fh_type; 2087 enum v4l2_buf_type fh_type = 0;
2088 struct em28xx_fh *fh; 2088 struct em28xx_fh *fh;
2089 enum v4l2_field field; 2089 enum v4l2_field field;
2090 2090
2091 dev = em28xx_get_device(minor, &fh_type, &radio); 2091 switch (vdev->vfl_type) {
2092 2092 case VFL_TYPE_GRABBER:
2093 if (NULL == dev) 2093 fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
2094 return -ENODEV; 2094 break;
2095 case VFL_TYPE_VBI:
2096 fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
2097 break;
2098 case VFL_TYPE_RADIO:
2099 radio = 1;
2100 break;
2101 }
2095 2102
2096 mutex_lock(&dev->lock); 2103 mutex_lock(&dev->lock);
2097 2104
2098 em28xx_videodbg("open minor=%d type=%s users=%d\n", 2105 em28xx_videodbg("open dev=%s type=%s users=%d\n",
2099 minor, v4l2_type_names[fh_type], dev->users); 2106 video_device_node_name(vdev), v4l2_type_names[fh_type],
2107 dev->users);
2100 2108
2101 2109
2102 fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL); 2110 fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL);
@@ -2160,25 +2168,25 @@ void em28xx_release_analog_resources(struct em28xx *dev)
2160 /*FIXME: I2C IR should be disconnected */ 2168 /*FIXME: I2C IR should be disconnected */
2161 2169
2162 if (dev->radio_dev) { 2170 if (dev->radio_dev) {
2163 if (-1 != dev->radio_dev->minor) 2171 if (video_is_registered(dev->radio_dev))
2164 video_unregister_device(dev->radio_dev); 2172 video_unregister_device(dev->radio_dev);
2165 else 2173 else
2166 video_device_release(dev->radio_dev); 2174 video_device_release(dev->radio_dev);
2167 dev->radio_dev = NULL; 2175 dev->radio_dev = NULL;
2168 } 2176 }
2169 if (dev->vbi_dev) { 2177 if (dev->vbi_dev) {
2170 em28xx_info("V4L2 device /dev/vbi%d deregistered\n", 2178 em28xx_info("V4L2 device %s deregistered\n",
2171 dev->vbi_dev->num); 2179 video_device_node_name(dev->vbi_dev));
2172 if (-1 != dev->vbi_dev->minor) 2180 if (video_is_registered(dev->vbi_dev))
2173 video_unregister_device(dev->vbi_dev); 2181 video_unregister_device(dev->vbi_dev);
2174 else 2182 else
2175 video_device_release(dev->vbi_dev); 2183 video_device_release(dev->vbi_dev);
2176 dev->vbi_dev = NULL; 2184 dev->vbi_dev = NULL;
2177 } 2185 }
2178 if (dev->vdev) { 2186 if (dev->vdev) {
2179 em28xx_info("V4L2 device /dev/video%d deregistered\n", 2187 em28xx_info("V4L2 device %s deregistered\n",
2180 dev->vdev->num); 2188 video_device_node_name(dev->vdev));
2181 if (-1 != dev->vdev->minor) 2189 if (video_is_registered(dev->vdev))
2182 video_unregister_device(dev->vdev); 2190 video_unregister_device(dev->vdev);
2183 else 2191 else
2184 video_device_release(dev->vdev); 2192 video_device_release(dev->vdev);
@@ -2397,8 +2405,6 @@ static const struct video_device em28xx_video_template = {
2397 .release = video_device_release, 2405 .release = video_device_release,
2398 .ioctl_ops = &video_ioctl_ops, 2406 .ioctl_ops = &video_ioctl_ops,
2399 2407
2400 .minor = -1,
2401
2402 .tvnorms = V4L2_STD_ALL, 2408 .tvnorms = V4L2_STD_ALL,
2403 .current_norm = V4L2_STD_PAL, 2409 .current_norm = V4L2_STD_PAL,
2404}; 2410};
@@ -2433,7 +2439,6 @@ static struct video_device em28xx_radio_template = {
2433 .name = "em28xx-radio", 2439 .name = "em28xx-radio",
2434 .fops = &radio_fops, 2440 .fops = &radio_fops,
2435 .ioctl_ops = &radio_ioctl_ops, 2441 .ioctl_ops = &radio_ioctl_ops,
2436 .minor = -1,
2437}; 2442};
2438 2443
2439/******************************** usb interface ******************************/ 2444/******************************** usb interface ******************************/
@@ -2451,7 +2456,6 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
2451 return NULL; 2456 return NULL;
2452 2457
2453 *vfd = *template; 2458 *vfd = *template;
2454 vfd->minor = -1;
2455 vfd->v4l2_dev = &dev->v4l2_dev; 2459 vfd->v4l2_dev = &dev->v4l2_dev;
2456 vfd->release = video_device_release; 2460 vfd->release = video_device_release;
2457 vfd->debug = video_debug; 2461 vfd->debug = video_debug;
@@ -2459,6 +2463,7 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
2459 snprintf(vfd->name, sizeof(vfd->name), "%s %s", 2463 snprintf(vfd->name, sizeof(vfd->name), "%s %s",
2460 dev->name, type_name); 2464 dev->name, type_name);
2461 2465
2466 video_set_drvdata(vfd, dev);
2462 return vfd; 2467 return vfd;
2463} 2468}
2464 2469
@@ -2540,16 +2545,16 @@ int em28xx_register_analog_devices(struct em28xx *dev)
2540 em28xx_errdev("can't register radio device\n"); 2545 em28xx_errdev("can't register radio device\n");
2541 return ret; 2546 return ret;
2542 } 2547 }
2543 em28xx_info("Registered radio device as /dev/radio%d\n", 2548 em28xx_info("Registered radio device as %s\n",
2544 dev->radio_dev->num); 2549 video_device_node_name(dev->radio_dev));
2545 } 2550 }
2546 2551
2547 em28xx_info("V4L2 video device registered as /dev/video%d\n", 2552 em28xx_info("V4L2 video device registered as %s\n",
2548 dev->vdev->num); 2553 video_device_node_name(dev->vdev));
2549 2554
2550 if (dev->vbi_dev) 2555 if (dev->vbi_dev)
2551 em28xx_info("V4L2 VBI device registered as /dev/vbi%d\n", 2556 em28xx_info("V4L2 VBI device registered as %s\n",
2552 dev->vbi_dev->num); 2557 video_device_node_name(dev->vbi_dev));
2553 2558
2554 return 0; 2559 return 0;
2555} 2560}
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 441df644ddbe..80d9b4fa1b97 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -643,6 +643,8 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
643 int len); 643 int len);
644int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len); 644int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
645int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val); 645int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
646int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
647 u8 bitmask);
646 648
647int em28xx_read_ac97(struct em28xx *dev, u8 reg); 649int em28xx_read_ac97(struct em28xx *dev, u8 reg);
648int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val); 650int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
@@ -666,9 +668,6 @@ int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
666void em28xx_wake_i2c(struct em28xx *dev); 668void em28xx_wake_i2c(struct em28xx *dev);
667void em28xx_remove_from_devlist(struct em28xx *dev); 669void em28xx_remove_from_devlist(struct em28xx *dev);
668void em28xx_add_into_devlist(struct em28xx *dev); 670void em28xx_add_into_devlist(struct em28xx *dev);
669struct em28xx *em28xx_get_device(int minor,
670 enum v4l2_buf_type *fh_type,
671 int *has_radio);
672int em28xx_register_extension(struct em28xx_ops *dev); 671int em28xx_register_extension(struct em28xx_ops *dev);
673void em28xx_unregister_extension(struct em28xx_ops *dev); 672void em28xx_unregister_extension(struct em28xx_ops *dev);
674void em28xx_init_extension(struct em28xx *dev); 673void em28xx_init_extension(struct em28xx *dev);
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 88987a57cf7b..e6c23d509862 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -587,8 +587,8 @@ static int et61x251_stream_interrupt(struct et61x251_device* cam)
587 else if (cam->stream != STREAM_OFF) { 587 else if (cam->stream != STREAM_OFF) {
588 cam->state |= DEV_MISCONFIGURED; 588 cam->state |= DEV_MISCONFIGURED;
589 DBG(1, "URB timeout reached. The camera is misconfigured. To " 589 DBG(1, "URB timeout reached. The camera is misconfigured. To "
590 "use it, close and open /dev/video%d again.", 590 "use it, close and open %s again.",
591 cam->v4ldev->num); 591 video_device_node_name(cam->v4ldev));
592 return -EIO; 592 return -EIO;
593 } 593 }
594 594
@@ -1195,7 +1195,8 @@ static void et61x251_release_resources(struct kref *kref)
1195 1195
1196 cam = container_of(kref, struct et61x251_device, kref); 1196 cam = container_of(kref, struct et61x251_device, kref);
1197 1197
1198 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num); 1198 DBG(2, "V4L2 device %s deregistered",
1199 video_device_node_name(cam->v4ldev));
1199 video_set_drvdata(cam->v4ldev, NULL); 1200 video_set_drvdata(cam->v4ldev, NULL);
1200 video_unregister_device(cam->v4ldev); 1201 video_unregister_device(cam->v4ldev);
1201 usb_put_dev(cam->usbdev); 1202 usb_put_dev(cam->usbdev);
@@ -1236,8 +1237,8 @@ static int et61x251_open(struct file *filp)
1236 } 1237 }
1237 1238
1238 if (cam->users) { 1239 if (cam->users) {
1239 DBG(2, "Device /dev/video%d is already in use", 1240 DBG(2, "Device %s is already in use",
1240 cam->v4ldev->num); 1241 video_device_node_name(cam->v4ldev));
1241 DBG(3, "Simultaneous opens are not supported"); 1242 DBG(3, "Simultaneous opens are not supported");
1242 if ((filp->f_flags & O_NONBLOCK) || 1243 if ((filp->f_flags & O_NONBLOCK) ||
1243 (filp->f_flags & O_NDELAY)) { 1244 (filp->f_flags & O_NDELAY)) {
@@ -1280,7 +1281,8 @@ static int et61x251_open(struct file *filp)
1280 cam->frame_count = 0; 1281 cam->frame_count = 0;
1281 et61x251_empty_framequeues(cam); 1282 et61x251_empty_framequeues(cam);
1282 1283
1283 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num); 1284 DBG(3, "Video device %s is open",
1285 video_device_node_name(cam->v4ldev));
1284 1286
1285out: 1287out:
1286 mutex_unlock(&cam->open_mutex); 1288 mutex_unlock(&cam->open_mutex);
@@ -1304,7 +1306,8 @@ static int et61x251_release(struct file *filp)
1304 cam->users--; 1306 cam->users--;
1305 wake_up_interruptible_nr(&cam->wait_open, 1); 1307 wake_up_interruptible_nr(&cam->wait_open, 1);
1306 1308
1307 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num); 1309 DBG(3, "Video device %s closed",
1310 video_device_node_name(cam->v4ldev));
1308 1311
1309 kref_put(&cam->kref, et61x251_release_resources); 1312 kref_put(&cam->kref, et61x251_release_resources);
1310 1313
@@ -1846,8 +1849,8 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
1846 if (err) { /* atomic, no rollback in ioctl() */ 1849 if (err) { /* atomic, no rollback in ioctl() */
1847 cam->state |= DEV_MISCONFIGURED; 1850 cam->state |= DEV_MISCONFIGURED;
1848 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 1851 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
1849 "use the camera, close and open /dev/video%d again.", 1852 "use the camera, close and open %s again.",
1850 cam->v4ldev->num); 1853 video_device_node_name(cam->v4ldev));
1851 return -EIO; 1854 return -EIO;
1852 } 1855 }
1853 1856
@@ -1859,8 +1862,8 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
1859 nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) { 1862 nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
1860 cam->state |= DEV_MISCONFIGURED; 1863 cam->state |= DEV_MISCONFIGURED;
1861 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 1864 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
1862 "use the camera, close and open /dev/video%d again.", 1865 "use the camera, close and open %s again.",
1863 cam->v4ldev->num); 1866 video_device_node_name(cam->v4ldev));
1864 return -ENOMEM; 1867 return -ENOMEM;
1865 } 1868 }
1866 1869
@@ -2069,8 +2072,8 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
2069 if (err) { /* atomic, no rollback in ioctl() */ 2072 if (err) { /* atomic, no rollback in ioctl() */
2070 cam->state |= DEV_MISCONFIGURED; 2073 cam->state |= DEV_MISCONFIGURED;
2071 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 2074 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
2072 "use the camera, close and open /dev/video%d again.", 2075 "use the camera, close and open %s again.",
2073 cam->v4ldev->num); 2076 video_device_node_name(cam->v4ldev));
2074 return -EIO; 2077 return -EIO;
2075 } 2078 }
2076 2079
@@ -2081,8 +2084,8 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
2081 nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) { 2084 nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
2082 cam->state |= DEV_MISCONFIGURED; 2085 cam->state |= DEV_MISCONFIGURED;
2083 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 2086 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
2084 "use the camera, close and open /dev/video%d again.", 2087 "use the camera, close and open %s again.",
2085 cam->v4ldev->num); 2088 video_device_node_name(cam->v4ldev));
2086 return -ENOMEM; 2089 return -ENOMEM;
2087 } 2090 }
2088 2091
@@ -2130,7 +2133,7 @@ et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg)
2130 cam->state |= DEV_MISCONFIGURED; 2133 cam->state |= DEV_MISCONFIGURED;
2131 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 2134 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
2132 "problems. To use the camera, close and open " 2135 "problems. To use the camera, close and open "
2133 "/dev/video%d again.", cam->v4ldev->num); 2136 "%s again.", video_device_node_name(cam->v4ldev));
2134 return -EIO; 2137 return -EIO;
2135 } 2138 }
2136 2139
@@ -2584,7 +2587,6 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2584 2587
2585 strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera"); 2588 strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
2586 cam->v4ldev->fops = &et61x251_fops; 2589 cam->v4ldev->fops = &et61x251_fops;
2587 cam->v4ldev->minor = video_nr[dev_nr];
2588 cam->v4ldev->release = video_device_release; 2590 cam->v4ldev->release = video_device_release;
2589 cam->v4ldev->parent = &udev->dev; 2591 cam->v4ldev->parent = &udev->dev;
2590 video_set_drvdata(cam->v4ldev, cam); 2592 video_set_drvdata(cam->v4ldev, cam);
@@ -2603,7 +2605,8 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2603 goto fail; 2605 goto fail;
2604 } 2606 }
2605 2607
2606 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num); 2608 DBG(2, "V4L2 device registered as %s",
2609 video_device_node_name(cam->v4ldev));
2607 2610
2608 cam->module_param.force_munmap = force_munmap[dev_nr]; 2611 cam->module_param.force_munmap = force_munmap[dev_nr];
2609 cam->module_param.frame_timeout = frame_timeout[dev_nr]; 2612 cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2654,9 +2657,9 @@ static void et61x251_usb_disconnect(struct usb_interface* intf)
2654 DBG(2, "Disconnecting %s...", cam->v4ldev->name); 2657 DBG(2, "Disconnecting %s...", cam->v4ldev->name);
2655 2658
2656 if (cam->users) { 2659 if (cam->users) {
2657 DBG(2, "Device /dev/video%d is open! Deregistration and " 2660 DBG(2, "Device %s is open! Deregistration and memory "
2658 "memory deallocation are deferred.", 2661 "deallocation are deferred.",
2659 cam->v4ldev->num); 2662 video_device_node_name(cam->v4ldev));
2660 cam->state |= DEV_MISCONFIGURED; 2663 cam->state |= DEV_MISCONFIGURED;
2661 et61x251_stop_transfer(cam); 2664 et61x251_stop_transfer(cam);
2662 cam->state |= DEV_DISCONNECTED; 2665 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 2f0b8d621e00..c98b5d69c438 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1046,14 +1046,14 @@ static struct sd_desc sd_desc = {
1046}; 1046};
1047 1047
1048/* -- module initialisation -- */ 1048/* -- module initialisation -- */
1049static __devinitdata struct usb_device_id device_table[] = { 1049static const struct usb_device_id device_table[] __devinitconst = {
1050 {USB_DEVICE(0x0572, 0x0041)}, 1050 {USB_DEVICE(0x0572, 0x0041)},
1051 {} 1051 {}
1052}; 1052};
1053MODULE_DEVICE_TABLE(usb, device_table); 1053MODULE_DEVICE_TABLE(usb, device_table);
1054 1054
1055/* -- device connect -- */ 1055/* -- device connect -- */
1056static int sd_probe(struct usb_interface *intf, 1056static int __devinit sd_probe(struct usb_interface *intf,
1057 const struct usb_device_id *id) 1057 const struct usb_device_id *id)
1058{ 1058{
1059 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 1059 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 9de86419ae1e..fdf4c0ec5e7a 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -864,7 +864,7 @@ static struct sd_desc sd_desc = {
864}; 864};
865 865
866/* -- module initialisation -- */ 866/* -- module initialisation -- */
867static __devinitdata struct usb_device_id device_table[] = { 867static const struct usb_device_id device_table[] __devinitconst = {
868 {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106}, 868 {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
869#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE 869#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
870 {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX}, 870 {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
@@ -875,7 +875,7 @@ static __devinitdata struct usb_device_id device_table[] = {
875MODULE_DEVICE_TABLE(usb, device_table); 875MODULE_DEVICE_TABLE(usb, device_table);
876 876
877/* -- device connect -- */ 877/* -- device connect -- */
878static int sd_probe(struct usb_interface *intf, 878static int __devinit sd_probe(struct usb_interface *intf,
879 const struct usb_device_id *id) 879 const struct usb_device_id *id)
880{ 880{
881 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 881 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/gl860/gl860-mi1320.c b/drivers/media/video/gspca/gl860/gl860-mi1320.c
index 1355e526ee84..c276a7debdec 100644
--- a/drivers/media/video/gspca/gl860/gl860-mi1320.c
+++ b/drivers/media/video/gspca/gl860/gl860-mi1320.c
@@ -345,7 +345,7 @@ static int mi1320_configure_alt(struct gspca_dev *gspca_dev)
345 return 0; 345 return 0;
346} 346}
347 347
348int mi1320_camera_settings(struct gspca_dev *gspca_dev) 348static int mi1320_camera_settings(struct gspca_dev *gspca_dev)
349{ 349{
350 struct sd *sd = (struct sd *) gspca_dev; 350 struct sd *sd = (struct sd *) gspca_dev;
351 351
diff --git a/drivers/media/video/gspca/gl860/gl860-mi2020.c b/drivers/media/video/gspca/gl860/gl860-mi2020.c
index 80cb3f1b36f7..7c31b4f2abea 100644
--- a/drivers/media/video/gspca/gl860/gl860-mi2020.c
+++ b/drivers/media/video/gspca/gl860/gl860-mi2020.c
@@ -769,7 +769,7 @@ static int mi2020_configure_alt(struct gspca_dev *gspca_dev)
769 return 0; 769 return 0;
770} 770}
771 771
772int mi2020_camera_settings(struct gspca_dev *gspca_dev) 772static int mi2020_camera_settings(struct gspca_dev *gspca_dev)
773{ 773{
774 struct sd *sd = (struct sd *) gspca_dev; 774 struct sd *sd = (struct sd *) gspca_dev;
775 775
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index a695e0ae13c2..4878c8f66543 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -40,7 +40,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
40static void sd_callback(struct gspca_dev *gspca_dev); 40static void sd_callback(struct gspca_dev *gspca_dev);
41 41
42static int gl860_guess_sensor(struct gspca_dev *gspca_dev, 42static int gl860_guess_sensor(struct gspca_dev *gspca_dev,
43 s32 vendor_id, s32 product_id); 43 u16 vendor_id, u16 product_id);
44 44
45/*============================ driver options ==============================*/ 45/*============================ driver options ==============================*/
46 46
@@ -326,11 +326,11 @@ static int sd_config(struct gspca_dev *gspca_dev,
326{ 326{
327 struct sd *sd = (struct sd *) gspca_dev; 327 struct sd *sd = (struct sd *) gspca_dev;
328 struct cam *cam; 328 struct cam *cam;
329 s32 vendor_id, product_id; 329 u16 vendor_id, product_id;
330 330
331 /* Get USB VendorID and ProductID */ 331 /* Get USB VendorID and ProductID */
332 vendor_id = le16_to_cpu(id->idVendor); 332 vendor_id = id->idVendor;
333 product_id = le16_to_cpu(id->idProduct); 333 product_id = id->idProduct;
334 334
335 sd->nbRightUp = 1; 335 sd->nbRightUp = 1;
336 sd->nbIm = -1; 336 sd->nbIm = -1;
@@ -534,8 +534,8 @@ static int sd_probe(struct usb_interface *intf,
534 gspca_dev = usb_get_intfdata(intf); 534 gspca_dev = usb_get_intfdata(intf);
535 535
536 PDEBUG(D_PROBE, 536 PDEBUG(D_PROBE,
537 "Camera is now controlling video device /dev/video%d", 537 "Camera is now controlling video device %s",
538 gspca_dev->vdev.minor); 538 video_device_node_name(&gspca_dev->vdev));
539 } 539 }
540 540
541 return ret; 541 return ret;
@@ -673,7 +673,7 @@ void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len)
673} 673}
674 674
675static int gl860_guess_sensor(struct gspca_dev *gspca_dev, 675static int gl860_guess_sensor(struct gspca_dev *gspca_dev,
676 s32 vendor_id, s32 product_id) 676 u16 vendor_id, u16 product_id)
677{ 677{
678 struct sd *sd = (struct sd *) gspca_dev; 678 struct sd *sd = (struct sd *) gspca_dev;
679 u8 probe, nb26, nb96, nOV, ntry; 679 u8 probe, nb26, nb96, nOV, ntry;
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 4076f8e5a6fc..e930a67d526b 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -304,7 +304,6 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
304 j = gspca_dev->fr_queue[i]; 304 j = gspca_dev->fr_queue[i];
305 gspca_dev->cur_frame = &gspca_dev->frame[j]; 305 gspca_dev->cur_frame = &gspca_dev->frame[j];
306 } 306 }
307 return;
308} 307}
309EXPORT_SYMBOL(gspca_frame_add); 308EXPORT_SYMBOL(gspca_frame_add);
310 309
@@ -321,7 +320,7 @@ static int gspca_is_compressed(__u32 format)
321 return 0; 320 return 0;
322} 321}
323 322
324static void *rvmalloc(unsigned long size) 323static void *rvmalloc(long size)
325{ 324{
326 void *mem; 325 void *mem;
327 unsigned long adr; 326 unsigned long adr;
@@ -329,7 +328,7 @@ static void *rvmalloc(unsigned long size)
329 mem = vmalloc_32(size); 328 mem = vmalloc_32(size);
330 if (mem != NULL) { 329 if (mem != NULL) {
331 adr = (unsigned long) mem; 330 adr = (unsigned long) mem;
332 while ((long) size > 0) { 331 while (size > 0) {
333 SetPageReserved(vmalloc_to_page((void *) adr)); 332 SetPageReserved(vmalloc_to_page((void *) adr));
334 adr += PAGE_SIZE; 333 adr += PAGE_SIZE;
335 size -= PAGE_SIZE; 334 size -= PAGE_SIZE;
@@ -768,6 +767,7 @@ static int vidioc_g_register(struct file *file, void *priv,
768 767
769 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 768 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
770 return -ERESTARTSYS; 769 return -ERESTARTSYS;
770 gspca_dev->usb_err = 0;
771 if (gspca_dev->present) 771 if (gspca_dev->present)
772 ret = gspca_dev->sd_desc->get_register(gspca_dev, reg); 772 ret = gspca_dev->sd_desc->get_register(gspca_dev, reg);
773 else 773 else
@@ -791,6 +791,7 @@ static int vidioc_s_register(struct file *file, void *priv,
791 791
792 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 792 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
793 return -ERESTARTSYS; 793 return -ERESTARTSYS;
794 gspca_dev->usb_err = 0;
794 if (gspca_dev->present) 795 if (gspca_dev->present)
795 ret = gspca_dev->sd_desc->set_register(gspca_dev, reg); 796 ret = gspca_dev->sd_desc->set_register(gspca_dev, reg);
796 else 797 else
@@ -812,6 +813,7 @@ static int vidioc_g_chip_ident(struct file *file, void *priv,
812 813
813 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 814 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
814 return -ERESTARTSYS; 815 return -ERESTARTSYS;
816 gspca_dev->usb_err = 0;
815 if (gspca_dev->present) 817 if (gspca_dev->present)
816 ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip); 818 ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
817 else 819 else
@@ -983,11 +985,40 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
983 return -EINVAL; 985 return -EINVAL;
984} 986}
985 987
988static int vidioc_enum_frameintervals(struct file *filp, void *priv,
989 struct v4l2_frmivalenum *fival)
990{
991 struct gspca_dev *gspca_dev = priv;
992 int mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
993 __u32 i;
994
995 if (gspca_dev->cam.mode_framerates == NULL ||
996 gspca_dev->cam.mode_framerates[mode].nrates == 0)
997 return -EINVAL;
998
999 if (fival->pixel_format !=
1000 gspca_dev->cam.cam_mode[mode].pixelformat)
1001 return -EINVAL;
1002
1003 for (i = 0; i < gspca_dev->cam.mode_framerates[mode].nrates; i++) {
1004 if (fival->index == i) {
1005 fival->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1006 fival->discrete.numerator = 1;
1007 fival->discrete.denominator =
1008 gspca_dev->cam.mode_framerates[mode].rates[i];
1009 return 0;
1010 }
1011 }
1012
1013 return -EINVAL;
1014}
1015
986static void gspca_release(struct video_device *vfd) 1016static void gspca_release(struct video_device *vfd)
987{ 1017{
988 struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev); 1018 struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev);
989 1019
990 PDEBUG(D_PROBE, "/dev/video%d released", gspca_dev->vdev.num); 1020 PDEBUG(D_PROBE, "%s released",
1021 video_device_node_name(&gspca_dev->vdev));
991 1022
992 kfree(gspca_dev->usb_buf); 1023 kfree(gspca_dev->usb_buf);
993 kfree(gspca_dev); 1024 kfree(gspca_dev);
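The new vidioc_enum_frameintervals() handler above serves VIDIOC_ENUM_FRAMEINTERVALS from the per-mode rate table. A user-space sketch of how the ioctl is exercised; the device node, pixel format, and frame size are placeholders, not values taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* placeholder node */
	struct v4l2_frmivalenum fival;

	if (fd < 0)
		return 1;

	memset(&fival, 0, sizeof(fival));
	fival.pixel_format = V4L2_PIX_FMT_JPEG;	/* placeholder format */
	fival.width = 640;
	fival.height = 480;

	/* keep bumping the index until the driver returns EINVAL */
	for (fival.index = 0;
	     ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &fival) == 0;
	     fival.index++) {
		if (fival.type == V4L2_FRMIVAL_TYPE_DISCRETE)
			printf("%u/%u s per frame\n",
			       fival.discrete.numerator,
			       fival.discrete.denominator);
	}

	close(fd);
	return 0;
}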
@@ -1053,6 +1084,7 @@ static int dev_close(struct file *file)
1053 if (gspca_dev->capt_file == file) { 1084 if (gspca_dev->capt_file == file) {
1054 if (gspca_dev->streaming) { 1085 if (gspca_dev->streaming) {
1055 mutex_lock(&gspca_dev->usb_lock); 1086 mutex_lock(&gspca_dev->usb_lock);
1087 gspca_dev->usb_err = 0;
1056 gspca_stream_off(gspca_dev); 1088 gspca_stream_off(gspca_dev);
1057 mutex_unlock(&gspca_dev->usb_lock); 1089 mutex_unlock(&gspca_dev->usb_lock);
1058 } 1090 }
@@ -1143,12 +1175,14 @@ static int vidioc_queryctrl(struct file *file, void *priv,
1143 continue; 1175 continue;
1144 ctrls = &gspca_dev->sd_desc->ctrls[i]; 1176 ctrls = &gspca_dev->sd_desc->ctrls[i];
1145 } 1177 }
1178 if (ctrls == NULL)
1179 return -EINVAL;
1146 } else { 1180 } else {
1147 ctrls = get_ctrl(gspca_dev, id); 1181 ctrls = get_ctrl(gspca_dev, id);
1182 if (ctrls == NULL)
1183 return -EINVAL;
1148 i = ctrls - gspca_dev->sd_desc->ctrls; 1184 i = ctrls - gspca_dev->sd_desc->ctrls;
1149 } 1185 }
1150 if (ctrls == NULL)
1151 return -EINVAL;
1152 memcpy(q_ctrl, ctrls, sizeof *q_ctrl); 1186 memcpy(q_ctrl, ctrls, sizeof *q_ctrl);
1153 if (gspca_dev->ctrl_inac & (1 << i)) 1187 if (gspca_dev->ctrl_inac & (1 << i))
1154 q_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; 1188 q_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
@@ -1172,6 +1206,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
1172 PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value); 1206 PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
1173 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1207 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1174 return -ERESTARTSYS; 1208 return -ERESTARTSYS;
1209 gspca_dev->usb_err = 0;
1175 if (gspca_dev->present) 1210 if (gspca_dev->present)
1176 ret = ctrls->set(gspca_dev, ctrl->value); 1211 ret = ctrls->set(gspca_dev, ctrl->value);
1177 else 1212 else
@@ -1193,6 +1228,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
1193 1228
1194 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1229 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1195 return -ERESTARTSYS; 1230 return -ERESTARTSYS;
1231 gspca_dev->usb_err = 0;
1196 if (gspca_dev->present) 1232 if (gspca_dev->present)
1197 ret = ctrls->get(gspca_dev, &ctrl->value); 1233 ret = ctrls->get(gspca_dev, &ctrl->value);
1198 else 1234 else
@@ -1307,6 +1343,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1307 /* stop streaming */ 1343 /* stop streaming */
1308 if (gspca_dev->streaming) { 1344 if (gspca_dev->streaming) {
1309 mutex_lock(&gspca_dev->usb_lock); 1345 mutex_lock(&gspca_dev->usb_lock);
1346 gspca_dev->usb_err = 0;
1310 gspca_stream_off(gspca_dev); 1347 gspca_stream_off(gspca_dev);
1311 mutex_unlock(&gspca_dev->usb_lock); 1348 mutex_unlock(&gspca_dev->usb_lock);
1312 } 1349 }
@@ -1398,6 +1435,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
1398 ret = -ERESTARTSYS; 1435 ret = -ERESTARTSYS;
1399 goto out; 1436 goto out;
1400 } 1437 }
1438 gspca_dev->usb_err = 0;
1401 gspca_stream_off(gspca_dev); 1439 gspca_stream_off(gspca_dev);
1402 mutex_unlock(&gspca_dev->usb_lock); 1440 mutex_unlock(&gspca_dev->usb_lock);
1403 1441
@@ -1423,6 +1461,7 @@ static int vidioc_g_jpegcomp(struct file *file, void *priv,
1423 return -EINVAL; 1461 return -EINVAL;
1424 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1462 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1425 return -ERESTARTSYS; 1463 return -ERESTARTSYS;
1464 gspca_dev->usb_err = 0;
1426 if (gspca_dev->present) 1465 if (gspca_dev->present)
1427 ret = gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp); 1466 ret = gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp);
1428 else 1467 else
@@ -1441,6 +1480,7 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
1441 return -EINVAL; 1480 return -EINVAL;
1442 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1481 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1443 return -ERESTARTSYS; 1482 return -ERESTARTSYS;
1483 gspca_dev->usb_err = 0;
1444 if (gspca_dev->present) 1484 if (gspca_dev->present)
1445 ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp); 1485 ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp);
1446 else 1486 else
@@ -1461,6 +1501,7 @@ static int vidioc_g_parm(struct file *filp, void *priv,
1461 1501
1462 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1502 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1463 return -ERESTARTSYS; 1503 return -ERESTARTSYS;
1504 gspca_dev->usb_err = 0;
1464 if (gspca_dev->present) 1505 if (gspca_dev->present)
1465 ret = gspca_dev->sd_desc->get_streamparm(gspca_dev, 1506 ret = gspca_dev->sd_desc->get_streamparm(gspca_dev,
1466 parm); 1507 parm);
@@ -1490,6 +1531,7 @@ static int vidioc_s_parm(struct file *filp, void *priv,
1490 1531
1491 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1532 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1492 return -ERESTARTSYS; 1533 return -ERESTARTSYS;
1534 gspca_dev->usb_err = 0;
1493 if (gspca_dev->present) 1535 if (gspca_dev->present)
1494 ret = gspca_dev->sd_desc->set_streamparm(gspca_dev, 1536 ret = gspca_dev->sd_desc->set_streamparm(gspca_dev,
1495 parm); 1537 parm);
@@ -1613,7 +1655,7 @@ static int dev_mmap(struct file *file, struct vm_area_struct *vma)
1613 size -= PAGE_SIZE; 1655 size -= PAGE_SIZE;
1614 } 1656 }
1615 1657
1616 vma->vm_ops = (struct vm_operations_struct *) &gspca_vm_ops; 1658 vma->vm_ops = &gspca_vm_ops;
1617 vma->vm_private_data = frame; 1659 vma->vm_private_data = frame;
1618 gspca_vm_open(vma); 1660 gspca_vm_open(vma);
1619 ret = 0; 1661 ret = 0;
@@ -1661,6 +1703,7 @@ static int frame_wait(struct gspca_dev *gspca_dev,
1661 1703
1662 if (gspca_dev->sd_desc->dq_callback) { 1704 if (gspca_dev->sd_desc->dq_callback) {
1663 mutex_lock(&gspca_dev->usb_lock); 1705 mutex_lock(&gspca_dev->usb_lock);
1706 gspca_dev->usb_err = 0;
1664 if (gspca_dev->present) 1707 if (gspca_dev->present)
1665 gspca_dev->sd_desc->dq_callback(gspca_dev); 1708 gspca_dev->sd_desc->dq_callback(gspca_dev);
1666 mutex_unlock(&gspca_dev->usb_lock); 1709 mutex_unlock(&gspca_dev->usb_lock);
@@ -1973,6 +2016,7 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
1973 .vidioc_g_parm = vidioc_g_parm, 2016 .vidioc_g_parm = vidioc_g_parm,
1974 .vidioc_s_parm = vidioc_s_parm, 2017 .vidioc_s_parm = vidioc_s_parm,
1975 .vidioc_enum_framesizes = vidioc_enum_framesizes, 2018 .vidioc_enum_framesizes = vidioc_enum_framesizes,
2019 .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
1976#ifdef CONFIG_VIDEO_ADV_DEBUG 2020#ifdef CONFIG_VIDEO_ADV_DEBUG
1977 .vidioc_g_register = vidioc_g_register, 2021 .vidioc_g_register = vidioc_g_register,
1978 .vidioc_s_register = vidioc_s_register, 2022 .vidioc_s_register = vidioc_s_register,
@@ -1988,7 +2032,6 @@ static struct video_device gspca_template = {
1988 .fops = &dev_fops, 2032 .fops = &dev_fops,
1989 .ioctl_ops = &dev_ioctl_ops, 2033 .ioctl_ops = &dev_ioctl_ops,
1990 .release = gspca_release, 2034 .release = gspca_release,
1991 .minor = -1,
1992}; 2035};
1993 2036
1994/* 2037/*
@@ -2049,9 +2092,6 @@ int gspca_dev_probe(struct usb_interface *intf,
2049 ret = sd_desc->init(gspca_dev); 2092 ret = sd_desc->init(gspca_dev);
2050 if (ret < 0) 2093 if (ret < 0)
2051 goto out; 2094 goto out;
2052 ret = gspca_set_alt0(gspca_dev);
2053 if (ret < 0)
2054 goto out;
2055 gspca_set_default_mode(gspca_dev); 2095 gspca_set_default_mode(gspca_dev);
2056 2096
2057 mutex_init(&gspca_dev->usb_lock); 2097 mutex_init(&gspca_dev->usb_lock);
@@ -2073,7 +2113,7 @@ int gspca_dev_probe(struct usb_interface *intf,
2073 } 2113 }
2074 2114
2075 usb_set_intfdata(intf, gspca_dev); 2115 usb_set_intfdata(intf, gspca_dev);
2076 PDEBUG(D_PROBE, "/dev/video%d created", gspca_dev->vdev.num); 2116 PDEBUG(D_PROBE, "%s created", video_device_node_name(&gspca_dev->vdev));
2077 return 0; 2117 return 0;
2078out: 2118out:
2079 kfree(gspca_dev->usb_buf); 2119 kfree(gspca_dev->usb_buf);
@@ -2092,7 +2132,8 @@ void gspca_disconnect(struct usb_interface *intf)
2092{ 2132{
2093 struct gspca_dev *gspca_dev = usb_get_intfdata(intf); 2133 struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
2094 2134
2095 PDEBUG(D_PROBE, "/dev/video%d disconnect", gspca_dev->vdev.num); 2135 PDEBUG(D_PROBE, "%s disconnect",
2136 video_device_node_name(&gspca_dev->vdev));
2096 mutex_lock(&gspca_dev->usb_lock); 2137 mutex_lock(&gspca_dev->usb_lock);
2097 gspca_dev->present = 0; 2138 gspca_dev->present = 0;
2098 2139
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 181617355ec3..59c7941da999 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -45,11 +45,20 @@ extern int gspca_debug;
45/* image transfers */ 45/* image transfers */
46#define MAX_NURBS 4 /* max number of URBs */ 46#define MAX_NURBS 4 /* max number of URBs */
47 47
48
49/* used to list framerates supported by a camera mode (resolution) */
50struct framerates {
51 int *rates;
52 int nrates;
53};
54
48/* device information - set at probe time */ 55/* device information - set at probe time */
49struct cam { 56struct cam {
50 int bulk_size; /* buffer size when image transfer by bulk */ 57 int bulk_size; /* buffer size when image transfer by bulk */
51 const struct v4l2_pix_format *cam_mode; /* size nmodes */ 58 const struct v4l2_pix_format *cam_mode; /* size nmodes */
52 char nmodes; 59 char nmodes;
60 const struct framerates *mode_framerates; /* must have size nmode,
61 * just like cam_mode */
53 __u8 bulk_nurbs; /* number of URBs in bulk mode 62 __u8 bulk_nurbs; /* number of URBs in bulk mode
54 * - cannot be > MAX_NURBS 63 * - cannot be > MAX_NURBS
55 * - when 0 and bulk_size != 0 means 64 * - when 0 and bulk_size != 0 means
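
A subdriver that wants to report frame intervals pairs its cam_mode array with a mode_framerates array of the same length, one entry per resolution. A sketch of what that might look like in a hypothetical subdriver; the mode list, array names and rate values are purely illustrative:

/* Hypothetical subdriver tables: one framerates entry per cam_mode entry. */
static const struct v4l2_pix_format vga_mode[] = {
        {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
                .bytesperline = 320,
                .sizeimage = 320 * 240 * 3 / 8 + 590,
                .colorspace = V4L2_COLORSPACE_JPEG},
        {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
                .bytesperline = 640,
                .sizeimage = 640 * 480 * 3 / 8 + 590,
                .colorspace = V4L2_COLORSPACE_JPEG},
};

static int qvga_rates[] = {30, 15};     /* frames per second */
static int vga_rates[] = {15, 10, 5};

static const struct framerates rate_table[] = {
        { .rates = qvga_rates, .nrates = ARRAY_SIZE(qvga_rates) },
        { .rates = vga_rates,  .nrates = ARRAY_SIZE(vga_rates) },
};

/* in the subdriver's sd_config(): */
/*      cam->cam_mode = vga_mode;               */
/*      cam->nmodes = ARRAY_SIZE(vga_mode);     */
/*      cam->mode_framerates = rate_table;      */
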
@@ -171,6 +180,7 @@ struct gspca_dev {
171 struct mutex usb_lock; /* usb exchange protection */ 180 struct mutex usb_lock; /* usb exchange protection */
172 struct mutex read_lock; /* read protection */ 181 struct mutex read_lock; /* read protection */
173 struct mutex queue_lock; /* ISOC queue protection */ 182 struct mutex queue_lock; /* ISOC queue protection */
183 int usb_err; /* USB error - protected by usb_lock */
174#ifdef CONFIG_PM 184#ifdef CONFIG_PM
175 char frozen; /* suspend - resume */ 185 char frozen; /* suspend - resume */
176#endif 186#endif
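
The new usb_err field implements an error-latching scheme: the low-level register helpers record the first USB failure and become no-ops afterwards, so a subdriver can issue a long register sequence without checking every call, while the ioctl handlers clear the field under usb_lock before calling in and return it when done (see the gspca.c and stk014.c hunks in this patch). A minimal stand-alone sketch of the idea, with fake_xfer() standing in for usb_control_msg():

#include <stdio.h>

struct dev { int usb_err; };

static int fake_xfer(int reg)           /* pretend register 0x30 fails */
{
        return reg == 0x30 ? -5 /* -EIO */ : 0;
}

static void reg_w(struct dev *dev, int reg)
{
        int ret;

        if (dev->usb_err < 0)           /* a previous call failed: no-op */
                return;
        ret = fake_xfer(reg);
        if (ret < 0)
                dev->usb_err = ret;     /* latch the first failure */
}

int main(void)
{
        struct dev dev = { .usb_err = 0 };

        reg_w(&dev, 0x10);
        reg_w(&dev, 0x30);              /* fails */
        reg_w(&dev, 0x40);              /* silently skipped */
        printf("usb_err = %d\n", dev.usb_err);  /* -5 */
        return 0;
}
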
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 844fc1d886d1..4294c75e3b11 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -81,7 +81,7 @@ int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data)
81 return (err < 0) ? err : 0; 81 return (err < 0) ? err : 0;
82} 82}
83 83
84int m5602_wait_for_i2c(struct sd *sd) 84static int m5602_wait_for_i2c(struct sd *sd)
85{ 85{
86 int err; 86 int err;
87 u8 data; 87 u8 data;
@@ -388,7 +388,7 @@ static int m5602_probe(struct usb_interface *intf,
388 THIS_MODULE); 388 THIS_MODULE);
389} 389}
390 390
391void m5602_disconnect(struct usb_interface *intf) 391static void m5602_disconnect(struct usb_interface *intf)
392{ 392{
393 struct gspca_dev *gspca_dev = usb_get_intfdata(intf); 393 struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
394 struct sd *sd = (struct sd *) gspca_dev; 394 struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index c2739d6605a1..923cdd5f7a6b 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -439,7 +439,7 @@ int ov9650_start(struct sd *sd)
439 err = m5602_write_bridge(sd, res_init_ov9650[i][1], 439 err = m5602_write_bridge(sd, res_init_ov9650[i][1],
440 res_init_ov9650[i][2]); 440 res_init_ov9650[i][2]);
441 else if (res_init_ov9650[i][0] == SENSOR) { 441 else if (res_init_ov9650[i][0] == SENSOR) {
442 u8 data = res_init_ov9650[i][2]; 442 data = res_init_ov9650[i][2];
443 err = m5602_write_sensor(sd, 443 err = m5602_write_sensor(sd,
444 res_init_ov9650[i][1], &data, 1); 444 res_init_ov9650[i][1], &data, 1);
445 } 445 }
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index a27afeb6f39b..aa2f3c7e2cb5 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -525,7 +525,10 @@ static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
525 err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 525 err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
526 if (err < 0) 526 if (err < 0)
527 return err; 527 return err;
528 data = (data & 0xfe) | !val; 528 if (val)
529 data &= 0xfe;
530 else
531 data |= 0x01;
529 err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 532 err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
530 return err; 533 return err;
531} 534}
@@ -570,7 +573,10 @@ static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
570 err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 573 err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
571 if (err < 0) 574 if (err < 0)
572 return err; 575 return err;
573 data = (data & 0xfe) | !val; 576 if (val)
577 data &= 0xfe;
578 else
579 data |= 0x01;
574 err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 580 err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
575 return err; 581 return err;
576} 582}
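
The rewritten vflip/hflip updates only spell out what the old one-liner already did: the row/column-start LSB ends up as the inverse of val either way. A quick stand-alone check of that equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int val, data;

        for (val = 0; val <= 1; val++)
                for (data = 0; data < 256; data++) {
                        uint8_t oldway = (data & 0xfe) | !val;
                        uint8_t newway = val ? (data & 0xfe)
                                             : (data | 0x01);

                        assert(oldway == newway);
                }
        return 0;
}
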
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 126d968dd9e0..9154870e07d2 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -67,7 +67,7 @@ MODULE_DESCRIPTION("GSPCA/Mars-Semi MR97310A USB Camera Driver");
67MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
68 68
69/* global parameters */ 69/* global parameters */
70int force_sensor_type = -1; 70static int force_sensor_type = -1;
71module_param(force_sensor_type, int, 0644); 71module_param(force_sensor_type, int, 0644);
72MODULE_PARM_DESC(force_sensor_type, "Force sensor type (-1 (auto), 0 or 1)"); 72MODULE_PARM_DESC(force_sensor_type, "Force sensor type (-1 (auto), 0 or 1)");
73 73
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index ad9ec339981d..b4f965731244 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -1982,7 +1982,7 @@ static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
1982{ 1982{
1983 int ret; 1983 int ret;
1984 1984
1985 *((u32 *)sd->gspca_dev.usb_buf) = __cpu_to_le32(value); 1985 *((__le32 *) sd->gspca_dev.usb_buf) = __cpu_to_le32(value);
1986 1986
1987 ret = usb_control_msg(sd->gspca_dev.dev, 1987 ret = usb_control_msg(sd->gspca_dev.dev,
1988 usb_sndctrlpipe(sd->gspca_dev.dev, 0), 1988 usb_sndctrlpipe(sd->gspca_dev.dev, 0),
@@ -2021,9 +2021,9 @@ static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value)
2021 if (rc < 0) 2021 if (rc < 0)
2022 return rc; 2022 return rc;
2023 2023
2024 do 2024 do {
2025 rc = reg_r(sd, R511_I2C_CTL); 2025 rc = reg_r(sd, R511_I2C_CTL);
2026 while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ 2026 } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
2027 2027
2028 if (rc < 0) 2028 if (rc < 0)
2029 return rc; 2029 return rc;
@@ -2055,9 +2055,9 @@ static int ov511_i2c_r(struct sd *sd, __u8 reg)
2055 if (rc < 0) 2055 if (rc < 0)
2056 return rc; 2056 return rc;
2057 2057
2058 do 2058 do {
2059 rc = reg_r(sd, R511_I2C_CTL); 2059 rc = reg_r(sd, R511_I2C_CTL);
2060 while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ 2060 } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
2061 2061
2062 if (rc < 0) 2062 if (rc < 0)
2063 return rc; 2063 return rc;
@@ -2081,9 +2081,9 @@ static int ov511_i2c_r(struct sd *sd, __u8 reg)
2081 if (rc < 0) 2081 if (rc < 0)
2082 return rc; 2082 return rc;
2083 2083
2084 do 2084 do {
2085 rc = reg_r(sd, R511_I2C_CTL); 2085 rc = reg_r(sd, R511_I2C_CTL);
2086 while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ 2086 } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
2087 2087
2088 if (rc < 0) 2088 if (rc < 0)
2089 return rc; 2089 return rc;
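
The ov518_reg_w32 change is an endianness-annotation fix: __cpu_to_le32() returns a __le32, so the buffer pointer is now cast to __le32 * to keep sparse quiet; the bytes written are unchanged. For reference, packing the value byte by byte produces the same little-endian layout without any cast (put_le32 below is a hypothetical helper, not part of the driver):

#include <stdio.h>
#include <stdint.h>

static void put_le32(uint8_t *buf, uint32_t value)
{
        buf[0] = value;
        buf[1] = value >> 8;
        buf[2] = value >> 16;
        buf[3] = value >> 24;
}

int main(void)
{
        uint8_t buf[4];

        put_le32(buf, 0x12345678);
        printf("%02x %02x %02x %02x\n",
               buf[0], buf[1], buf[2], buf[3]);      /* 78 56 34 12 */
        return 0;
}
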
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 74acceea8094..de0b66c4b56e 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -90,6 +90,9 @@ struct sd {
90 unsigned char autogain; 90 unsigned char autogain;
91 __u8 hflip; 91 __u8 hflip;
92 __u8 vflip; 92 __u8 vflip;
93 u8 flags;
94#define FL_HFLIP 0x01 /* mirrored by default */
95#define FL_VFLIP 0x02 /* vertical flipped by default */
93 96
94 u8 sof_read; 97 u8 sof_read;
95 u8 autogain_ignore_frames; 98 u8 autogain_ignore_frames;
@@ -552,6 +555,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
552 sd->autogain = AUTOGAIN_DEF; 555 sd->autogain = AUTOGAIN_DEF;
553 sd->hflip = HFLIP_DEF; 556 sd->hflip = HFLIP_DEF;
554 sd->vflip = VFLIP_DEF; 557 sd->vflip = VFLIP_DEF;
558 sd->flags = id->driver_info;
555 return 0; 559 return 0;
556} 560}
557 561
@@ -708,10 +712,17 @@ static int sethvflip(struct gspca_dev *gspca_dev)
708{ 712{
709 struct sd *sd = (struct sd *) gspca_dev; 713 struct sd *sd = (struct sd *) gspca_dev;
710 int ret; 714 int ret;
711 __u8 data; 715 u8 data, hflip, vflip;
716
717 hflip = sd->hflip;
718 if (sd->flags & FL_HFLIP)
719 hflip = !hflip;
720 vflip = sd->vflip;
721 if (sd->flags & FL_VFLIP)
722 vflip = !vflip;
712 723
713 ret = reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ 724 ret = reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
714 data = (sd->hflip ? 0x08 : 0x00) | (sd->vflip ? 0x04 : 0x00); 725 data = (hflip ? 0x08 : 0x00) | (vflip ? 0x04 : 0x00);
715 if (0 <= ret) 726 if (0 <= ret)
716 ret = reg_w(gspca_dev, 0x21, data); 727 ret = reg_w(gspca_dev, 0x21, data);
717 /* load registers to sensor (Bit 0, auto clear) */ 728 /* load registers to sensor (Bit 0, auto clear) */
@@ -1218,15 +1229,15 @@ static struct sd_desc sd_desc = {
1218}; 1229};
1219 1230
1220/* -- module initialisation -- */ 1231/* -- module initialisation -- */
1221static __devinitdata struct usb_device_id device_table[] = { 1232static const struct usb_device_id device_table[] __devinitconst = {
1222 {USB_DEVICE(0x06f8, 0x3009)}, 1233 {USB_DEVICE(0x06f8, 0x3009)},
1223 {USB_DEVICE(0x093a, 0x2620)}, 1234 {USB_DEVICE(0x093a, 0x2620)},
1224 {USB_DEVICE(0x093a, 0x2621)}, 1235 {USB_DEVICE(0x093a, 0x2621)},
1225 {USB_DEVICE(0x093a, 0x2622)}, 1236 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
1226 {USB_DEVICE(0x093a, 0x2624)}, 1237 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
1227 {USB_DEVICE(0x093a, 0x2626)}, 1238 {USB_DEVICE(0x093a, 0x2626)},
1228 {USB_DEVICE(0x093a, 0x2628)}, 1239 {USB_DEVICE(0x093a, 0x2628)},
1229 {USB_DEVICE(0x093a, 0x2629)}, 1240 {USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
1230 {USB_DEVICE(0x093a, 0x262a)}, 1241 {USB_DEVICE(0x093a, 0x262a)},
1231 {USB_DEVICE(0x093a, 0x262c)}, 1242 {USB_DEVICE(0x093a, 0x262c)},
1232 {} 1243 {}
@@ -1234,7 +1245,7 @@ static __devinitdata struct usb_device_id device_table[] = {
1234MODULE_DEVICE_TABLE(usb, device_table); 1245MODULE_DEVICE_TABLE(usb, device_table);
1235 1246
1236/* -- device connect -- */ 1247/* -- device connect -- */
1237static int sd_probe(struct usb_interface *intf, 1248static int __devinit sd_probe(struct usb_interface *intf,
1238 const struct usb_device_id *id) 1249 const struct usb_device_id *id)
1239{ 1250{
1240 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 1251 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
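
The pac7302 change stores per-device orientation quirks in the .driver_info field of the USB match table, copies them into sd->flags at sd_config() time, and lets sethvflip() invert the user's request for sensors that are mounted flipped by default. A stand-alone sketch of that flip computation (flip_reg() and the printed value are illustrative only):

#include <stdio.h>

#define FL_HFLIP 0x01           /* mirrored by default */
#define FL_VFLIP 0x02           /* flipped vertically by default */

static unsigned char flip_reg(int hflip, int vflip, unsigned int flags)
{
        if (flags & FL_HFLIP)
                hflip = !hflip;
        if (flags & FL_VFLIP)
                vflip = !vflip;
        return (hflip ? 0x08 : 0x00) | (vflip ? 0x04 : 0x00);
}

int main(void)
{
        /* user asks for no flip on a sensor that is vflipped by default */
        printf("0x%02x\n", flip_reg(0, 0, FL_VFLIP));   /* 0x04 */
        return 0;
}
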
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index e5697a6345e8..42cfcdfd8f4f 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -863,7 +863,7 @@ static struct sd_desc sd_desc = {
863}; 863};
864 864
865/* -- module initialisation -- */ 865/* -- module initialisation -- */
866static __devinitdata struct usb_device_id device_table[] = { 866static const struct usb_device_id device_table[] __devinitconst = {
867 {USB_DEVICE(0x093a, 0x2600)}, 867 {USB_DEVICE(0x093a, 0x2600)},
868 {USB_DEVICE(0x093a, 0x2601)}, 868 {USB_DEVICE(0x093a, 0x2601)},
869 {USB_DEVICE(0x093a, 0x2603)}, 869 {USB_DEVICE(0x093a, 0x2603)},
@@ -875,7 +875,7 @@ static __devinitdata struct usb_device_id device_table[] = {
875MODULE_DEVICE_TABLE(usb, device_table); 875MODULE_DEVICE_TABLE(usb, device_table);
876 876
877/* -- device connect -- */ 877/* -- device connect -- */
878static int sd_probe(struct usb_interface *intf, 878static int __devinit sd_probe(struct usb_interface *intf,
879 const struct usb_device_id *id) 879 const struct usb_device_id *id)
880{ 880{
881 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 881 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index b1944a7cbb0f..4cff8035614f 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -1158,7 +1158,7 @@ static int i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val)
1158 return i2c_w(gspca_dev, row); 1158 return i2c_w(gspca_dev, row);
1159} 1159}
1160 1160
1161int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val) 1161static int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
1162{ 1162{
1163 struct sd *sd = (struct sd *) gspca_dev; 1163 struct sd *sd = (struct sd *) gspca_dev;
1164 u8 row[8]; 1164 u8 row[8];
@@ -1183,7 +1183,7 @@ int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
1183 return 0; 1183 return 0;
1184} 1184}
1185 1185
1186int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val) 1186static int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
1187{ 1187{
1188 struct sd *sd = (struct sd *) gspca_dev; 1188 struct sd *sd = (struct sd *) gspca_dev;
1189 u8 row[8]; 1189 u8 row[8];
@@ -1476,8 +1476,9 @@ static int sn9c20x_input_init(struct gspca_dev *gspca_dev)
1476 if (input_register_device(sd->input_dev)) 1476 if (input_register_device(sd->input_dev))
1477 return -EINVAL; 1477 return -EINVAL;
1478 1478
1479 sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%d", 1479 sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%s-%s",
1480 gspca_dev->vdev.minor); 1480 gspca_dev->dev->bus->bus_name,
1481 gspca_dev->dev->devpath);
1481 1482
1482 if (IS_ERR(sd->input_task)) 1483 if (IS_ERR(sd->input_task))
1483 return -EINVAL; 1484 return -EINVAL;
@@ -2174,8 +2175,7 @@ static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode)
2174} 2175}
2175 2176
2176#define HW_WIN(mode, hstart, vstart) \ 2177#define HW_WIN(mode, hstart, vstart) \
2177((const u8 []){hstart & 0xff, hstart >> 8, \ 2178((const u8 []){hstart, 0, vstart, 0, \
2178vstart & 0xff, vstart >> 8, \
2179(mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \ 2179(mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \
2180(mode & MODE_SXGA ? 1024 >> 3 : 480 >> 3)}) 2180(mode & MODE_SXGA ? 1024 >> 3 : 480 >> 3)})
2181 2181
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 5be95bc65138..ddff2b5ee5c2 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1226,7 +1226,7 @@ static const struct sd_desc sd_desc = {
1226 .driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge 1226 .driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
1227 1227
1228 1228
1229static __devinitdata struct usb_device_id device_table[] = { 1229static const struct usb_device_id device_table[] __devinitconst = {
1230 {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110, 102)}, /* TAS5110C1B */ 1230 {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110, 102)}, /* TAS5110C1B */
1231 {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110, 101)}, /* TAS5110C1B */ 1231 {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110, 101)}, /* TAS5110C1B */
1232#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 1232#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
@@ -1257,7 +1257,7 @@ static __devinitdata struct usb_device_id device_table[] = {
1257MODULE_DEVICE_TABLE(usb, device_table); 1257MODULE_DEVICE_TABLE(usb, device_table);
1258 1258
1259/* -- device connect -- */ 1259/* -- device connect -- */
1260static int sd_probe(struct usb_interface *intf, 1260static int __devinit sd_probe(struct usb_interface *intf,
1261 const struct usb_device_id *id) 1261 const struct usb_device_id *id)
1262{ 1262{
1263 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 1263 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index ab28cc23e415..39257e4e074f 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -685,7 +685,7 @@ static struct sd_desc sd_desc = {
685}; 685};
686 686
687/* -- module initialisation -- */ 687/* -- module initialisation -- */
688static __devinitdata struct usb_device_id device_table[] = { 688static const struct usb_device_id device_table[] __devinitconst = {
689 {USB_DEVICE(0x06e1, 0xa190)}, 689 {USB_DEVICE(0x06e1, 0xa190)},
690/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505 690/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
691 {USB_DEVICE(0x0733, 0x0430)}, */ 691 {USB_DEVICE(0x0733, 0x0430)}, */
@@ -696,7 +696,7 @@ static __devinitdata struct usb_device_id device_table[] = {
696MODULE_DEVICE_TABLE(usb, device_table); 696MODULE_DEVICE_TABLE(usb, device_table);
697 697
698/* -- device connect -- */ 698/* -- device connect -- */
699static int sd_probe(struct usb_interface *intf, 699static int __devinit sd_probe(struct usb_interface *intf,
700 const struct usb_device_id *id) 700 const struct usb_device_id *id)
701{ 701{
702 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 702 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 8e23320d7ab7..2e2935532d99 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -126,12 +126,14 @@ static const struct v4l2_pix_format vga_mode[] = {
126}; 126};
127 127
128/* -- read a register -- */ 128/* -- read a register -- */
129static int reg_r(struct gspca_dev *gspca_dev, 129static u8 reg_r(struct gspca_dev *gspca_dev,
130 __u16 index) 130 __u16 index)
131{ 131{
132 struct usb_device *dev = gspca_dev->dev; 132 struct usb_device *dev = gspca_dev->dev;
133 int ret; 133 int ret;
134 134
135 if (gspca_dev->usb_err < 0)
136 return 0;
135 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 137 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
136 0x00, 138 0x00,
137 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 139 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -141,18 +143,21 @@ static int reg_r(struct gspca_dev *gspca_dev,
141 500); 143 500);
142 if (ret < 0) { 144 if (ret < 0) {
143 PDEBUG(D_ERR, "reg_r err %d", ret); 145 PDEBUG(D_ERR, "reg_r err %d", ret);
144 return ret; 146 gspca_dev->usb_err = ret;
147 return 0;
145 } 148 }
146 return gspca_dev->usb_buf[0]; 149 return gspca_dev->usb_buf[0];
147} 150}
148 151
149/* -- write a register -- */ 152/* -- write a register -- */
150static int reg_w(struct gspca_dev *gspca_dev, 153static void reg_w(struct gspca_dev *gspca_dev,
151 __u16 index, __u16 value) 154 __u16 index, __u16 value)
152{ 155{
153 struct usb_device *dev = gspca_dev->dev; 156 struct usb_device *dev = gspca_dev->dev;
154 int ret; 157 int ret;
155 158
159 if (gspca_dev->usb_err < 0)
160 return;
156 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 161 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
157 0x01, 162 0x01,
158 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 163 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -161,13 +166,14 @@ static int reg_w(struct gspca_dev *gspca_dev,
161 NULL, 166 NULL,
162 0, 167 0,
163 500); 168 500);
164 if (ret < 0) 169 if (ret < 0) {
165 PDEBUG(D_ERR, "reg_w err %d", ret); 170 PDEBUG(D_ERR, "reg_w err %d", ret);
166 return ret; 171 gspca_dev->usb_err = ret;
172 }
167} 173}
168 174
169/* -- get a bulk value (4 bytes) -- */ 175/* -- get a bulk value (4 bytes) -- */
170static int rcv_val(struct gspca_dev *gspca_dev, 176static void rcv_val(struct gspca_dev *gspca_dev,
171 int ads) 177 int ads)
172{ 178{
173 struct usb_device *dev = gspca_dev->dev; 179 struct usb_device *dev = gspca_dev->dev;
@@ -182,17 +188,22 @@ static int rcv_val(struct gspca_dev *gspca_dev,
182 reg_w(gspca_dev, 0x63a, 0); 188 reg_w(gspca_dev, 0x63a, 0);
183 reg_w(gspca_dev, 0x63b, 0); 189 reg_w(gspca_dev, 0x63b, 0);
184 reg_w(gspca_dev, 0x630, 5); 190 reg_w(gspca_dev, 0x630, 5);
191 if (gspca_dev->usb_err < 0)
192 return;
185 ret = usb_bulk_msg(dev, 193 ret = usb_bulk_msg(dev,
186 usb_rcvbulkpipe(dev, 0x05), 194 usb_rcvbulkpipe(dev, 0x05),
187 gspca_dev->usb_buf, 195 gspca_dev->usb_buf,
188 4, /* length */ 196 4, /* length */
189 &alen, 197 &alen,
190 500); /* timeout in milliseconds */ 198 500); /* timeout in milliseconds */
191 return ret; 199 if (ret < 0) {
200 PDEBUG(D_ERR, "rcv_val err %d", ret);
201 gspca_dev->usb_err = ret;
202 }
192} 203}
193 204
194/* -- send a bulk value -- */ 205/* -- send a bulk value -- */
195static int snd_val(struct gspca_dev *gspca_dev, 206static void snd_val(struct gspca_dev *gspca_dev,
196 int ads, 207 int ads,
197 unsigned int val) 208 unsigned int val)
198{ 209{
@@ -201,16 +212,9 @@ static int snd_val(struct gspca_dev *gspca_dev,
201 __u8 seq = 0; 212 __u8 seq = 0;
202 213
203 if (ads == 0x003f08) { 214 if (ads == 0x003f08) {
204 ret = reg_r(gspca_dev, 0x0704); 215 reg_r(gspca_dev, 0x0704);
205 if (ret < 0) 216 seq = reg_r(gspca_dev, 0x0705);
206 goto ko; 217 reg_r(gspca_dev, 0x0650);
207 ret = reg_r(gspca_dev, 0x0705);
208 if (ret < 0)
209 goto ko;
210 seq = ret; /* keep the sequence number */
211 ret = reg_r(gspca_dev, 0x0650);
212 if (ret < 0)
213 goto ko;
214 reg_w(gspca_dev, 0x654, seq); 218 reg_w(gspca_dev, 0x654, seq);
215 } else { 219 } else {
216 reg_w(gspca_dev, 0x654, (ads >> 16) & 0xff); 220 reg_w(gspca_dev, 0x654, (ads >> 16) & 0xff);
@@ -223,6 +227,8 @@ static int snd_val(struct gspca_dev *gspca_dev,
223 reg_w(gspca_dev, 0x65a, 0); 227 reg_w(gspca_dev, 0x65a, 0);
224 reg_w(gspca_dev, 0x65b, 0); 228 reg_w(gspca_dev, 0x65b, 0);
225 reg_w(gspca_dev, 0x650, 5); 229 reg_w(gspca_dev, 0x650, 5);
230 if (gspca_dev->usb_err < 0)
231 return;
226 gspca_dev->usb_buf[0] = val >> 24; 232 gspca_dev->usb_buf[0] = val >> 24;
227 gspca_dev->usb_buf[1] = val >> 16; 233 gspca_dev->usb_buf[1] = val >> 16;
228 gspca_dev->usb_buf[2] = val >> 8; 234 gspca_dev->usb_buf[2] = val >> 8;
@@ -233,24 +239,23 @@ static int snd_val(struct gspca_dev *gspca_dev,
233 4, 239 4,
234 &alen, 240 &alen,
235 500); /* timeout in milliseconds */ 241 500); /* timeout in milliseconds */
236 if (ret < 0) 242 if (ret < 0) {
237 goto ko; 243 PDEBUG(D_ERR, "snd_val err %d", ret);
238 if (ads == 0x003f08) { 244 gspca_dev->usb_err = ret;
239 seq += 4; 245 } else {
240 seq &= 0x3f; 246 if (ads == 0x003f08) {
241 reg_w(gspca_dev, 0x705, seq); 247 seq += 4;
248 seq &= 0x3f;
249 reg_w(gspca_dev, 0x705, seq);
250 }
242 } 251 }
243 return ret;
244ko:
245 PDEBUG(D_ERR, "snd_val err %d", ret);
246 return ret;
247} 252}
248 253
249/* set a camera parameter */ 254/* set a camera parameter */
250static int set_par(struct gspca_dev *gspca_dev, 255static void set_par(struct gspca_dev *gspca_dev,
251 int parval) 256 int parval)
252{ 257{
253 return snd_val(gspca_dev, 0x003f08, parval); 258 snd_val(gspca_dev, 0x003f08, parval);
254} 259}
255 260
256static void setbrightness(struct gspca_dev *gspca_dev) 261static void setbrightness(struct gspca_dev *gspca_dev)
@@ -311,18 +316,18 @@ static int sd_config(struct gspca_dev *gspca_dev,
311/* this function is called at probe and resume time */ 316/* this function is called at probe and resume time */
312static int sd_init(struct gspca_dev *gspca_dev) 317static int sd_init(struct gspca_dev *gspca_dev)
313{ 318{
314 int ret; 319 u8 ret;
315 320
316 /* check if the device responds */ 321 /* check if the device responds */
317 usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); 322 usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
318 ret = reg_r(gspca_dev, 0x0740); 323 ret = reg_r(gspca_dev, 0x0740);
319 if (ret < 0) 324 if (gspca_dev->usb_err >= 0) {
320 return ret; 325 if (ret != 0xff) {
321 if (ret != 0xff) { 326 PDEBUG(D_ERR|D_STREAM, "init reg: 0x%02x", ret);
322 PDEBUG(D_ERR|D_STREAM, "init reg: 0x%02x", ret); 327 gspca_dev->usb_err = -EIO;
323 return -1; 328 }
324 } 329 }
325 return 0; 330 return gspca_dev->usb_err;
326} 331}
327 332
328/* -- start the camera -- */ 333/* -- start the camera -- */
@@ -357,15 +362,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
357 if (ret < 0) { 362 if (ret < 0) {
358 PDEBUG(D_ERR|D_STREAM, "set intf %d %d failed", 363 PDEBUG(D_ERR|D_STREAM, "set intf %d %d failed",
359 gspca_dev->iface, gspca_dev->alt); 364 gspca_dev->iface, gspca_dev->alt);
365 gspca_dev->usb_err = ret;
360 goto out; 366 goto out;
361 } 367 }
362 ret = reg_r(gspca_dev, 0x0630); 368 reg_r(gspca_dev, 0x0630);
363 if (ret < 0)
364 goto out;
365 rcv_val(gspca_dev, 0x000020); /* << (value ff ff ff ff) */ 369 rcv_val(gspca_dev, 0x000020); /* << (value ff ff ff ff) */
366 ret = reg_r(gspca_dev, 0x0650); 370 reg_r(gspca_dev, 0x0650);
367 if (ret < 0)
368 goto out;
369 snd_val(gspca_dev, 0x000020, 0xffffffff); 371 snd_val(gspca_dev, 0x000020, 0xffffffff);
370 reg_w(gspca_dev, 0x0620, 0); 372 reg_w(gspca_dev, 0x0620, 0);
371 reg_w(gspca_dev, 0x0630, 0); 373 reg_w(gspca_dev, 0x0630, 0);
@@ -384,11 +386,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
384 /* start the video flow */ 386 /* start the video flow */
385 set_par(gspca_dev, 0x01000000); 387 set_par(gspca_dev, 0x01000000);
386 set_par(gspca_dev, 0x01000000); 388 set_par(gspca_dev, 0x01000000);
387 PDEBUG(D_STREAM, "camera started alt: 0x%02x", gspca_dev->alt); 389 if (gspca_dev->usb_err >= 0)
388 return 0; 390 PDEBUG(D_STREAM, "camera started alt: 0x%02x",
391 gspca_dev->alt);
389out: 392out:
390 PDEBUG(D_ERR|D_STREAM, "camera start err %d", ret); 393 return gspca_dev->usb_err;
391 return ret;
392} 394}
393 395
394static void sd_stopN(struct gspca_dev *gspca_dev) 396static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -456,7 +458,7 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
456 sd->brightness = val; 458 sd->brightness = val;
457 if (gspca_dev->streaming) 459 if (gspca_dev->streaming)
458 setbrightness(gspca_dev); 460 setbrightness(gspca_dev);
459 return 0; 461 return gspca_dev->usb_err;
460} 462}
461 463
462static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) 464static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -474,7 +476,7 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
474 sd->contrast = val; 476 sd->contrast = val;
475 if (gspca_dev->streaming) 477 if (gspca_dev->streaming)
476 setcontrast(gspca_dev); 478 setcontrast(gspca_dev);
477 return 0; 479 return gspca_dev->usb_err;
478} 480}
479 481
480static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) 482static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
@@ -492,7 +494,7 @@ static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
492 sd->colors = val; 494 sd->colors = val;
493 if (gspca_dev->streaming) 495 if (gspca_dev->streaming)
494 setcolors(gspca_dev); 496 setcolors(gspca_dev);
495 return 0; 497 return gspca_dev->usb_err;
496} 498}
497 499
498static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val) 500static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
@@ -510,7 +512,7 @@ static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
510 sd->lightfreq = val; 512 sd->lightfreq = val;
511 if (gspca_dev->streaming) 513 if (gspca_dev->streaming)
512 setfreq(gspca_dev); 514 setfreq(gspca_dev);
513 return 0; 515 return gspca_dev->usb_err;
514} 516}
515 517
516static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) 518static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
@@ -552,7 +554,7 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
552 sd->quality = jcomp->quality; 554 sd->quality = jcomp->quality;
553 if (gspca_dev->streaming) 555 if (gspca_dev->streaming)
554 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 556 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
555 return 0; 557 return gspca_dev->usb_err;
556} 558}
557 559
558static int sd_get_jcomp(struct gspca_dev *gspca_dev, 560static int sd_get_jcomp(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 72bf3b4f0a31..716df6b15fc5 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -460,13 +460,17 @@ static void reg_r(struct gspca_dev *gspca_dev,
460 u16 index, 460 u16 index,
461 u16 len) 461 u16 len)
462{ 462{
463 int ret;
464
463#ifdef GSPCA_DEBUG 465#ifdef GSPCA_DEBUG
464 if (len > USB_BUF_SZ) { 466 if (len > USB_BUF_SZ) {
465 err("reg_r: buffer overflow"); 467 err("reg_r: buffer overflow");
466 return; 468 return;
467 } 469 }
468#endif 470#endif
469 usb_control_msg(gspca_dev->dev, 471 if (gspca_dev->usb_err < 0)
472 return;
473 ret = usb_control_msg(gspca_dev->dev,
470 usb_rcvctrlpipe(gspca_dev->dev, 0), 474 usb_rcvctrlpipe(gspca_dev->dev, 0),
471 req, 475 req,
472 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 476 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -474,6 +478,10 @@ static void reg_r(struct gspca_dev *gspca_dev,
474 index, 478 index,
475 len ? gspca_dev->usb_buf : NULL, len, 479 len ? gspca_dev->usb_buf : NULL, len,
476 500); 480 500);
481 if (ret < 0) {
482 PDEBUG(D_ERR, "reg_r err %d", ret);
483 gspca_dev->usb_err = ret;
484 }
477} 485}
478 486
479/* write one byte */ 487/* write one byte */
@@ -483,40 +491,55 @@ static void reg_w_1(struct gspca_dev *gspca_dev,
483 u16 index, 491 u16 index,
484 u16 byte) 492 u16 byte)
485{ 493{
494 int ret;
495
496 if (gspca_dev->usb_err < 0)
497 return;
486 gspca_dev->usb_buf[0] = byte; 498 gspca_dev->usb_buf[0] = byte;
487 usb_control_msg(gspca_dev->dev, 499 ret = usb_control_msg(gspca_dev->dev,
488 usb_sndctrlpipe(gspca_dev->dev, 0), 500 usb_sndctrlpipe(gspca_dev->dev, 0),
489 req, 501 req,
490 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 502 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
491 value, index, 503 value, index,
492 gspca_dev->usb_buf, 1, 504 gspca_dev->usb_buf, 1,
493 500); 505 500);
506 if (ret < 0) {
507 PDEBUG(D_ERR, "reg_w_1 err %d", ret);
508 gspca_dev->usb_err = ret;
509 }
494} 510}
495 511
496/* write req / index / value */ 512/* write req / index / value */
497static int reg_w_riv(struct usb_device *dev, 513static void reg_w_riv(struct gspca_dev *gspca_dev,
498 u8 req, u16 index, u16 value) 514 u8 req, u16 index, u16 value)
499{ 515{
516 struct usb_device *dev = gspca_dev->dev;
500 int ret; 517 int ret;
501 518
519 if (gspca_dev->usb_err < 0)
520 return;
502 ret = usb_control_msg(dev, 521 ret = usb_control_msg(dev,
503 usb_sndctrlpipe(dev, 0), 522 usb_sndctrlpipe(dev, 0),
504 req, 523 req,
505 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 524 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
506 value, index, NULL, 0, 500); 525 value, index, NULL, 0, 500);
507 PDEBUG(D_USBO, "reg write: 0x%02x,0x%02x:0x%02x, %d", 526 if (ret < 0) {
508 req, index, value, ret); 527 PDEBUG(D_ERR, "reg_w_riv err %d", ret);
509 if (ret < 0) 528 gspca_dev->usb_err = ret;
510 PDEBUG(D_ERR, "reg write: error %d", ret); 529 return;
511 return ret; 530 }
531 PDEBUG(D_USBO, "reg_w_riv: 0x%02x,0x%04x:0x%04x",
532 req, index, value);
512} 533}
513 534
514/* read 1 byte */ 535/* read 1 byte */
515static int reg_r_1(struct gspca_dev *gspca_dev, 536static u8 reg_r_1(struct gspca_dev *gspca_dev,
516 u16 value) /* wValue */ 537 u16 value) /* wValue */
517{ 538{
518 int ret; 539 int ret;
519 540
541 if (gspca_dev->usb_err < 0)
542 return 0;
520 ret = usb_control_msg(gspca_dev->dev, 543 ret = usb_control_msg(gspca_dev->dev,
521 usb_rcvctrlpipe(gspca_dev->dev, 0), 544 usb_rcvctrlpipe(gspca_dev->dev, 0),
522 0x20, /* request */ 545 0x20, /* request */
@@ -527,19 +550,22 @@ static int reg_r_1(struct gspca_dev *gspca_dev,
527 500); /* timeout */ 550 500); /* timeout */
528 if (ret < 0) { 551 if (ret < 0) {
529 PDEBUG(D_ERR, "reg_r_1 err %d", ret); 552 PDEBUG(D_ERR, "reg_r_1 err %d", ret);
553 gspca_dev->usb_err = ret;
530 return 0; 554 return 0;
531 } 555 }
532 return gspca_dev->usb_buf[0]; 556 return gspca_dev->usb_buf[0];
533} 557}
534 558
535/* read 1 or 2 bytes - returns < 0 if error */ 559/* read 1 or 2 bytes */
536static int reg_r_12(struct gspca_dev *gspca_dev, 560static u16 reg_r_12(struct gspca_dev *gspca_dev,
537 u8 req, /* bRequest */ 561 u8 req, /* bRequest */
538 u16 index, /* wIndex */ 562 u16 index, /* wIndex */
539 u16 length) /* wLength (1 or 2 only) */ 563 u16 length) /* wLength (1 or 2 only) */
540{ 564{
541 int ret; 565 int ret;
542 566
567 if (gspca_dev->usb_err < 0)
568 return 0;
543 gspca_dev->usb_buf[1] = 0; 569 gspca_dev->usb_buf[1] = 0;
544 ret = usb_control_msg(gspca_dev->dev, 570 ret = usb_control_msg(gspca_dev->dev,
545 usb_rcvctrlpipe(gspca_dev->dev, 0), 571 usb_rcvctrlpipe(gspca_dev->dev, 0),
@@ -550,62 +576,44 @@ static int reg_r_12(struct gspca_dev *gspca_dev,
550 gspca_dev->usb_buf, length, 576 gspca_dev->usb_buf, length,
551 500); 577 500);
552 if (ret < 0) { 578 if (ret < 0) {
553 PDEBUG(D_ERR, "reg_read err %d", ret); 579 PDEBUG(D_ERR, "reg_r_12 err %d", ret);
554 return -1; 580 gspca_dev->usb_err = ret;
581 return 0;
555 } 582 }
556 return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]; 583 return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
557} 584}
558 585
559static int write_vector(struct gspca_dev *gspca_dev, 586static void write_vector(struct gspca_dev *gspca_dev,
560 const struct cmd *data, int ncmds) 587 const struct cmd *data, int ncmds)
561{ 588{
562 struct usb_device *dev = gspca_dev->dev;
563 int ret;
564
565 while (--ncmds >= 0) { 589 while (--ncmds >= 0) {
566 ret = reg_w_riv(dev, data->req, data->idx, data->val); 590 reg_w_riv(gspca_dev, data->req, data->idx, data->val);
567 if (ret < 0) {
568 PDEBUG(D_ERR,
569 "Register write failed for 0x%02x, 0x%04x, 0x%04x",
570 data->req, data->val, data->idx);
571 return ret;
572 }
573 data++; 591 data++;
574 } 592 }
575 return 0;
576} 593}
577 594
578static int spca50x_setup_qtable(struct gspca_dev *gspca_dev, 595static void setup_qtable(struct gspca_dev *gspca_dev,
579 const u8 qtable[2][64]) 596 const u8 qtable[2][64])
580{ 597{
581 struct usb_device *dev = gspca_dev->dev; 598 int i;
582 int i, err;
583 599
584 /* loop over y components */ 600 /* loop over y components */
585 for (i = 0; i < 64; i++) { 601 for (i = 0; i < 64; i++)
586 err = reg_w_riv(dev, 0x00, 0x2800 + i, qtable[0][i]); 602 reg_w_riv(gspca_dev, 0x00, 0x2800 + i, qtable[0][i]);
587 if (err < 0)
588 return err;
589 }
590 603
591 /* loop over c components */ 604 /* loop over c components */
592 for (i = 0; i < 64; i++) { 605 for (i = 0; i < 64; i++)
593 err = reg_w_riv(dev, 0x00, 0x2840 + i, qtable[1][i]); 606 reg_w_riv(gspca_dev, 0x00, 0x2840 + i, qtable[1][i]);
594 if (err < 0)
595 return err;
596 }
597 return 0;
598} 607}
599 608
600static void spca504_acknowledged_command(struct gspca_dev *gspca_dev, 609static void spca504_acknowledged_command(struct gspca_dev *gspca_dev,
601 u8 req, u16 idx, u16 val) 610 u8 req, u16 idx, u16 val)
602{ 611{
603 struct usb_device *dev = gspca_dev->dev; 612 u16 notdone;
604 int notdone;
605 613
606 reg_w_riv(dev, req, idx, val); 614 reg_w_riv(gspca_dev, req, idx, val);
607 notdone = reg_r_12(gspca_dev, 0x01, 0x0001, 1); 615 notdone = reg_r_12(gspca_dev, 0x01, 0x0001, 1);
608 reg_w_riv(dev, req, idx, val); 616 reg_w_riv(gspca_dev, req, idx, val);
609 617
610 PDEBUG(D_FRAM, "before wait 0x%04x", notdone); 618 PDEBUG(D_FRAM, "before wait 0x%04x", notdone);
611 619
@@ -616,23 +624,22 @@ static void spca504_acknowledged_command(struct gspca_dev *gspca_dev,
616 624
617static void spca504A_acknowledged_command(struct gspca_dev *gspca_dev, 625static void spca504A_acknowledged_command(struct gspca_dev *gspca_dev,
618 u8 req, 626 u8 req,
619 u16 idx, u16 val, u8 stat, u8 count) 627 u16 idx, u16 val, u16 endcode, u8 count)
620{ 628{
621 struct usb_device *dev = gspca_dev->dev; 629 u16 status;
622 int status;
623 u8 endcode;
624 630
625 reg_w_riv(dev, req, idx, val); 631 reg_w_riv(gspca_dev, req, idx, val);
626 status = reg_r_12(gspca_dev, 0x01, 0x0001, 1); 632 status = reg_r_12(gspca_dev, 0x01, 0x0001, 1);
627 endcode = stat; 633 if (gspca_dev->usb_err < 0)
628 PDEBUG(D_FRAM, "Status 0x%x Need 0x%04x", status, stat); 634 return;
635 PDEBUG(D_FRAM, "Status 0x%04x Need 0x%04x", status, endcode);
629 if (!count) 636 if (!count)
630 return; 637 return;
631 count = 200; 638 count = 200;
632 while (--count > 0) { 639 while (--count > 0) {
633 msleep(10); 640 msleep(10);
634 /* gsmart mini2 write a each wait setting 1 ms is enough */ 641 /* gsmart mini2 write a each wait setting 1 ms is enough */
635/* reg_w_riv(dev, req, idx, val); */ 642/* reg_w_riv(gspca_dev, req, idx, val); */
636 status = reg_r_12(gspca_dev, 0x01, 0x0001, 1); 643 status = reg_r_12(gspca_dev, 0x01, 0x0001, 1);
637 if (status == endcode) { 644 if (status == endcode) {
638 PDEBUG(D_FRAM, "status 0x%04x after wait %d", 645 PDEBUG(D_FRAM, "status 0x%04x after wait %d",
@@ -642,7 +649,7 @@ static void spca504A_acknowledged_command(struct gspca_dev *gspca_dev,
642 } 649 }
643} 650}
644 651
645static int spca504B_PollingDataReady(struct gspca_dev *gspca_dev) 652static void spca504B_PollingDataReady(struct gspca_dev *gspca_dev)
646{ 653{
647 int count = 10; 654 int count = 10;
648 655
@@ -652,7 +659,6 @@ static int spca504B_PollingDataReady(struct gspca_dev *gspca_dev)
652 break; 659 break;
653 msleep(10); 660 msleep(10);
654 } 661 }
655 return gspca_dev->usb_buf[0];
656} 662}
657 663
658static void spca504B_WaitCmdStatus(struct gspca_dev *gspca_dev) 664static void spca504B_WaitCmdStatus(struct gspca_dev *gspca_dev)
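
spca504B_PollingDataReady() no longer returns a value; its only job is the bounded poll itself: up to ten status reads with a 10 ms sleep between them, stopping early once the bridge reports ready. A user-space sketch of that retry pattern, with read_status() standing in for the USB register read:

#include <stdio.h>
#include <unistd.h>

static int read_status(int attempt)     /* pretend hw is ready on try 3 */
{
        return attempt >= 3;
}

int main(void)
{
        int count = 10;

        while (--count > 0) {
                if (read_status(10 - count))
                        break;
                usleep(10 * 1000);      /* the driver uses msleep(10) */
        }
        printf(count > 0 ? "ready\n" : "timed out\n");
        return 0;
}
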
@@ -686,28 +692,26 @@ static void spca50x_GetFirmware(struct gspca_dev *gspca_dev)
686static void spca504B_SetSizeType(struct gspca_dev *gspca_dev) 692static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
687{ 693{
688 struct sd *sd = (struct sd *) gspca_dev; 694 struct sd *sd = (struct sd *) gspca_dev;
689 struct usb_device *dev = gspca_dev->dev;
690 u8 Size; 695 u8 Size;
691 int rc;
692 696
693 Size = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; 697 Size = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
694 switch (sd->bridge) { 698 switch (sd->bridge) {
695 case BRIDGE_SPCA533: 699 case BRIDGE_SPCA533:
696 reg_w_riv(dev, 0x31, 0, 0); 700 reg_w_riv(gspca_dev, 0x31, 0, 0);
697 spca504B_WaitCmdStatus(gspca_dev); 701 spca504B_WaitCmdStatus(gspca_dev);
698 rc = spca504B_PollingDataReady(gspca_dev); 702 spca504B_PollingDataReady(gspca_dev);
699 spca50x_GetFirmware(gspca_dev); 703 spca50x_GetFirmware(gspca_dev);
700 reg_w_1(gspca_dev, 0x24, 0, 8, 2); /* type */ 704 reg_w_1(gspca_dev, 0x24, 0, 8, 2); /* type */
701 reg_r(gspca_dev, 0x24, 8, 1); 705 reg_r(gspca_dev, 0x24, 8, 1);
702 706
703 reg_w_1(gspca_dev, 0x25, 0, 4, Size); 707 reg_w_1(gspca_dev, 0x25, 0, 4, Size);
704 reg_r(gspca_dev, 0x25, 4, 1); /* size */ 708 reg_r(gspca_dev, 0x25, 4, 1); /* size */
705 rc = spca504B_PollingDataReady(gspca_dev); 709 spca504B_PollingDataReady(gspca_dev);
706 710
707 /* Init the cam width height with some values get on init ? */ 711 /* Init the cam width height with some values get on init ? */
708 reg_w_riv(dev, 0x31, 0, 0x04); 712 reg_w_riv(gspca_dev, 0x31, 0, 0x04);
709 spca504B_WaitCmdStatus(gspca_dev); 713 spca504B_WaitCmdStatus(gspca_dev);
710 rc = spca504B_PollingDataReady(gspca_dev); 714 spca504B_PollingDataReady(gspca_dev);
711 break; 715 break;
712 default: 716 default:
713/* case BRIDGE_SPCA504B: */ 717/* case BRIDGE_SPCA504B: */
@@ -716,7 +720,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
716 reg_r(gspca_dev, 0x25, 4, 1); /* size */ 720 reg_r(gspca_dev, 0x25, 4, 1); /* size */
717 reg_w_1(gspca_dev, 0x27, 0, 0, 6); 721 reg_w_1(gspca_dev, 0x27, 0, 0, 6);
718 reg_r(gspca_dev, 0x27, 0, 1); /* type */ 722 reg_r(gspca_dev, 0x27, 0, 1); /* type */
719 rc = spca504B_PollingDataReady(gspca_dev); 723 spca504B_PollingDataReady(gspca_dev);
720 break; 724 break;
721 case BRIDGE_SPCA504: 725 case BRIDGE_SPCA504:
722 Size += 3; 726 Size += 3;
@@ -733,8 +737,8 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
733 break; 737 break;
734 case BRIDGE_SPCA504C: 738 case BRIDGE_SPCA504C:
735 /* capture mode */ 739 /* capture mode */
736 reg_w_riv(dev, 0xa0, (0x0500 | (Size & 0x0f)), 0x00); 740 reg_w_riv(gspca_dev, 0xa0, (0x0500 | (Size & 0x0f)), 0x00);
737 reg_w_riv(dev, 0x20, 0x01, 0x0500 | (Size & 0x0f)); 741 reg_w_riv(gspca_dev, 0x20, 0x01, 0x0500 | (Size & 0x0f));
738 break; 742 break;
739 } 743 }
740} 744}
@@ -762,37 +766,33 @@ static void spca504B_setQtable(struct gspca_dev *gspca_dev)
762static void setbrightness(struct gspca_dev *gspca_dev) 766static void setbrightness(struct gspca_dev *gspca_dev)
763{ 767{
764 struct sd *sd = (struct sd *) gspca_dev; 768 struct sd *sd = (struct sd *) gspca_dev;
765 struct usb_device *dev = gspca_dev->dev;
766 u16 reg; 769 u16 reg;
767 770
768 reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f0 : 0x21a7; 771 reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f0 : 0x21a7;
769 reg_w_riv(dev, 0x00, reg, sd->brightness); 772 reg_w_riv(gspca_dev, 0x00, reg, sd->brightness);
770} 773}
771 774
772static void setcontrast(struct gspca_dev *gspca_dev) 775static void setcontrast(struct gspca_dev *gspca_dev)
773{ 776{
774 struct sd *sd = (struct sd *) gspca_dev; 777 struct sd *sd = (struct sd *) gspca_dev;
775 struct usb_device *dev = gspca_dev->dev;
776 u16 reg; 778 u16 reg;
777 779
778 reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f1 : 0x21a8; 780 reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f1 : 0x21a8;
779 reg_w_riv(dev, 0x00, reg, sd->contrast); 781 reg_w_riv(gspca_dev, 0x00, reg, sd->contrast);
780} 782}
781 783
782static void setcolors(struct gspca_dev *gspca_dev) 784static void setcolors(struct gspca_dev *gspca_dev)
783{ 785{
784 struct sd *sd = (struct sd *) gspca_dev; 786 struct sd *sd = (struct sd *) gspca_dev;
785 struct usb_device *dev = gspca_dev->dev;
786 u16 reg; 787 u16 reg;
787 788
788 reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f6 : 0x21ae; 789 reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f6 : 0x21ae;
789 reg_w_riv(dev, 0x00, reg, sd->colors); 790 reg_w_riv(gspca_dev, 0x00, reg, sd->colors);
790} 791}
791 792
792static void init_ctl_reg(struct gspca_dev *gspca_dev) 793static void init_ctl_reg(struct gspca_dev *gspca_dev)
793{ 794{
794 struct sd *sd = (struct sd *) gspca_dev; 795 struct sd *sd = (struct sd *) gspca_dev;
795 struct usb_device *dev = gspca_dev->dev;
796 int pollreg = 1; 796 int pollreg = 1;
797 797
798 setbrightness(gspca_dev); 798 setbrightness(gspca_dev);
@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
807 default: 807 default:
808/* case BRIDGE_SPCA533: */ 808/* case BRIDGE_SPCA533: */
809/* case BRIDGE_SPCA504B: */ 809/* case BRIDGE_SPCA504B: */
810 reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */ 810 reg_w_riv(gspca_dev, 0, 0x00, 0x21ad); /* hue */
811 reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */ 811 reg_w_riv(gspca_dev, 0, 0x01, 0x21ac); /* sat/hue */
812 reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */ 812 reg_w_riv(gspca_dev, 0, 0x00, 0x21a3); /* gamma */
813 break; 813 break;
814 case BRIDGE_SPCA536: 814 case BRIDGE_SPCA536:
815 reg_w_riv(dev, 0, 0x40, 0x20f5); 815 reg_w_riv(gspca_dev, 0, 0x40, 0x20f5);
816 reg_w_riv(dev, 0, 0x01, 0x20f4); 816 reg_w_riv(gspca_dev, 0, 0x01, 0x20f4);
817 reg_w_riv(dev, 0, 0x00, 0x2089); 817 reg_w_riv(gspca_dev, 0, 0x00, 0x2089);
818 break; 818 break;
819 } 819 }
820 if (pollreg) 820 if (pollreg)
@@ -881,18 +881,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
881static int sd_init(struct gspca_dev *gspca_dev) 881static int sd_init(struct gspca_dev *gspca_dev)
882{ 882{
883 struct sd *sd = (struct sd *) gspca_dev; 883 struct sd *sd = (struct sd *) gspca_dev;
884 struct usb_device *dev = gspca_dev->dev; 884 int i;
885 int i, err_code;
886 u8 info[6]; 885 u8 info[6];
887 886
888 switch (sd->bridge) { 887 switch (sd->bridge) {
889 case BRIDGE_SPCA504B: 888 case BRIDGE_SPCA504B:
890 reg_w_riv(dev, 0x1d, 0x00, 0); 889 reg_w_riv(gspca_dev, 0x1d, 0x00, 0);
891 reg_w_riv(dev, 0, 0x01, 0x2306); 890 reg_w_riv(gspca_dev, 0, 0x01, 0x2306);
892 reg_w_riv(dev, 0, 0x00, 0x0d04); 891 reg_w_riv(gspca_dev, 0, 0x00, 0x0d04);
893 reg_w_riv(dev, 0, 0x00, 0x2000); 892 reg_w_riv(gspca_dev, 0, 0x00, 0x2000);
894 reg_w_riv(dev, 0, 0x13, 0x2301); 893 reg_w_riv(gspca_dev, 0, 0x13, 0x2301);
895 reg_w_riv(dev, 0, 0x00, 0x2306); 894 reg_w_riv(gspca_dev, 0, 0x00, 0x2306);
896 /* fall thru */ 895 /* fall thru */
897 case BRIDGE_SPCA533: 896 case BRIDGE_SPCA533:
898 spca504B_PollingDataReady(gspca_dev); 897 spca504B_PollingDataReady(gspca_dev);
@@ -904,13 +903,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
904 reg_w_1(gspca_dev, 0x24, 0, 0, 0); 903 reg_w_1(gspca_dev, 0x24, 0, 0, 0);
905 reg_r(gspca_dev, 0x24, 0, 1); 904 reg_r(gspca_dev, 0x24, 0, 1);
906 spca504B_PollingDataReady(gspca_dev); 905 spca504B_PollingDataReady(gspca_dev);
907 reg_w_riv(dev, 0x34, 0, 0); 906 reg_w_riv(gspca_dev, 0x34, 0, 0);
908 spca504B_WaitCmdStatus(gspca_dev); 907 spca504B_WaitCmdStatus(gspca_dev);
909 break; 908 break;
910 case BRIDGE_SPCA504C: /* pccam600 */ 909 case BRIDGE_SPCA504C: /* pccam600 */
911 PDEBUG(D_STREAM, "Opening SPCA504 (PC-CAM 600)"); 910 PDEBUG(D_STREAM, "Opening SPCA504 (PC-CAM 600)");
912 reg_w_riv(dev, 0xe0, 0x0000, 0x0000); 911 reg_w_riv(gspca_dev, 0xe0, 0x0000, 0x0000);
913 reg_w_riv(dev, 0xe0, 0x0000, 0x0001); /* reset */ 912 reg_w_riv(gspca_dev, 0xe0, 0x0000, 0x0001); /* reset */
914 spca504_wait_status(gspca_dev); 913 spca504_wait_status(gspca_dev);
915 if (sd->subtype == LogitechClickSmart420) 914 if (sd->subtype == LogitechClickSmart420)
916 write_vector(gspca_dev, 915 write_vector(gspca_dev,
@@ -919,12 +918,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
919 else 918 else
920 write_vector(gspca_dev, spca504_pccam600_open_data, 919 write_vector(gspca_dev, spca504_pccam600_open_data,
921 ARRAY_SIZE(spca504_pccam600_open_data)); 920 ARRAY_SIZE(spca504_pccam600_open_data));
922 err_code = spca50x_setup_qtable(gspca_dev, 921 setup_qtable(gspca_dev, qtable_creative_pccam);
923 qtable_creative_pccam);
924 if (err_code < 0) {
925 PDEBUG(D_ERR|D_STREAM, "spca50x_setup_qtable failed");
926 return err_code;
927 }
928 break; 922 break;
929 default: 923 default:
930/* case BRIDGE_SPCA504: */ 924/* case BRIDGE_SPCA504: */
@@ -958,29 +952,24 @@ static int sd_init(struct gspca_dev *gspca_dev)
958 6, 0, 0x86, 1); */ 952 6, 0, 0x86, 1); */
959/* spca504A_acknowledged_command (gspca_dev, 0x24, 953/* spca504A_acknowledged_command (gspca_dev, 0x24,
960 0, 0, 0x9D, 1); */ 954 0, 0, 0x9D, 1); */
961 reg_w_riv(dev, 0x00, 0x270c, 0x05); /* L92 sno1t.txt */ 955 reg_w_riv(gspca_dev, 0x00, 0x270c, 0x05);
962 reg_w_riv(dev, 0x00, 0x2310, 0x05); 956 /* L92 sno1t.txt */
957 reg_w_riv(gspca_dev, 0x00, 0x2310, 0x05);
963 spca504A_acknowledged_command(gspca_dev, 0x01, 958 spca504A_acknowledged_command(gspca_dev, 0x01,
964 0x0f, 0, 0xff, 0); 959 0x0f, 0, 0xff, 0);
965 } 960 }
966 /* setup qtable */ 961 /* setup qtable */
967 reg_w_riv(dev, 0, 0x2000, 0); 962 reg_w_riv(gspca_dev, 0, 0x2000, 0);
968 reg_w_riv(dev, 0, 0x2883, 1); 963 reg_w_riv(gspca_dev, 0, 0x2883, 1);
969 err_code = spca50x_setup_qtable(gspca_dev, 964 setup_qtable(gspca_dev, qtable_spca504_default);
970 qtable_spca504_default);
971 if (err_code < 0) {
972 PDEBUG(D_ERR, "spca50x_setup_qtable failed");
973 return err_code;
974 }
975 break; 965 break;
976 } 966 }
977 return 0; 967 return gspca_dev->usb_err;
978} 968}
979 969
980static int sd_start(struct gspca_dev *gspca_dev) 970static int sd_start(struct gspca_dev *gspca_dev)
981{ 971{
982 struct sd *sd = (struct sd *) gspca_dev; 972 struct sd *sd = (struct sd *) gspca_dev;
983 struct usb_device *dev = gspca_dev->dev;
984 int enable; 973 int enable;
985 int i; 974 int i;
986 u8 info[6]; 975 u8 info[6];
@@ -1005,13 +994,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
1005 case MegapixV4: 994 case MegapixV4:
1006 case LogitechClickSmart820: 995 case LogitechClickSmart820:
1007 case MegaImageVI: 996 case MegaImageVI:
1008 reg_w_riv(dev, 0xf0, 0, 0); 997 reg_w_riv(gspca_dev, 0xf0, 0, 0);
1009 spca504B_WaitCmdStatus(gspca_dev); 998 spca504B_WaitCmdStatus(gspca_dev);
1010 reg_r(gspca_dev, 0xf0, 4, 0); 999 reg_r(gspca_dev, 0xf0, 4, 0);
1011 spca504B_WaitCmdStatus(gspca_dev); 1000 spca504B_WaitCmdStatus(gspca_dev);
1012 break; 1001 break;
1013 default: 1002 default:
1014 reg_w_riv(dev, 0x31, 0, 0x04); 1003 reg_w_riv(gspca_dev, 0x31, 0, 0x04);
1015 spca504B_WaitCmdStatus(gspca_dev); 1004 spca504B_WaitCmdStatus(gspca_dev);
1016 spca504B_PollingDataReady(gspca_dev); 1005 spca504B_PollingDataReady(gspca_dev);
1017 break; 1006 break;
@@ -1048,8 +1037,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
1048 spca504_acknowledged_command(gspca_dev, 0x24, 0, 0); 1037 spca504_acknowledged_command(gspca_dev, 0x24, 0, 0);
1049 } 1038 }
1050 spca504B_SetSizeType(gspca_dev); 1039 spca504B_SetSizeType(gspca_dev);
1051 reg_w_riv(dev, 0x00, 0x270c, 0x05); /* L92 sno1t.txt */ 1040 reg_w_riv(gspca_dev, 0x00, 0x270c, 0x05);
1052 reg_w_riv(dev, 0x00, 0x2310, 0x05); 1041 /* L92 sno1t.txt */
1042 reg_w_riv(gspca_dev, 0x00, 0x2310, 0x05);
1053 break; 1043 break;
1054 case BRIDGE_SPCA504C: 1044 case BRIDGE_SPCA504C:
1055 if (sd->subtype == LogitechClickSmart420) { 1045 if (sd->subtype == LogitechClickSmart420) {
@@ -1061,36 +1051,37 @@ static int sd_start(struct gspca_dev *gspca_dev)
1061 ARRAY_SIZE(spca504_pccam600_init_data)); 1051 ARRAY_SIZE(spca504_pccam600_init_data));
1062 } 1052 }
1063 enable = (sd->autogain ? 0x04 : 0x01); 1053 enable = (sd->autogain ? 0x04 : 0x01);
1064 reg_w_riv(dev, 0x0c, 0x0000, enable); /* auto exposure */ 1054 reg_w_riv(gspca_dev, 0x0c, 0x0000, enable);
1065 reg_w_riv(dev, 0xb0, 0x0000, enable); /* auto whiteness */ 1055 /* auto exposure */
1056 reg_w_riv(gspca_dev, 0xb0, 0x0000, enable);
1057 /* auto whiteness */
1066 1058
1067 /* set default exposure compensation and whiteness balance */ 1059 /* set default exposure compensation and whiteness balance */
1068 reg_w_riv(dev, 0x30, 0x0001, 800); /* ~ 20 fps */ 1060 reg_w_riv(gspca_dev, 0x30, 0x0001, 800); /* ~ 20 fps */
1069 reg_w_riv(dev, 0x30, 0x0002, 1600); 1061 reg_w_riv(gspca_dev, 0x30, 0x0002, 1600);
1070 spca504B_SetSizeType(gspca_dev); 1062 spca504B_SetSizeType(gspca_dev);
1071 break; 1063 break;
1072 } 1064 }
1073 init_ctl_reg(gspca_dev); 1065 init_ctl_reg(gspca_dev);
1074 return 0; 1066 return gspca_dev->usb_err;
1075} 1067}
1076 1068
1077static void sd_stopN(struct gspca_dev *gspca_dev) 1069static void sd_stopN(struct gspca_dev *gspca_dev)
1078{ 1070{
1079 struct sd *sd = (struct sd *) gspca_dev; 1071 struct sd *sd = (struct sd *) gspca_dev;
1080 struct usb_device *dev = gspca_dev->dev;
1081 1072
1082 switch (sd->bridge) { 1073 switch (sd->bridge) {
1083 default: 1074 default:
1084/* case BRIDGE_SPCA533: */ 1075/* case BRIDGE_SPCA533: */
1085/* case BRIDGE_SPCA536: */ 1076/* case BRIDGE_SPCA536: */
1086/* case BRIDGE_SPCA504B: */ 1077/* case BRIDGE_SPCA504B: */
1087 reg_w_riv(dev, 0x31, 0, 0); 1078 reg_w_riv(gspca_dev, 0x31, 0, 0);
1088 spca504B_WaitCmdStatus(gspca_dev); 1079 spca504B_WaitCmdStatus(gspca_dev);
1089 spca504B_PollingDataReady(gspca_dev); 1080 spca504B_PollingDataReady(gspca_dev);
1090 break; 1081 break;
1091 case BRIDGE_SPCA504: 1082 case BRIDGE_SPCA504:
1092 case BRIDGE_SPCA504C: 1083 case BRIDGE_SPCA504C:
1093 reg_w_riv(dev, 0x00, 0x2000, 0x0000); 1084 reg_w_riv(gspca_dev, 0x00, 0x2000, 0x0000);
1094 1085
1095 if (sd->subtype == AiptekMiniPenCam13) { 1086 if (sd->subtype == AiptekMiniPenCam13) {
1096 /* spca504a aiptek */ 1087 /* spca504a aiptek */
@@ -1102,7 +1093,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1102 0x0f, 0x00, 0xff, 1); 1093 0x0f, 0x00, 0xff, 1);
1103 } else { 1094 } else {
1104 spca504_acknowledged_command(gspca_dev, 0x24, 0, 0); 1095 spca504_acknowledged_command(gspca_dev, 0x24, 0, 0);
1105 reg_w_riv(dev, 0x01, 0x000f, 0x0000); 1096 reg_w_riv(gspca_dev, 0x01, 0x000f, 0x0000);
1106 } 1097 }
1107 break; 1098 break;
1108 } 1099 }
@@ -1216,7 +1207,7 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
1216 sd->brightness = val; 1207 sd->brightness = val;
1217 if (gspca_dev->streaming) 1208 if (gspca_dev->streaming)
1218 setbrightness(gspca_dev); 1209 setbrightness(gspca_dev);
1219 return 0; 1210 return gspca_dev->usb_err;
1220} 1211}
1221 1212
1222static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) 1213static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -1234,7 +1225,7 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
1234 sd->contrast = val; 1225 sd->contrast = val;
1235 if (gspca_dev->streaming) 1226 if (gspca_dev->streaming)
1236 setcontrast(gspca_dev); 1227 setcontrast(gspca_dev);
1237 return 0; 1228 return gspca_dev->usb_err;
1238} 1229}
1239 1230
1240static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) 1231static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
@@ -1252,7 +1243,7 @@ static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
1252 sd->colors = val; 1243 sd->colors = val;
1253 if (gspca_dev->streaming) 1244 if (gspca_dev->streaming)
1254 setcolors(gspca_dev); 1245 setcolors(gspca_dev);
1255 return 0; 1246 return gspca_dev->usb_err;
1256} 1247}
1257 1248
1258static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val) 1249static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
@@ -1292,7 +1283,7 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
1292 sd->quality = jcomp->quality; 1283 sd->quality = jcomp->quality;
1293 if (gspca_dev->streaming) 1284 if (gspca_dev->streaming)
1294 jpeg_set_qual(sd->jpeg_hdr, sd->quality); 1285 jpeg_set_qual(sd->jpeg_hdr, sd->quality);
1295 return 0; 1286 return gspca_dev->usb_err;
1296} 1287}
1297 1288
1298static int sd_get_jcomp(struct gspca_dev *gspca_dev, 1289static int sd_get_jcomp(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 69e5dc4fc9de..1a800fc1c00e 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5345,9 +5345,6 @@ static const struct usb_action tas5130cxx_InitialScale[] = { /* 320x240 */
5345 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, 5345 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
5346 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, 5346 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
5347 {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, 5347 {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
5348 {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
5349 {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
5350
5351 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, 5348 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
5352 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, 5349 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
5353 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, 5350 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
@@ -5364,27 +5361,27 @@ static const struct usb_action tas5130cxx_InitialScale[] = { /* 320x240 */
5364 {0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION}, 5361 {0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION},
5365 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, 5362 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
5366 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, 5363 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
5367 {0xa0, 0x95, ZC3XX_R18D_YTARGET}, 5364 {0xa0, 0x70, ZC3XX_R18D_YTARGET},
5368 {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, 5365 {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN},
5369 {0xa0, 0x00, 0x01ad}, 5366 {0xa0, 0x00, 0x01ad},
5370 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, 5367 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
5371 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, 5368 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
5372 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, 5369 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
5373 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, 5370 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
5371 {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
5372 {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
5374 {} 5373 {}
5375}; 5374};
5376static const struct usb_action tas5130cxx_Initial[] = { /* 640x480 */ 5375static const struct usb_action tas5130cxx_Initial[] = { /* 640x480 */
5377 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, 5376 {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
5378 {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT}, 5377 {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT},
5379 {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, 5378 {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING},
5380 {0xa0, 0x02, ZC3XX_R010_CMOSSENSORSELECT}, 5379 {0xa0, 0x02, ZC3XX_R010_CMOSSENSORSELECT},
5381 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, 5380 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
5382 {0xa0, 0x00, ZC3XX_R001_SYSTEMOPERATING}, 5381 {0xa0, 0x00, ZC3XX_R001_SYSTEMOPERATING},
5383 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, 5382 {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
5384 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, 5383 {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
5385 {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, 5384 {0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
5386 {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
5387 {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
5388 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, 5385 {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
5389 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, 5386 {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
5390 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, 5387 {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
@@ -5400,13 +5397,15 @@ static const struct usb_action tas5130cxx_Initial[] = { /* 640x480 */
5400 {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, 5397 {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
5401 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, 5398 {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
5402 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, 5399 {0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
5403 {0xa0, 0x95, ZC3XX_R18D_YTARGET}, 5400 {0xa0, 0x70, ZC3XX_R18D_YTARGET},
5404 {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, 5401 {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN},
5405 {0xa0, 0x00, 0x01ad}, 5402 {0xa0, 0x00, 0x01ad},
5406 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, 5403 {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
5407 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, 5404 {0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
5408 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, 5405 {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
5409 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, 5406 {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
5407 {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
5408 {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
5410 {} 5409 {}
5411}; 5410};
5412static const struct usb_action tas5130cxx_50HZ[] = { 5411static const struct usb_action tas5130cxx_50HZ[] = {
@@ -6424,11 +6423,11 @@ static int vga_2wr_probe(struct gspca_dev *gspca_dev)
6424 if (retword != 0) 6423 if (retword != 0)
6425 return 0x0e; /* PAS202BCB */ 6424 return 0x0e; /* PAS202BCB */
6426 6425
6427 start_2wr_probe(dev, 0x02); /* ?? */ 6426 start_2wr_probe(dev, 0x02); /* TAS5130C */
6428 i2c_write(gspca_dev, 0x01, 0xaa, 0x00); 6427 i2c_write(gspca_dev, 0x01, 0xaa, 0x00);
6429 retword = i2c_read(gspca_dev, 0x01); 6428 retword = i2c_read(gspca_dev, 0x01);
6430 if (retword != 0) 6429 if (retword != 0)
6431 return 0x02; /* ?? */ 6430 return 0x02; /* TAS5130C */
6432ov_check: 6431ov_check:
6433 reg_r(gspca_dev, 0x0010); /* ?? */ 6432 reg_r(gspca_dev, 0x0010); /* ?? */
6434 reg_r(gspca_dev, 0x0010); 6433 reg_r(gspca_dev, 0x0010);
@@ -6505,6 +6504,8 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
6505 reg_r(gspca_dev, 0x0010); 6504 reg_r(gspca_dev, 0x0010);
6506 /* value 0x4001 is meaningless */ 6505 /* value 0x4001 is meaningless */
6507 if (retword != 0x4001) { 6506 if (retword != 0x4001) {
6507 if ((retword & 0xff00) == 0x6400)
6508 return 0x02; /* TAS5130C */
6508 for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) { 6509 for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) {
6509 if (chipset_revision_sensor[i].revision == retword) { 6510 if (chipset_revision_sensor[i].revision == retword) {
6510 sd->chip_revision = retword; 6511 sd->chip_revision = retword;
@@ -6515,7 +6516,7 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
6515 } 6516 }
6516 } 6517 }
6517 6518
6518 reg_w(dev, 0x01, 0x0000); /* check ?? */ 6519 reg_w(dev, 0x01, 0x0000); /* check PB0330 */
6519 reg_w(dev, 0x01, 0x0001); 6520 reg_w(dev, 0x01, 0x0001);
6520 reg_w(dev, 0xdd, 0x008b); 6521 reg_w(dev, 0xdd, 0x008b);
6521 reg_w(dev, 0x0a, 0x0010); 6522 reg_w(dev, 0x0a, 0x0010);
@@ -6524,7 +6525,7 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
6524 retword = i2c_read(gspca_dev, 0x00); 6525 retword = i2c_read(gspca_dev, 0x00);
6525 if (retword != 0) { 6526 if (retword != 0) {
6526 PDEBUG(D_PROBE, "probe 3wr vga type 0a ?"); 6527 PDEBUG(D_PROBE, "probe 3wr vga type 0a ?");
6527 return 0x0a; /* ?? */ 6528 return 0x0a; /* PB0330 */
6528 } 6529 }
6529 6530
6530 reg_w(dev, 0x01, 0x0000); 6531 reg_w(dev, 0x01, 0x0000);
@@ -6673,6 +6674,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
6673 PDEBUG(D_PROBE, "Find Sensor HV7131B"); 6674 PDEBUG(D_PROBE, "Find Sensor HV7131B");
6674 sd->sensor = SENSOR_HV7131B; 6675 sd->sensor = SENSOR_HV7131B;
6675 break; 6676 break;
6677 case 0x02:
6678 PDEBUG(D_PROBE, "Sensor TAS5130C");
6679 sd->sensor = SENSOR_TAS5130CXX;
6680 break;
6676 case 0x04: 6681 case 0x04:
6677 PDEBUG(D_PROBE, "Find Sensor CS2102"); 6682 PDEBUG(D_PROBE, "Find Sensor CS2102");
6678 sd->sensor = SENSOR_CS2102; 6683 sd->sensor = SENSOR_CS2102;
@@ -6866,11 +6871,14 @@ static int sd_start(struct gspca_dev *gspca_dev)
6866 case SENSOR_GC0305: 6871 case SENSOR_GC0305:
6867 case SENSOR_OV7620: 6872 case SENSOR_OV7620:
6868 case SENSOR_PO2030: 6873 case SENSOR_PO2030:
6874 case SENSOR_TAS5130CXX:
6869 case SENSOR_TAS5130C_VF0250: 6875 case SENSOR_TAS5130C_VF0250:
6870/* msleep(100); * ?? */ 6876/* msleep(100); * ?? */
6871 reg_r(gspca_dev, 0x0002); /* --> 0x40 */ 6877 reg_r(gspca_dev, 0x0002); /* --> 0x40 */
6872 reg_w(dev, 0x09, 0x01ad); /* (from win traces) */ 6878 reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
6873 reg_w(dev, 0x15, 0x01ae); 6879 reg_w(dev, 0x15, 0x01ae);
6880 if (sd->sensor == SENSOR_TAS5130CXX)
6881 break;
6874 reg_w(dev, 0x0d, 0x003a); 6882 reg_w(dev, 0x0d, 0x003a);
6875 reg_w(dev, 0x02, 0x003b); 6883 reg_w(dev, 0x02, 0x003b);
6876 reg_w(dev, 0x00, 0x0038); 6884 reg_w(dev, 0x00, 0x0038);
@@ -6887,6 +6895,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
6887 break; 6895 break;
6888 case SENSOR_PAS202B: 6896 case SENSOR_PAS202B:
6889 case SENSOR_GC0305: 6897 case SENSOR_GC0305:
6898 case SENSOR_TAS5130CXX:
6890 reg_r(gspca_dev, 0x0008); 6899 reg_r(gspca_dev, 0x0008);
6891 /* fall thru */ 6900 /* fall thru */
6892 case SENSOR_PO2030: 6901 case SENSOR_PO2030:
@@ -6928,6 +6937,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
6928 reg_w(dev, 0x40, 0x0117); 6937 reg_w(dev, 0x40, 0x0117);
6929 break; 6938 break;
6930 case SENSOR_GC0305: 6939 case SENSOR_GC0305:
6940 case SENSOR_TAS5130CXX:
6931 reg_w(dev, 0x09, 0x01ad); /* (from win traces) */ 6941 reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
6932 reg_w(dev, 0x15, 0x01ae); 6942 reg_w(dev, 0x15, 0x01ae);
6933 /* fall thru */ 6943 /* fall thru */
@@ -7220,7 +7230,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
7220 {USB_DEVICE(0x0ac8, 0x0302), .driver_info = SENSOR_PAS106}, 7230 {USB_DEVICE(0x0ac8, 0x0302), .driver_info = SENSOR_PAS106},
7221 {USB_DEVICE(0x0ac8, 0x301b)}, 7231 {USB_DEVICE(0x0ac8, 0x301b)},
7222 {USB_DEVICE(0x0ac8, 0x303b)}, 7232 {USB_DEVICE(0x0ac8, 0x303b)},
7223 {USB_DEVICE(0x0ac8, 0x305b), .driver_info = SENSOR_TAS5130C_VF0250}, 7233 {USB_DEVICE(0x0ac8, 0x305b)},
7224 {USB_DEVICE(0x0ac8, 0x307b)}, 7234 {USB_DEVICE(0x0ac8, 0x307b)},
7225 {USB_DEVICE(0x10fd, 0x0128)}, 7235 {USB_DEVICE(0x10fd, 0x0128)},
7226 {USB_DEVICE(0x10fd, 0x804d)}, 7236 {USB_DEVICE(0x10fd, 0x804d)},
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 1c9bc94c905c..51f393d03a46 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -145,7 +145,7 @@ static int device_authorization(struct hdpvr_device *dev)
145#ifdef HDPVR_DEBUG 145#ifdef HDPVR_DEBUG
146 else { 146 else {
147 hex_dump_to_buffer(dev->usbc_buf, 46, 16, 1, print_buf, 147 hex_dump_to_buffer(dev->usbc_buf, 46, 16, 1, print_buf,
148 sizeof(print_buf), 0); 148 5*buf_size+1, 0);
149 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, 149 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
150 "Status request returned, len %d: %s\n", 150 "Status request returned, len %d: %s\n",
151 ret, print_buf); 151 ret, print_buf);
@@ -168,13 +168,13 @@ static int device_authorization(struct hdpvr_device *dev)
168 168
169 response = dev->usbc_buf+38; 169 response = dev->usbc_buf+38;
170#ifdef HDPVR_DEBUG 170#ifdef HDPVR_DEBUG
171 hex_dump_to_buffer(response, 8, 16, 1, print_buf, sizeof(print_buf), 0); 171 hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
172 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %s\n", 172 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %s\n",
173 print_buf); 173 print_buf);
174#endif 174#endif
175 challenge(response); 175 challenge(response);
176#ifdef HDPVR_DEBUG 176#ifdef HDPVR_DEBUG
177 hex_dump_to_buffer(response, 8, 16, 1, print_buf, sizeof(print_buf), 0); 177 hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
178 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n", 178 v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
179 print_buf); 179 print_buf);
180#endif 180#endif
@@ -376,8 +376,8 @@ static int hdpvr_probe(struct usb_interface *interface,
376 usb_set_intfdata(interface, dev); 376 usb_set_intfdata(interface, dev);
377 377
378 /* let the user know what node this device is now attached to */ 378 /* let the user know what node this device is now attached to */
379 v4l2_info(&dev->v4l2_dev, "device now attached to /dev/video%d\n", 379 v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
380 dev->video_dev->minor); 380 video_device_node_name(dev->video_dev));
381 return 0; 381 return 0;
382 382
383error: 383error:
@@ -391,13 +391,10 @@ error:
391static void hdpvr_disconnect(struct usb_interface *interface) 391static void hdpvr_disconnect(struct usb_interface *interface)
392{ 392{
393 struct hdpvr_device *dev; 393 struct hdpvr_device *dev;
394 int minor;
395 394
396 dev = usb_get_intfdata(interface); 395 dev = usb_get_intfdata(interface);
397 usb_set_intfdata(interface, NULL); 396 usb_set_intfdata(interface, NULL);
398 397
399 minor = dev->video_dev->minor;
400
401 /* prevent more I/O from starting and stop any ongoing */ 398 /* prevent more I/O from starting and stop any ongoing */
402 mutex_lock(&dev->io_mutex); 399 mutex_lock(&dev->io_mutex);
403 dev->status = STATUS_DISCONNECTED; 400 dev->status = STATUS_DISCONNECTED;
@@ -425,7 +422,8 @@ static void hdpvr_disconnect(struct usb_interface *interface)
425 422
426 atomic_dec(&dev_nr); 423 atomic_dec(&dev_nr);
427 424
428 v4l2_info(&dev->v4l2_dev, "device /dev/video%d disconnected\n", minor); 425 v4l2_info(&dev->v4l2_dev, "device %s disconnected\n",
426 video_device_node_name(dev->video_dev));
429 427
430 v4l2_device_unregister(&dev->v4l2_dev); 428 v4l2_device_unregister(&dev->v4l2_dev);
431 kfree(dev->usbc_buf); 429 kfree(dev->usbc_buf);
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index b5439cabb381..fdd782039e9d 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -523,7 +523,7 @@ static unsigned int hdpvr_poll(struct file *filp, poll_table *wait)
523 523
524 mutex_lock(&dev->io_mutex); 524 mutex_lock(&dev->io_mutex);
525 525
526 if (video_is_unregistered(dev->video_dev)) { 526 if (!video_is_registered(dev->video_dev)) {
527 mutex_unlock(&dev->io_mutex); 527 mutex_unlock(&dev->io_mutex);
528 return -EIO; 528 return -EIO;
529 } 529 }
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 64360d26b32d..b86e35386cee 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -353,6 +353,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
353 ir_type = IR_TYPE_RC5; 353 ir_type = IR_TYPE_RC5;
354 ir_codes = &ir_codes_fusionhdtv_mce_table; 354 ir_codes = &ir_codes_fusionhdtv_mce_table;
355 break; 355 break;
356 case 0x0b:
356 case 0x47: 357 case 0x47:
357 case 0x71: 358 case 0x71:
358 if (adap->id == I2C_HW_B_CX2388x || 359 if (adap->id == I2C_HW_B_CX2388x ||
@@ -422,7 +423,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
422 423
423 /* Make sure we are all setup before going on */ 424 /* Make sure we are all setup before going on */
424 if (!name || !ir->get_key || !ir_type || !ir_codes) { 425 if (!name || !ir->get_key || !ir_type || !ir_codes) {
425 dprintk(1, DEVNAME ": Unsupported device at address 0x%02x\n", 426 dprintk(1, ": Unsupported device at address 0x%02x\n",
426 addr); 427 addr);
427 err = -ENODEV; 428 err = -ENODEV;
428 goto err_out_free; 429 goto err_out_free;
@@ -437,7 +438,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
437 dev_name(&client->dev)); 438 dev_name(&client->dev));
438 439
439 /* init + register input device */ 440 /* init + register input device */
440 err = ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes); 441 err = ir_input_init(input_dev, &ir->ir, ir_type);
441 if (err < 0) 442 if (err < 0)
442 goto err_out_free; 443 goto err_out_free;
443 444
@@ -445,7 +446,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
445 input_dev->name = ir->name; 446 input_dev->name = ir->name;
446 input_dev->phys = ir->phys; 447 input_dev->phys = ir->phys;
447 448
448 err = input_register_device(ir->input); 449 err = ir_input_register(ir->input, ir->ir_codes);
449 if (err) 450 if (err)
450 goto err_out_free; 451 goto err_out_free;
451 452
@@ -459,8 +460,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
459 return 0; 460 return 0;
460 461
461 err_out_free: 462 err_out_free:
462 ir_input_free(input_dev);
463 input_free_device(input_dev);
464 kfree(ir); 463 kfree(ir);
465 return err; 464 return err;
466} 465}
@@ -473,8 +472,7 @@ static int ir_remove(struct i2c_client *client)
473 cancel_delayed_work_sync(&ir->work); 472 cancel_delayed_work_sync(&ir->work);
474 473
475 /* unregister device */ 474 /* unregister device */
476 ir_input_free(ir->input); 475 ir_input_unregister(ir->input);
477 input_unregister_device(ir->input);
478 476
479 /* free memory */ 477 /* free memory */
480 kfree(ir); 478 kfree(ir);
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index e707ef3086b2..babcabd73c08 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -985,8 +985,8 @@ int ivtv_v4l2_open(struct file *filp)
985 985
986 mutex_lock(&itv->serialize_lock); 986 mutex_lock(&itv->serialize_lock);
987 if (ivtv_init_on_first_open(itv)) { 987 if (ivtv_init_on_first_open(itv)) {
988 IVTV_ERR("Failed to initialize on minor %d\n", 988 IVTV_ERR("Failed to initialize on device %s\n",
989 vdev->minor); 989 video_device_node_name(vdev));
990 mutex_unlock(&itv->serialize_lock); 990 mutex_unlock(&itv->serialize_lock);
991 return -ENXIO; 991 return -ENXIO;
992 } 992 }
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 67699e3f2aaa..e12c6022373e 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -245,6 +245,7 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
245{ 245{
246 struct ivtv_stream *s = &itv->streams[type]; 246 struct ivtv_stream *s = &itv->streams[type];
247 int vfl_type = ivtv_stream_info[type].vfl_type; 247 int vfl_type = ivtv_stream_info[type].vfl_type;
248 const char *name;
248 int num; 249 int num;
249 250
250 if (s->vdev == NULL) 251 if (s->vdev == NULL)
@@ -268,24 +269,24 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
268 s->vdev = NULL; 269 s->vdev = NULL;
269 return -ENOMEM; 270 return -ENOMEM;
270 } 271 }
271 num = s->vdev->num; 272 name = video_device_node_name(s->vdev);
272 273
273 switch (vfl_type) { 274 switch (vfl_type) {
274 case VFL_TYPE_GRABBER: 275 case VFL_TYPE_GRABBER:
275 IVTV_INFO("Registered device video%d for %s (%d kB)\n", 276 IVTV_INFO("Registered device %s for %s (%d kB)\n",
276 num, s->name, itv->options.kilobytes[type]); 277 name, s->name, itv->options.kilobytes[type]);
277 break; 278 break;
278 case VFL_TYPE_RADIO: 279 case VFL_TYPE_RADIO:
279 IVTV_INFO("Registered device radio%d for %s\n", 280 IVTV_INFO("Registered device %s for %s\n",
280 num, s->name); 281 name, s->name);
281 break; 282 break;
282 case VFL_TYPE_VBI: 283 case VFL_TYPE_VBI:
283 if (itv->options.kilobytes[type]) 284 if (itv->options.kilobytes[type])
284 IVTV_INFO("Registered device vbi%d for %s (%d kB)\n", 285 IVTV_INFO("Registered device %s for %s (%d kB)\n",
285 num, s->name, itv->options.kilobytes[type]); 286 name, s->name, itv->options.kilobytes[type]);
286 else 287 else
287 IVTV_INFO("Registered device vbi%d for %s\n", 288 IVTV_INFO("Registered device %s for %s\n",
288 num, s->name); 289 name, s->name);
289 break; 290 break;
290 } 291 }
291 return 0; 292 return 0;
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 01e1eefcf1eb..6ffa64cd1c6d 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1681,7 +1681,6 @@ static struct video_device meye_template = {
1681 .fops = &meye_fops, 1681 .fops = &meye_fops,
1682 .ioctl_ops = &meye_ioctl_ops, 1682 .ioctl_ops = &meye_ioctl_ops,
1683 .release = video_device_release, 1683 .release = video_device_release,
1684 .minor = -1,
1685}; 1684};
1686 1685
1687#ifdef CONFIG_PM 1686#ifdef CONFIG_PM
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 45388d2ce2fd..b62c0bd3f8ea 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -17,9 +17,11 @@
17#include <media/v4l2-chip-ident.h> 17#include <media/v4l2-chip-ident.h>
18#include <media/soc_camera.h> 18#include <media/soc_camera.h>
19 19
20/* mt9m001 i2c address 0x5d 20/*
21 * mt9m001 i2c address 0x5d
21 * The platform has to define ctruct i2c_board_info objects and link to them 22 * The platform has to define ctruct i2c_board_info objects and link to them
22 * from struct soc_camera_link */ 23 * from struct soc_camera_link
24 */
23 25
24/* mt9m001 selected register addresses */ 26/* mt9m001 selected register addresses */
25#define MT9M001_CHIP_VERSION 0x00 27#define MT9M001_CHIP_VERSION 0x00
@@ -46,42 +48,50 @@
46#define MT9M001_COLUMN_SKIP 20 48#define MT9M001_COLUMN_SKIP 20
47#define MT9M001_ROW_SKIP 12 49#define MT9M001_ROW_SKIP 12
48 50
49static const struct soc_camera_data_format mt9m001_colour_formats[] = { 51/* MT9M001 has only one fixed colorspace per pixelcode */
50 /* Order important: first natively supported, 52struct mt9m001_datafmt {
51 * second supported with a GPIO extender */ 53 enum v4l2_mbus_pixelcode code;
52 { 54 enum v4l2_colorspace colorspace;
53 .name = "Bayer (sRGB) 10 bit", 55};
54 .depth = 10, 56
55 .fourcc = V4L2_PIX_FMT_SBGGR16, 57/* Find a data format by a pixel code in an array */
56 .colorspace = V4L2_COLORSPACE_SRGB, 58static const struct mt9m001_datafmt *mt9m001_find_datafmt(
57 }, { 59 enum v4l2_mbus_pixelcode code, const struct mt9m001_datafmt *fmt,
58 .name = "Bayer (sRGB) 8 bit", 60 int n)
59 .depth = 8, 61{
60 .fourcc = V4L2_PIX_FMT_SBGGR8, 62 int i;
61 .colorspace = V4L2_COLORSPACE_SRGB, 63 for (i = 0; i < n; i++)
62 } 64 if (fmt[i].code == code)
65 return fmt + i;
66
67 return NULL;
68}
69
70static const struct mt9m001_datafmt mt9m001_colour_fmts[] = {
71 /*
72 * Order important: first natively supported,
73 * second supported with a GPIO extender
74 */
75 {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
76 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
63}; 77};
64 78
65static const struct soc_camera_data_format mt9m001_monochrome_formats[] = { 79static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = {
66 /* Order important - see above */ 80 /* Order important - see above */
67 { 81 {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
68 .name = "Monochrome 10 bit", 82 {V4L2_MBUS_FMT_GREY8_1X8, V4L2_COLORSPACE_JPEG},
69 .depth = 10,
70 .fourcc = V4L2_PIX_FMT_Y16,
71 }, {
72 .name = "Monochrome 8 bit",
73 .depth = 8,
74 .fourcc = V4L2_PIX_FMT_GREY,
75 },
76}; 83};
77 84
78struct mt9m001 { 85struct mt9m001 {
79 struct v4l2_subdev subdev; 86 struct v4l2_subdev subdev;
80 struct v4l2_rect rect; /* Sensor window */ 87 struct v4l2_rect rect; /* Sensor window */
81 __u32 fourcc; 88 const struct mt9m001_datafmt *fmt;
89 const struct mt9m001_datafmt *fmts;
90 int num_fmts;
82 int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ 91 int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */
83 unsigned int gain; 92 unsigned int gain;
84 unsigned int exposure; 93 unsigned int exposure;
94 unsigned short y_skip_top; /* Lines to skip at the top */
85 unsigned char autoexposure; 95 unsigned char autoexposure;
86}; 96};
87 97
@@ -204,8 +214,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
204 const u16 hblank = 9, vblank = 25; 214 const u16 hblank = 9, vblank = 25;
205 unsigned int total_h; 215 unsigned int total_h;
206 216
207 if (mt9m001->fourcc == V4L2_PIX_FMT_SBGGR8 || 217 if (mt9m001->fmts == mt9m001_colour_fmts)
208 mt9m001->fourcc == V4L2_PIX_FMT_SBGGR16)
209 /* 218 /*
210 * Bayer format - even number of rows for simplicity, 219 * Bayer format - even number of rows for simplicity,
211 * but let the user play with the top row. 220 * but let the user play with the top row.
@@ -222,15 +231,17 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
222 soc_camera_limit_side(&rect.top, &rect.height, 231 soc_camera_limit_side(&rect.top, &rect.height,
223 MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT); 232 MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT);
224 233
225 total_h = rect.height + icd->y_skip_top + vblank; 234 total_h = rect.height + mt9m001->y_skip_top + vblank;
226 235
227 /* Blanking and start values - default... */ 236 /* Blanking and start values - default... */
228 ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank); 237 ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank);
229 if (!ret) 238 if (!ret)
230 ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank); 239 ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank);
231 240
232 /* The caller provides a supported format, as verified per 241 /*
233 * call to icd->try_fmt() */ 242 * The caller provides a supported format, as verified per
243 * call to icd->try_fmt()
244 */
234 if (!ret) 245 if (!ret)
235 ret = reg_write(client, MT9M001_COLUMN_START, rect.left); 246 ret = reg_write(client, MT9M001_COLUMN_START, rect.left);
236 if (!ret) 247 if (!ret)
@@ -239,7 +250,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
239 ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1); 250 ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1);
240 if (!ret) 251 if (!ret)
241 ret = reg_write(client, MT9M001_WINDOW_HEIGHT, 252 ret = reg_write(client, MT9M001_WINDOW_HEIGHT,
242 rect.height + icd->y_skip_top - 1); 253 rect.height + mt9m001->y_skip_top - 1);
243 if (!ret && mt9m001->autoexposure) { 254 if (!ret && mt9m001->autoexposure) {
244 ret = reg_write(client, MT9M001_SHUTTER_WIDTH, total_h); 255 ret = reg_write(client, MT9M001_SHUTTER_WIDTH, total_h);
245 if (!ret) { 256 if (!ret) {
@@ -283,32 +294,32 @@ static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
283 return 0; 294 return 0;
284} 295}
285 296
286static int mt9m001_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 297static int mt9m001_g_fmt(struct v4l2_subdev *sd,
298 struct v4l2_mbus_framefmt *mf)
287{ 299{
288 struct i2c_client *client = sd->priv; 300 struct i2c_client *client = sd->priv;
289 struct mt9m001 *mt9m001 = to_mt9m001(client); 301 struct mt9m001 *mt9m001 = to_mt9m001(client);
290 struct v4l2_pix_format *pix = &f->fmt.pix;
291 302
292 pix->width = mt9m001->rect.width; 303 mf->width = mt9m001->rect.width;
293 pix->height = mt9m001->rect.height; 304 mf->height = mt9m001->rect.height;
294 pix->pixelformat = mt9m001->fourcc; 305 mf->code = mt9m001->fmt->code;
295 pix->field = V4L2_FIELD_NONE; 306 mf->colorspace = mt9m001->fmt->colorspace;
296 pix->colorspace = V4L2_COLORSPACE_SRGB; 307 mf->field = V4L2_FIELD_NONE;
297 308
298 return 0; 309 return 0;
299} 310}
300 311
301static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 312static int mt9m001_s_fmt(struct v4l2_subdev *sd,
313 struct v4l2_mbus_framefmt *mf)
302{ 314{
303 struct i2c_client *client = sd->priv; 315 struct i2c_client *client = sd->priv;
304 struct mt9m001 *mt9m001 = to_mt9m001(client); 316 struct mt9m001 *mt9m001 = to_mt9m001(client);
305 struct v4l2_pix_format *pix = &f->fmt.pix;
306 struct v4l2_crop a = { 317 struct v4l2_crop a = {
307 .c = { 318 .c = {
308 .left = mt9m001->rect.left, 319 .left = mt9m001->rect.left,
309 .top = mt9m001->rect.top, 320 .top = mt9m001->rect.top,
310 .width = pix->width, 321 .width = mf->width,
311 .height = pix->height, 322 .height = mf->height,
312 }, 323 },
313 }; 324 };
314 int ret; 325 int ret;
@@ -316,28 +327,39 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
316 /* No support for scaling so far, just crop. TODO: use skipping */ 327 /* No support for scaling so far, just crop. TODO: use skipping */
317 ret = mt9m001_s_crop(sd, &a); 328 ret = mt9m001_s_crop(sd, &a);
318 if (!ret) { 329 if (!ret) {
319 pix->width = mt9m001->rect.width; 330 mf->width = mt9m001->rect.width;
320 pix->height = mt9m001->rect.height; 331 mf->height = mt9m001->rect.height;
321 mt9m001->fourcc = pix->pixelformat; 332 mt9m001->fmt = mt9m001_find_datafmt(mf->code,
333 mt9m001->fmts, mt9m001->num_fmts);
334 mf->colorspace = mt9m001->fmt->colorspace;
322 } 335 }
323 336
324 return ret; 337 return ret;
325} 338}
326 339
327static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 340static int mt9m001_try_fmt(struct v4l2_subdev *sd,
341 struct v4l2_mbus_framefmt *mf)
328{ 342{
329 struct i2c_client *client = sd->priv; 343 struct i2c_client *client = sd->priv;
330 struct soc_camera_device *icd = client->dev.platform_data; 344 struct mt9m001 *mt9m001 = to_mt9m001(client);
331 struct v4l2_pix_format *pix = &f->fmt.pix; 345 const struct mt9m001_datafmt *fmt;
332 346
333 v4l_bound_align_image(&pix->width, MT9M001_MIN_WIDTH, 347 v4l_bound_align_image(&mf->width, MT9M001_MIN_WIDTH,
334 MT9M001_MAX_WIDTH, 1, 348 MT9M001_MAX_WIDTH, 1,
335 &pix->height, MT9M001_MIN_HEIGHT + icd->y_skip_top, 349 &mf->height, MT9M001_MIN_HEIGHT + mt9m001->y_skip_top,
336 MT9M001_MAX_HEIGHT + icd->y_skip_top, 0, 0); 350 MT9M001_MAX_HEIGHT + mt9m001->y_skip_top, 0, 0);
351
352 if (mt9m001->fmts == mt9m001_colour_fmts)
353 mf->height = ALIGN(mf->height - 1, 2);
337 354
338 if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || 355 fmt = mt9m001_find_datafmt(mf->code, mt9m001->fmts,
339 pix->pixelformat == V4L2_PIX_FMT_SBGGR16) 356 mt9m001->num_fmts);
340 pix->height = ALIGN(pix->height - 1, 2); 357 if (!fmt) {
358 fmt = mt9m001->fmt;
359 mf->code = fmt->code;
360 }
361
362 mf->colorspace = fmt->colorspace;
341 363
342 return 0; 364 return 0;
343} 365}
@@ -552,7 +574,7 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
552 if (ctrl->value) { 574 if (ctrl->value) {
553 const u16 vblank = 25; 575 const u16 vblank = 25;
554 unsigned int total_h = mt9m001->rect.height + 576 unsigned int total_h = mt9m001->rect.height +
555 icd->y_skip_top + vblank; 577 mt9m001->y_skip_top + vblank;
556 if (reg_write(client, MT9M001_SHUTTER_WIDTH, 578 if (reg_write(client, MT9M001_SHUTTER_WIDTH,
557 total_h) < 0) 579 total_h) < 0)
558 return -EIO; 580 return -EIO;
@@ -568,8 +590,10 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
568 return 0; 590 return 0;
569} 591}
570 592
571/* Interface active, can use i2c. If it fails, it can indeed mean, that 593/*
572 * this wasn't our capture interface, so, we wait for the right one */ 594 * Interface active, can use i2c. If it fails, it can indeed mean, that
595 * this wasn't our capture interface, so, we wait for the right one
596 */
573static int mt9m001_video_probe(struct soc_camera_device *icd, 597static int mt9m001_video_probe(struct soc_camera_device *icd,
574 struct i2c_client *client) 598 struct i2c_client *client)
575{ 599{
@@ -579,8 +603,10 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
579 unsigned long flags; 603 unsigned long flags;
580 int ret; 604 int ret;
581 605
582 /* We must have a parent by now. And it cannot be a wrong one. 606 /*
583 * So this entire test is completely redundant. */ 607 * We must have a parent by now. And it cannot be a wrong one.
608 * So this entire test is completely redundant.
609 */
584 if (!icd->dev.parent || 610 if (!icd->dev.parent ||
585 to_soc_camera_host(icd->dev.parent)->nr != icd->iface) 611 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
586 return -ENODEV; 612 return -ENODEV;
@@ -597,11 +623,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
597 case 0x8411: 623 case 0x8411:
598 case 0x8421: 624 case 0x8421:
599 mt9m001->model = V4L2_IDENT_MT9M001C12ST; 625 mt9m001->model = V4L2_IDENT_MT9M001C12ST;
600 icd->formats = mt9m001_colour_formats; 626 mt9m001->fmts = mt9m001_colour_fmts;
601 break; 627 break;
602 case 0x8431: 628 case 0x8431:
603 mt9m001->model = V4L2_IDENT_MT9M001C12STM; 629 mt9m001->model = V4L2_IDENT_MT9M001C12STM;
604 icd->formats = mt9m001_monochrome_formats; 630 mt9m001->fmts = mt9m001_monochrome_fmts;
605 break; 631 break;
606 default: 632 default:
607 dev_err(&client->dev, 633 dev_err(&client->dev,
@@ -609,7 +635,7 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
609 return -ENODEV; 635 return -ENODEV;
610 } 636 }
611 637
612 icd->num_formats = 0; 638 mt9m001->num_fmts = 0;
613 639
614 /* 640 /*
615 * This is a 10bit sensor, so by default we only allow 10bit. 641 * This is a 10bit sensor, so by default we only allow 10bit.
@@ -622,14 +648,14 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
622 flags = SOCAM_DATAWIDTH_10; 648 flags = SOCAM_DATAWIDTH_10;
623 649
624 if (flags & SOCAM_DATAWIDTH_10) 650 if (flags & SOCAM_DATAWIDTH_10)
625 icd->num_formats++; 651 mt9m001->num_fmts++;
626 else 652 else
627 icd->formats++; 653 mt9m001->fmts++;
628 654
629 if (flags & SOCAM_DATAWIDTH_8) 655 if (flags & SOCAM_DATAWIDTH_8)
630 icd->num_formats++; 656 mt9m001->num_fmts++;
631 657
632 mt9m001->fourcc = icd->formats->fourcc; 658 mt9m001->fmt = &mt9m001->fmts[0];
633 659
634 dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data, 660 dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data,
635 data == 0x8431 ? "C12STM" : "C12ST"); 661 data == 0x8431 ? "C12STM" : "C12ST");
@@ -655,6 +681,16 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
655 icl->free_bus(icl); 681 icl->free_bus(icl);
656} 682}
657 683
684static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
685{
686 struct i2c_client *client = sd->priv;
687 struct mt9m001 *mt9m001 = to_mt9m001(client);
688
689 *lines = mt9m001->y_skip_top;
690
691 return 0;
692}
693
658static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { 694static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
659 .g_ctrl = mt9m001_g_ctrl, 695 .g_ctrl = mt9m001_g_ctrl,
660 .s_ctrl = mt9m001_s_ctrl, 696 .s_ctrl = mt9m001_s_ctrl,
@@ -665,19 +701,38 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
665#endif 701#endif
666}; 702};
667 703
704static int mt9m001_enum_fmt(struct v4l2_subdev *sd, int index,
705 enum v4l2_mbus_pixelcode *code)
706{
707 struct i2c_client *client = sd->priv;
708 struct mt9m001 *mt9m001 = to_mt9m001(client);
709
710 if ((unsigned int)index >= mt9m001->num_fmts)
711 return -EINVAL;
712
713 *code = mt9m001->fmts[index].code;
714 return 0;
715}
716
668static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { 717static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
669 .s_stream = mt9m001_s_stream, 718 .s_stream = mt9m001_s_stream,
670 .s_fmt = mt9m001_s_fmt, 719 .s_mbus_fmt = mt9m001_s_fmt,
671 .g_fmt = mt9m001_g_fmt, 720 .g_mbus_fmt = mt9m001_g_fmt,
672 .try_fmt = mt9m001_try_fmt, 721 .try_mbus_fmt = mt9m001_try_fmt,
673 .s_crop = mt9m001_s_crop, 722 .s_crop = mt9m001_s_crop,
674 .g_crop = mt9m001_g_crop, 723 .g_crop = mt9m001_g_crop,
675 .cropcap = mt9m001_cropcap, 724 .cropcap = mt9m001_cropcap,
725 .enum_mbus_fmt = mt9m001_enum_fmt,
726};
727
728static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
729 .g_skip_top_lines = mt9m001_g_skip_top_lines,
676}; 730};
677 731
678static struct v4l2_subdev_ops mt9m001_subdev_ops = { 732static struct v4l2_subdev_ops mt9m001_subdev_ops = {
679 .core = &mt9m001_subdev_core_ops, 733 .core = &mt9m001_subdev_core_ops,
680 .video = &mt9m001_subdev_video_ops, 734 .video = &mt9m001_subdev_video_ops,
735 .sensor = &mt9m001_subdev_sensor_ops,
681}; 736};
682 737
683static int mt9m001_probe(struct i2c_client *client, 738static int mt9m001_probe(struct i2c_client *client,
@@ -714,15 +769,17 @@ static int mt9m001_probe(struct i2c_client *client,
714 769
715 /* Second stage probe - when a capture adapter is there */ 770 /* Second stage probe - when a capture adapter is there */
716 icd->ops = &mt9m001_ops; 771 icd->ops = &mt9m001_ops;
717 icd->y_skip_top = 0;
718 772
773 mt9m001->y_skip_top = 0;
719 mt9m001->rect.left = MT9M001_COLUMN_SKIP; 774 mt9m001->rect.left = MT9M001_COLUMN_SKIP;
720 mt9m001->rect.top = MT9M001_ROW_SKIP; 775 mt9m001->rect.top = MT9M001_ROW_SKIP;
721 mt9m001->rect.width = MT9M001_MAX_WIDTH; 776 mt9m001->rect.width = MT9M001_MAX_WIDTH;
722 mt9m001->rect.height = MT9M001_MAX_HEIGHT; 777 mt9m001->rect.height = MT9M001_MAX_HEIGHT;
723 778
724 /* Simulated autoexposure. If enabled, we calculate shutter width 779 /*
725 * ourselves in the driver based on vertical blanking and frame width */ 780 * Simulated autoexposure. If enabled, we calculate shutter width
781 * ourselves in the driver based on vertical blanking and frame width
782 */
726 mt9m001->autoexposure = 1; 783 mt9m001->autoexposure = 1;
727 784
728 ret = mt9m001_video_probe(icd, client); 785 ret = mt9m001_video_probe(icd, client);
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index 90da699601ea..d35f536f9fc3 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -123,23 +123,34 @@
123#define MT9M111_MAX_HEIGHT 1024 123#define MT9M111_MAX_HEIGHT 1024
124#define MT9M111_MAX_WIDTH 1280 124#define MT9M111_MAX_WIDTH 1280
125 125
126#define COL_FMT(_name, _depth, _fourcc, _colorspace) \ 126/* MT9M111 has only one fixed colorspace per pixelcode */
127 { .name = _name, .depth = _depth, .fourcc = _fourcc, \ 127struct mt9m111_datafmt {
128 .colorspace = _colorspace } 128 enum v4l2_mbus_pixelcode code;
129#define RGB_FMT(_name, _depth, _fourcc) \ 129 enum v4l2_colorspace colorspace;
130 COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_SRGB) 130};
131#define JPG_FMT(_name, _depth, _fourcc) \ 131
132 COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_JPEG) 132/* Find a data format by a pixel code in an array */
133 133static const struct mt9m111_datafmt *mt9m111_find_datafmt(
134static const struct soc_camera_data_format mt9m111_colour_formats[] = { 134 enum v4l2_mbus_pixelcode code, const struct mt9m111_datafmt *fmt,
135 JPG_FMT("CbYCrY 16 bit", 16, V4L2_PIX_FMT_UYVY), 135 int n)
136 JPG_FMT("CrYCbY 16 bit", 16, V4L2_PIX_FMT_VYUY), 136{
137 JPG_FMT("YCbYCr 16 bit", 16, V4L2_PIX_FMT_YUYV), 137 int i;
138 JPG_FMT("YCrYCb 16 bit", 16, V4L2_PIX_FMT_YVYU), 138 for (i = 0; i < n; i++)
139 RGB_FMT("RGB 565", 16, V4L2_PIX_FMT_RGB565), 139 if (fmt[i].code == code)
140 RGB_FMT("RGB 555", 16, V4L2_PIX_FMT_RGB555), 140 return fmt + i;
141 RGB_FMT("Bayer (sRGB) 10 bit", 10, V4L2_PIX_FMT_SBGGR16), 141
142 RGB_FMT("Bayer (sRGB) 8 bit", 8, V4L2_PIX_FMT_SBGGR8), 142 return NULL;
143}
144
145static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
146 {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG},
147 {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG},
148 {V4L2_MBUS_FMT_YUYV8_2X8_BE, V4L2_COLORSPACE_JPEG},
149 {V4L2_MBUS_FMT_YVYU8_2X8_BE, V4L2_COLORSPACE_JPEG},
150 {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
151 {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
152 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
153 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
143}; 154};
144 155
145enum mt9m111_context { 156enum mt9m111_context {
@@ -152,7 +163,7 @@ struct mt9m111 {
152 int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */ 163 int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */
153 enum mt9m111_context context; 164 enum mt9m111_context context;
154 struct v4l2_rect rect; 165 struct v4l2_rect rect;
155 u32 pixfmt; 166 const struct mt9m111_datafmt *fmt;
156 unsigned int gain; 167 unsigned int gain;
157 unsigned char autoexposure; 168 unsigned char autoexposure;
158 unsigned char datawidth; 169 unsigned char datawidth;
@@ -258,8 +269,8 @@ static int mt9m111_setup_rect(struct i2c_client *client,
258 int width = rect->width; 269 int width = rect->width;
259 int height = rect->height; 270 int height = rect->height;
260 271
261 if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 || 272 if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
262 mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) 273 mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE)
263 is_raw_format = 1; 274 is_raw_format = 1;
264 else 275 else
265 is_raw_format = 0; 276 is_raw_format = 0;
@@ -307,7 +318,8 @@ static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt)
307 318
308static int mt9m111_setfmt_bayer8(struct i2c_client *client) 319static int mt9m111_setfmt_bayer8(struct i2c_client *client)
309{ 320{
310 return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER); 321 return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER |
322 MT9M111_OUTFMT_RGB);
311} 323}
312 324
313static int mt9m111_setfmt_bayer10(struct i2c_client *client) 325static int mt9m111_setfmt_bayer10(struct i2c_client *client)
@@ -401,8 +413,8 @@ static int mt9m111_make_rect(struct i2c_client *client,
401{ 413{
402 struct mt9m111 *mt9m111 = to_mt9m111(client); 414 struct mt9m111 *mt9m111 = to_mt9m111(client);
403 415
404 if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 || 416 if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
405 mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) { 417 mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
406 /* Bayer format - even size lengths */ 418 /* Bayer format - even size lengths */
407 rect->width = ALIGN(rect->width, 2); 419 rect->width = ALIGN(rect->width, 2);
408 rect->height = ALIGN(rect->height, 2); 420 rect->height = ALIGN(rect->height, 2);
@@ -460,120 +472,139 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
460 return 0; 472 return 0;
461} 473}
462 474
463static int mt9m111_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 475static int mt9m111_g_fmt(struct v4l2_subdev *sd,
476 struct v4l2_mbus_framefmt *mf)
464{ 477{
465 struct i2c_client *client = sd->priv; 478 struct i2c_client *client = sd->priv;
466 struct mt9m111 *mt9m111 = to_mt9m111(client); 479 struct mt9m111 *mt9m111 = to_mt9m111(client);
467 struct v4l2_pix_format *pix = &f->fmt.pix;
468 480
469 pix->width = mt9m111->rect.width; 481 mf->width = mt9m111->rect.width;
470 pix->height = mt9m111->rect.height; 482 mf->height = mt9m111->rect.height;
471 pix->pixelformat = mt9m111->pixfmt; 483 mf->code = mt9m111->fmt->code;
472 pix->field = V4L2_FIELD_NONE; 484 mf->field = V4L2_FIELD_NONE;
473 pix->colorspace = V4L2_COLORSPACE_SRGB;
474 485
475 return 0; 486 return 0;
476} 487}
477 488
478static int mt9m111_set_pixfmt(struct i2c_client *client, u32 pixfmt) 489static int mt9m111_set_pixfmt(struct i2c_client *client,
490 enum v4l2_mbus_pixelcode code)
479{ 491{
480 struct mt9m111 *mt9m111 = to_mt9m111(client); 492 struct mt9m111 *mt9m111 = to_mt9m111(client);
481 int ret; 493 int ret;
482 494
483 switch (pixfmt) { 495 switch (code) {
484 case V4L2_PIX_FMT_SBGGR8: 496 case V4L2_MBUS_FMT_SBGGR8_1X8:
485 ret = mt9m111_setfmt_bayer8(client); 497 ret = mt9m111_setfmt_bayer8(client);
486 break; 498 break;
487 case V4L2_PIX_FMT_SBGGR16: 499 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
488 ret = mt9m111_setfmt_bayer10(client); 500 ret = mt9m111_setfmt_bayer10(client);
489 break; 501 break;
490 case V4L2_PIX_FMT_RGB555: 502 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
491 ret = mt9m111_setfmt_rgb555(client); 503 ret = mt9m111_setfmt_rgb555(client);
492 break; 504 break;
493 case V4L2_PIX_FMT_RGB565: 505 case V4L2_MBUS_FMT_RGB565_2X8_LE:
494 ret = mt9m111_setfmt_rgb565(client); 506 ret = mt9m111_setfmt_rgb565(client);
495 break; 507 break;
496 case V4L2_PIX_FMT_UYVY: 508 case V4L2_MBUS_FMT_YUYV8_2X8_BE:
497 mt9m111->swap_yuv_y_chromas = 0; 509 mt9m111->swap_yuv_y_chromas = 0;
498 mt9m111->swap_yuv_cb_cr = 0; 510 mt9m111->swap_yuv_cb_cr = 0;
499 ret = mt9m111_setfmt_yuv(client); 511 ret = mt9m111_setfmt_yuv(client);
500 break; 512 break;
501 case V4L2_PIX_FMT_VYUY: 513 case V4L2_MBUS_FMT_YVYU8_2X8_BE:
502 mt9m111->swap_yuv_y_chromas = 0; 514 mt9m111->swap_yuv_y_chromas = 0;
503 mt9m111->swap_yuv_cb_cr = 1; 515 mt9m111->swap_yuv_cb_cr = 1;
504 ret = mt9m111_setfmt_yuv(client); 516 ret = mt9m111_setfmt_yuv(client);
505 break; 517 break;
506 case V4L2_PIX_FMT_YUYV: 518 case V4L2_MBUS_FMT_YUYV8_2X8_LE:
507 mt9m111->swap_yuv_y_chromas = 1; 519 mt9m111->swap_yuv_y_chromas = 1;
508 mt9m111->swap_yuv_cb_cr = 0; 520 mt9m111->swap_yuv_cb_cr = 0;
509 ret = mt9m111_setfmt_yuv(client); 521 ret = mt9m111_setfmt_yuv(client);
510 break; 522 break;
511 case V4L2_PIX_FMT_YVYU: 523 case V4L2_MBUS_FMT_YVYU8_2X8_LE:
512 mt9m111->swap_yuv_y_chromas = 1; 524 mt9m111->swap_yuv_y_chromas = 1;
513 mt9m111->swap_yuv_cb_cr = 1; 525 mt9m111->swap_yuv_cb_cr = 1;
514 ret = mt9m111_setfmt_yuv(client); 526 ret = mt9m111_setfmt_yuv(client);
515 break; 527 break;
516 default: 528 default:
517 dev_err(&client->dev, "Pixel format not handled : %x\n", 529 dev_err(&client->dev, "Pixel format not handled : %x\n",
518 pixfmt); 530 code);
519 ret = -EINVAL; 531 ret = -EINVAL;
520 } 532 }
521 533
522 if (!ret)
523 mt9m111->pixfmt = pixfmt;
524
525 return ret; 534 return ret;
526} 535}
527 536
528static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 537static int mt9m111_s_fmt(struct v4l2_subdev *sd,
538 struct v4l2_mbus_framefmt *mf)
529{ 539{
530 struct i2c_client *client = sd->priv; 540 struct i2c_client *client = sd->priv;
541 const struct mt9m111_datafmt *fmt;
531 struct mt9m111 *mt9m111 = to_mt9m111(client); 542 struct mt9m111 *mt9m111 = to_mt9m111(client);
532 struct v4l2_pix_format *pix = &f->fmt.pix;
533 struct v4l2_rect rect = { 543 struct v4l2_rect rect = {
534 .left = mt9m111->rect.left, 544 .left = mt9m111->rect.left,
535 .top = mt9m111->rect.top, 545 .top = mt9m111->rect.top,
536 .width = pix->width, 546 .width = mf->width,
537 .height = pix->height, 547 .height = mf->height,
538 }; 548 };
539 int ret; 549 int ret;
540 550
551 fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts,
552 ARRAY_SIZE(mt9m111_colour_fmts));
553 if (!fmt)
554 return -EINVAL;
555
541 dev_dbg(&client->dev, 556 dev_dbg(&client->dev,
542 "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", __func__, 557 "%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__,
543 pix->pixelformat, rect.left, rect.top, rect.width, rect.height); 558 mf->code, rect.left, rect.top, rect.width, rect.height);
544 559
545 ret = mt9m111_make_rect(client, &rect); 560 ret = mt9m111_make_rect(client, &rect);
546 if (!ret) 561 if (!ret)
547 ret = mt9m111_set_pixfmt(client, pix->pixelformat); 562 ret = mt9m111_set_pixfmt(client, mf->code);
548 if (!ret) 563 if (!ret) {
549 mt9m111->rect = rect; 564 mt9m111->rect = rect;
565 mt9m111->fmt = fmt;
566 mf->colorspace = fmt->colorspace;
567 }
568
550 return ret; 569 return ret;
551} 570}
552 571
553static int mt9m111_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 572static int mt9m111_try_fmt(struct v4l2_subdev *sd,
573 struct v4l2_mbus_framefmt *mf)
554{ 574{
555 struct v4l2_pix_format *pix = &f->fmt.pix; 575 struct i2c_client *client = sd->priv;
556 bool bayer = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || 576 struct mt9m111 *mt9m111 = to_mt9m111(client);
557 pix->pixelformat == V4L2_PIX_FMT_SBGGR16; 577 const struct mt9m111_datafmt *fmt;
578 bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
579 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
580
581 fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts,
582 ARRAY_SIZE(mt9m111_colour_fmts));
583 if (!fmt) {
584 fmt = mt9m111->fmt;
585 mf->code = fmt->code;
586 }
558 587
559 /* 588 /*
560 * With Bayer format enforce even side lengths, but let the user play 589 * With Bayer format enforce even side lengths, but let the user play
561 * with the starting pixel 590 * with the starting pixel
562 */ 591 */
563 592
564 if (pix->height > MT9M111_MAX_HEIGHT) 593 if (mf->height > MT9M111_MAX_HEIGHT)
565 pix->height = MT9M111_MAX_HEIGHT; 594 mf->height = MT9M111_MAX_HEIGHT;
566 else if (pix->height < 2) 595 else if (mf->height < 2)
567 pix->height = 2; 596 mf->height = 2;
568 else if (bayer) 597 else if (bayer)
569 pix->height = ALIGN(pix->height, 2); 598 mf->height = ALIGN(mf->height, 2);
570 599
571 if (pix->width > MT9M111_MAX_WIDTH) 600 if (mf->width > MT9M111_MAX_WIDTH)
572 pix->width = MT9M111_MAX_WIDTH; 601 mf->width = MT9M111_MAX_WIDTH;
573 else if (pix->width < 2) 602 else if (mf->width < 2)
574 pix->width = 2; 603 mf->width = 2;
575 else if (bayer) 604 else if (bayer)
576 pix->width = ALIGN(pix->width, 2); 605 mf->width = ALIGN(mf->width, 2);
606
607 mf->colorspace = fmt->colorspace;
577 608
578 return 0; 609 return 0;
579} 610}
@@ -863,7 +894,7 @@ static int mt9m111_restore_state(struct i2c_client *client)
863 struct mt9m111 *mt9m111 = to_mt9m111(client); 894 struct mt9m111 *mt9m111 = to_mt9m111(client);
864 895
865 mt9m111_set_context(client, mt9m111->context); 896 mt9m111_set_context(client, mt9m111->context);
866 mt9m111_set_pixfmt(client, mt9m111->pixfmt); 897 mt9m111_set_pixfmt(client, mt9m111->fmt->code);
867 mt9m111_setup_rect(client, &mt9m111->rect); 898 mt9m111_setup_rect(client, &mt9m111->rect);
868 mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS); 899 mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
869 mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); 900 mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
@@ -952,9 +983,6 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
952 goto ei2c; 983 goto ei2c;
953 } 984 }
954 985
955 icd->formats = mt9m111_colour_formats;
956 icd->num_formats = ARRAY_SIZE(mt9m111_colour_formats);
957
958 dev_info(&client->dev, "Detected a MT9M11x chip ID %x\n", data); 986 dev_info(&client->dev, "Detected a MT9M11x chip ID %x\n", data);
959 987
960ei2c: 988ei2c:
@@ -971,13 +999,24 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
971#endif 999#endif
972}; 1000};
973 1001
1002static int mt9m111_enum_fmt(struct v4l2_subdev *sd, int index,
1003 enum v4l2_mbus_pixelcode *code)
1004{
1005 if ((unsigned int)index >= ARRAY_SIZE(mt9m111_colour_fmts))
1006 return -EINVAL;
1007
1008 *code = mt9m111_colour_fmts[index].code;
1009 return 0;
1010}
1011
974static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { 1012static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
975 .s_fmt = mt9m111_s_fmt, 1013 .s_mbus_fmt = mt9m111_s_fmt,
976 .g_fmt = mt9m111_g_fmt, 1014 .g_mbus_fmt = mt9m111_g_fmt,
977 .try_fmt = mt9m111_try_fmt, 1015 .try_mbus_fmt = mt9m111_try_fmt,
978 .s_crop = mt9m111_s_crop, 1016 .s_crop = mt9m111_s_crop,
979 .g_crop = mt9m111_g_crop, 1017 .g_crop = mt9m111_g_crop,
980 .cropcap = mt9m111_cropcap, 1018 .cropcap = mt9m111_cropcap,
1019 .enum_mbus_fmt = mt9m111_enum_fmt,
981}; 1020};
982 1021
983static struct v4l2_subdev_ops mt9m111_subdev_ops = { 1022static struct v4l2_subdev_ops mt9m111_subdev_ops = {
@@ -1019,12 +1058,12 @@ static int mt9m111_probe(struct i2c_client *client,
1019 1058
1020 /* Second stage probe - when a capture adapter is there */ 1059 /* Second stage probe - when a capture adapter is there */
1021 icd->ops = &mt9m111_ops; 1060 icd->ops = &mt9m111_ops;
1022 icd->y_skip_top = 0;
1023 1061
1024 mt9m111->rect.left = MT9M111_MIN_DARK_COLS; 1062 mt9m111->rect.left = MT9M111_MIN_DARK_COLS;
1025 mt9m111->rect.top = MT9M111_MIN_DARK_ROWS; 1063 mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
1026 mt9m111->rect.width = MT9M111_MAX_WIDTH; 1064 mt9m111->rect.width = MT9M111_MAX_WIDTH;
1027 mt9m111->rect.height = MT9M111_MAX_HEIGHT; 1065 mt9m111->rect.height = MT9M111_MAX_HEIGHT;
1066 mt9m111->fmt = &mt9m111_colour_fmts[0];
1028 1067
1029 ret = mt9m111_video_probe(icd, client); 1068 ret = mt9m111_video_probe(icd, client);
1030 if (ret) { 1069 if (ret) {
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 6966f644977e..a9061bff79b2 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -17,9 +17,11 @@
17#include <media/v4l2-chip-ident.h> 17#include <media/v4l2-chip-ident.h>
18#include <media/soc_camera.h> 18#include <media/soc_camera.h>
19 19
20/* mt9t031 i2c address 0x5d 20/*
21 * mt9t031 i2c address 0x5d
21 * The platform has to define i2c_board_info and link to it from 22 * The platform has to define i2c_board_info and link to it from
22 * struct soc_camera_link */ 23 * struct soc_camera_link
24 */
23 25
24/* mt9t031 selected register addresses */ 26/* mt9t031 selected register addresses */
25#define MT9T031_CHIP_VERSION 0x00 27#define MT9T031_CHIP_VERSION 0x00
@@ -58,15 +60,6 @@
58 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | \ 60 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | \
59 SOCAM_MASTER | SOCAM_DATAWIDTH_10) 61 SOCAM_MASTER | SOCAM_DATAWIDTH_10)
60 62
61static const struct soc_camera_data_format mt9t031_colour_formats[] = {
62 {
63 .name = "Bayer (sRGB) 10 bit",
64 .depth = 10,
65 .fourcc = V4L2_PIX_FMT_SGRBG10,
66 .colorspace = V4L2_COLORSPACE_SRGB,
67 }
68};
69
70struct mt9t031 { 63struct mt9t031 {
71 struct v4l2_subdev subdev; 64 struct v4l2_subdev subdev;
72 struct v4l2_rect rect; /* Sensor window */ 65 struct v4l2_rect rect; /* Sensor window */
@@ -74,6 +67,7 @@ struct mt9t031 {
74 u16 xskip; 67 u16 xskip;
75 u16 yskip; 68 u16 yskip;
76 unsigned int gain; 69 unsigned int gain;
70 unsigned short y_skip_top; /* Lines to skip at the top */
77 unsigned int exposure; 71 unsigned int exposure;
78 unsigned char autoexposure; 72 unsigned char autoexposure;
79}; 73};
@@ -207,6 +201,71 @@ static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd)
207 return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM); 201 return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM);
208} 202}
209 203
204enum {
205 MT9T031_CTRL_VFLIP,
206 MT9T031_CTRL_HFLIP,
207 MT9T031_CTRL_GAIN,
208 MT9T031_CTRL_EXPOSURE,
209 MT9T031_CTRL_EXPOSURE_AUTO,
210};
211
212static const struct v4l2_queryctrl mt9t031_controls[] = {
213 [MT9T031_CTRL_VFLIP] = {
214 .id = V4L2_CID_VFLIP,
215 .type = V4L2_CTRL_TYPE_BOOLEAN,
216 .name = "Flip Vertically",
217 .minimum = 0,
218 .maximum = 1,
219 .step = 1,
220 .default_value = 0,
221 },
222 [MT9T031_CTRL_HFLIP] = {
223 .id = V4L2_CID_HFLIP,
224 .type = V4L2_CTRL_TYPE_BOOLEAN,
225 .name = "Flip Horizontally",
226 .minimum = 0,
227 .maximum = 1,
228 .step = 1,
229 .default_value = 0,
230 },
231 [MT9T031_CTRL_GAIN] = {
232 .id = V4L2_CID_GAIN,
233 .type = V4L2_CTRL_TYPE_INTEGER,
234 .name = "Gain",
235 .minimum = 0,
236 .maximum = 127,
237 .step = 1,
238 .default_value = 64,
239 .flags = V4L2_CTRL_FLAG_SLIDER,
240 },
241 [MT9T031_CTRL_EXPOSURE] = {
242 .id = V4L2_CID_EXPOSURE,
243 .type = V4L2_CTRL_TYPE_INTEGER,
244 .name = "Exposure",
245 .minimum = 1,
246 .maximum = 255,
247 .step = 1,
248 .default_value = 255,
249 .flags = V4L2_CTRL_FLAG_SLIDER,
250 },
251 [MT9T031_CTRL_EXPOSURE_AUTO] = {
252 .id = V4L2_CID_EXPOSURE_AUTO,
253 .type = V4L2_CTRL_TYPE_BOOLEAN,
254 .name = "Automatic Exposure",
255 .minimum = 0,
256 .maximum = 1,
257 .step = 1,
258 .default_value = 1,
259 }
260};
261
262static struct soc_camera_ops mt9t031_ops = {
263 .set_bus_param = mt9t031_set_bus_param,
264 .query_bus_param = mt9t031_query_bus_param,
265 .controls = mt9t031_controls,
266 .num_controls = ARRAY_SIZE(mt9t031_controls),
267};
268
210/* target must be _even_ */ 269/* target must be _even_ */
211static u16 mt9t031_skip(s32 *source, s32 target, s32 max) 270static u16 mt9t031_skip(s32 *source, s32 target, s32 max)
212{ 271{
@@ -226,10 +285,9 @@ static u16 mt9t031_skip(s32 *source, s32 target, s32 max)
226} 285}
227 286
228/* rect is the sensor rectangle, the caller guarantees parameter validity */ 287/* rect is the sensor rectangle, the caller guarantees parameter validity */
229static int mt9t031_set_params(struct soc_camera_device *icd, 288static int mt9t031_set_params(struct i2c_client *client,
230 struct v4l2_rect *rect, u16 xskip, u16 yskip) 289 struct v4l2_rect *rect, u16 xskip, u16 yskip)
231{ 290{
232 struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
233 struct mt9t031 *mt9t031 = to_mt9t031(client); 291 struct mt9t031 *mt9t031 = to_mt9t031(client);
234 int ret; 292 int ret;
235 u16 xbin, ybin; 293 u16 xbin, ybin;
@@ -291,8 +349,10 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
291 dev_dbg(&client->dev, "new physical left %u, top %u\n", 349 dev_dbg(&client->dev, "new physical left %u, top %u\n",
292 rect->left, rect->top); 350 rect->left, rect->top);
293 351
294 /* The caller provides a supported format, as guaranteed by 352 /*
295 * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() */ 353 * The caller provides a supported format, as guaranteed by
354 * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap()
355 */
296 if (ret >= 0) 356 if (ret >= 0)
297 ret = reg_write(client, MT9T031_COLUMN_START, rect->left); 357 ret = reg_write(client, MT9T031_COLUMN_START, rect->left);
298 if (ret >= 0) 358 if (ret >= 0)
@@ -301,15 +361,14 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
301 ret = reg_write(client, MT9T031_WINDOW_WIDTH, rect->width - 1); 361 ret = reg_write(client, MT9T031_WINDOW_WIDTH, rect->width - 1);
302 if (ret >= 0) 362 if (ret >= 0)
303 ret = reg_write(client, MT9T031_WINDOW_HEIGHT, 363 ret = reg_write(client, MT9T031_WINDOW_HEIGHT,
304 rect->height + icd->y_skip_top - 1); 364 rect->height + mt9t031->y_skip_top - 1);
305 if (ret >= 0 && mt9t031->autoexposure) { 365 if (ret >= 0 && mt9t031->autoexposure) {
306 unsigned int total_h = rect->height + icd->y_skip_top + vblank; 366 unsigned int total_h = rect->height + mt9t031->y_skip_top + vblank;
307 ret = set_shutter(client, total_h); 367 ret = set_shutter(client, total_h);
308 if (ret >= 0) { 368 if (ret >= 0) {
309 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; 369 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
310 const struct v4l2_queryctrl *qctrl = 370 const struct v4l2_queryctrl *qctrl =
311 soc_camera_find_qctrl(icd->ops, 371 &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
312 V4L2_CID_EXPOSURE);
313 mt9t031->exposure = (shutter_max / 2 + (total_h - 1) * 372 mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
314 (qctrl->maximum - qctrl->minimum)) / 373 (qctrl->maximum - qctrl->minimum)) /
315 shutter_max + qctrl->minimum; 374 shutter_max + qctrl->minimum;
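Aside (not part of the patch): a minimal standalone sketch of the exposure scaling above, which maps the programmed shutter width (the total frame height in lines) back onto the V4L2 exposure control range with round-to-nearest. The MT9T031 limits used below are assumptions, not values taken from this hunk.

#include <stdio.h>

/* Same arithmetic as in mt9t031_set_params() above, reduced to plain integers. */
static unsigned int exposure_from_height(unsigned int total_h,
					 unsigned int shutter_max,
					 unsigned int min, unsigned int max)
{
	return (shutter_max / 2 + (total_h - 1) * (max - min)) / shutter_max + min;
}

int main(void)
{
	const unsigned int vblank = 25;		/* MT9T031_VERTICAL_BLANK, assumed */
	const unsigned int max_height = 1536;	/* MT9T031_MAX_HEIGHT, assumed */
	const unsigned int shutter_max = max_height + vblank;

	/* Exposure control range from mt9t031_controls[]: minimum 1, maximum 255 */
	printf("full frame : %u\n",
	       exposure_from_height(max_height + vblank, shutter_max, 1, 255));
	printf("VGA window : %u\n",
	       exposure_from_height(480 + vblank, shutter_max, 1, 255));
	return 0;
}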
@@ -334,7 +393,6 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
334 struct v4l2_rect rect = a->c; 393 struct v4l2_rect rect = a->c;
335 struct i2c_client *client = sd->priv; 394 struct i2c_client *client = sd->priv;
336 struct mt9t031 *mt9t031 = to_mt9t031(client); 395 struct mt9t031 *mt9t031 = to_mt9t031(client);
337 struct soc_camera_device *icd = client->dev.platform_data;
338 396
339 rect.width = ALIGN(rect.width, 2); 397 rect.width = ALIGN(rect.width, 2);
340 rect.height = ALIGN(rect.height, 2); 398 rect.height = ALIGN(rect.height, 2);
@@ -345,7 +403,7 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
345 soc_camera_limit_side(&rect.top, &rect.height, 403 soc_camera_limit_side(&rect.top, &rect.height,
346 MT9T031_ROW_SKIP, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT); 404 MT9T031_ROW_SKIP, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT);
347 405
348 return mt9t031_set_params(icd, &rect, mt9t031->xskip, mt9t031->yskip); 406 return mt9t031_set_params(client, &rect, mt9t031->xskip, mt9t031->yskip);
349} 407}
350 408
351static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) 409static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
@@ -373,27 +431,26 @@ static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
373 return 0; 431 return 0;
374} 432}
375 433
376static int mt9t031_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 434static int mt9t031_g_fmt(struct v4l2_subdev *sd,
435 struct v4l2_mbus_framefmt *mf)
377{ 436{
378 struct i2c_client *client = sd->priv; 437 struct i2c_client *client = sd->priv;
379 struct mt9t031 *mt9t031 = to_mt9t031(client); 438 struct mt9t031 *mt9t031 = to_mt9t031(client);
380 struct v4l2_pix_format *pix = &f->fmt.pix;
381 439
382 pix->width = mt9t031->rect.width / mt9t031->xskip; 440 mf->width = mt9t031->rect.width / mt9t031->xskip;
383 pix->height = mt9t031->rect.height / mt9t031->yskip; 441 mf->height = mt9t031->rect.height / mt9t031->yskip;
384 pix->pixelformat = V4L2_PIX_FMT_SGRBG10; 442 mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
385 pix->field = V4L2_FIELD_NONE; 443 mf->colorspace = V4L2_COLORSPACE_SRGB;
386 pix->colorspace = V4L2_COLORSPACE_SRGB; 444 mf->field = V4L2_FIELD_NONE;
387 445
388 return 0; 446 return 0;
389} 447}
390 448
391static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 449static int mt9t031_s_fmt(struct v4l2_subdev *sd,
450 struct v4l2_mbus_framefmt *mf)
392{ 451{
393 struct i2c_client *client = sd->priv; 452 struct i2c_client *client = sd->priv;
394 struct mt9t031 *mt9t031 = to_mt9t031(client); 453 struct mt9t031 *mt9t031 = to_mt9t031(client);
395 struct soc_camera_device *icd = client->dev.platform_data;
396 struct v4l2_pix_format *pix = &f->fmt.pix;
397 u16 xskip, yskip; 454 u16 xskip, yskip;
398 struct v4l2_rect rect = mt9t031->rect; 455 struct v4l2_rect rect = mt9t031->rect;
399 456
@@ -401,24 +458,29 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
401 * try_fmt has put width and height within limits. 458 * try_fmt has put width and height within limits.
402 * S_FMT: use binning and skipping for scaling 459 * S_FMT: use binning and skipping for scaling
403 */ 460 */
404 xskip = mt9t031_skip(&rect.width, pix->width, MT9T031_MAX_WIDTH); 461 xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH);
405 yskip = mt9t031_skip(&rect.height, pix->height, MT9T031_MAX_HEIGHT); 462 yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT);
463
464 mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
465 mf->colorspace = V4L2_COLORSPACE_SRGB;
406 466
407 /* mt9t031_set_params() doesn't change width and height */ 467 /* mt9t031_set_params() doesn't change width and height */
408 return mt9t031_set_params(icd, &rect, xskip, yskip); 468 return mt9t031_set_params(client, &rect, xskip, yskip);
409} 469}
410 470
411/* 471/*
412 * If a user window larger than sensor window is requested, we'll increase the 472 * If a user window larger than sensor window is requested, we'll increase the
413 * sensor window. 473 * sensor window.
414 */ 474 */
415static int mt9t031_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 475static int mt9t031_try_fmt(struct v4l2_subdev *sd,
476 struct v4l2_mbus_framefmt *mf)
416{ 477{
417 struct v4l2_pix_format *pix = &f->fmt.pix;
418
419 v4l_bound_align_image( 478 v4l_bound_align_image(
420 &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1, 479 &mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
421 &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0); 480 &mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
481
482 mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
483 mf->colorspace = V4L2_COLORSPACE_SRGB;
422 484
423 return 0; 485 return 0;
424} 486}
@@ -479,59 +541,6 @@ static int mt9t031_s_register(struct v4l2_subdev *sd,
479} 541}
480#endif 542#endif
481 543
482static const struct v4l2_queryctrl mt9t031_controls[] = {
483 {
484 .id = V4L2_CID_VFLIP,
485 .type = V4L2_CTRL_TYPE_BOOLEAN,
486 .name = "Flip Vertically",
487 .minimum = 0,
488 .maximum = 1,
489 .step = 1,
490 .default_value = 0,
491 }, {
492 .id = V4L2_CID_HFLIP,
493 .type = V4L2_CTRL_TYPE_BOOLEAN,
494 .name = "Flip Horizontally",
495 .minimum = 0,
496 .maximum = 1,
497 .step = 1,
498 .default_value = 0,
499 }, {
500 .id = V4L2_CID_GAIN,
501 .type = V4L2_CTRL_TYPE_INTEGER,
502 .name = "Gain",
503 .minimum = 0,
504 .maximum = 127,
505 .step = 1,
506 .default_value = 64,
507 .flags = V4L2_CTRL_FLAG_SLIDER,
508 }, {
509 .id = V4L2_CID_EXPOSURE,
510 .type = V4L2_CTRL_TYPE_INTEGER,
511 .name = "Exposure",
512 .minimum = 1,
513 .maximum = 255,
514 .step = 1,
515 .default_value = 255,
516 .flags = V4L2_CTRL_FLAG_SLIDER,
517 }, {
518 .id = V4L2_CID_EXPOSURE_AUTO,
519 .type = V4L2_CTRL_TYPE_BOOLEAN,
520 .name = "Automatic Exposure",
521 .minimum = 0,
522 .maximum = 1,
523 .step = 1,
524 .default_value = 1,
525 }
526};
527
528static struct soc_camera_ops mt9t031_ops = {
529 .set_bus_param = mt9t031_set_bus_param,
530 .query_bus_param = mt9t031_query_bus_param,
531 .controls = mt9t031_controls,
532 .num_controls = ARRAY_SIZE(mt9t031_controls),
533};
534
535static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) 544static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
536{ 545{
537 struct i2c_client *client = sd->priv; 546 struct i2c_client *client = sd->priv;
@@ -568,15 +577,9 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
568{ 577{
569 struct i2c_client *client = sd->priv; 578 struct i2c_client *client = sd->priv;
570 struct mt9t031 *mt9t031 = to_mt9t031(client); 579 struct mt9t031 *mt9t031 = to_mt9t031(client);
571 struct soc_camera_device *icd = client->dev.platform_data;
572 const struct v4l2_queryctrl *qctrl; 580 const struct v4l2_queryctrl *qctrl;
573 int data; 581 int data;
574 582
575 qctrl = soc_camera_find_qctrl(&mt9t031_ops, ctrl->id);
576
577 if (!qctrl)
578 return -EINVAL;
579
580 switch (ctrl->id) { 583 switch (ctrl->id) {
581 case V4L2_CID_VFLIP: 584 case V4L2_CID_VFLIP:
582 if (ctrl->value) 585 if (ctrl->value)
@@ -595,6 +598,7 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
595 return -EIO; 598 return -EIO;
596 break; 599 break;
597 case V4L2_CID_GAIN: 600 case V4L2_CID_GAIN:
601 qctrl = &mt9t031_controls[MT9T031_CTRL_GAIN];
598 if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum) 602 if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
599 return -EINVAL; 603 return -EINVAL;
600 /* See Datasheet Table 7, Gain settings. */ 604 /* See Datasheet Table 7, Gain settings. */
@@ -634,6 +638,7 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
634 mt9t031->gain = ctrl->value; 638 mt9t031->gain = ctrl->value;
635 break; 639 break;
636 case V4L2_CID_EXPOSURE: 640 case V4L2_CID_EXPOSURE:
641 qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
637 /* mt9t031 has maximum == default */ 642 /* mt9t031 has maximum == default */
638 if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum) 643 if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
639 return -EINVAL; 644 return -EINVAL;
@@ -657,11 +662,11 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
657 const u16 vblank = MT9T031_VERTICAL_BLANK; 662 const u16 vblank = MT9T031_VERTICAL_BLANK;
658 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; 663 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
659 unsigned int total_h = mt9t031->rect.height + 664 unsigned int total_h = mt9t031->rect.height +
660 icd->y_skip_top + vblank; 665 mt9t031->y_skip_top + vblank;
661 666
662 if (set_shutter(client, total_h) < 0) 667 if (set_shutter(client, total_h) < 0)
663 return -EIO; 668 return -EIO;
664 qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); 669 qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
665 mt9t031->exposure = (shutter_max / 2 + (total_h - 1) * 670 mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
666 (qctrl->maximum - qctrl->minimum)) / 671 (qctrl->maximum - qctrl->minimum)) /
667 shutter_max + qctrl->minimum; 672 shutter_max + qctrl->minimum;
@@ -669,15 +674,18 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
669 } else 674 } else
670 mt9t031->autoexposure = 0; 675 mt9t031->autoexposure = 0;
671 break; 676 break;
677 default:
678 return -EINVAL;
672 } 679 }
673 return 0; 680 return 0;
674} 681}
675 682
676/* Interface active, can use i2c. If it fails, it can indeed mean, that 683/*
677 * this wasn't our capture interface, so, we wait for the right one */ 684 * Interface active, can use i2c. If it fails, it can indeed mean, that
685 * this wasn't our capture interface, so, we wait for the right one
686 */
678static int mt9t031_video_probe(struct i2c_client *client) 687static int mt9t031_video_probe(struct i2c_client *client)
679{ 688{
680 struct soc_camera_device *icd = client->dev.platform_data;
681 struct mt9t031 *mt9t031 = to_mt9t031(client); 689 struct mt9t031 *mt9t031 = to_mt9t031(client);
682 s32 data; 690 s32 data;
683 int ret; 691 int ret;
@@ -692,8 +700,6 @@ static int mt9t031_video_probe(struct i2c_client *client)
692 switch (data) { 700 switch (data) {
693 case 0x1621: 701 case 0x1621:
694 mt9t031->model = V4L2_IDENT_MT9T031; 702 mt9t031->model = V4L2_IDENT_MT9T031;
695 icd->formats = mt9t031_colour_formats;
696 icd->num_formats = ARRAY_SIZE(mt9t031_colour_formats);
697 break; 703 break;
698 default: 704 default:
699 dev_err(&client->dev, 705 dev_err(&client->dev,
@@ -714,6 +720,16 @@ static int mt9t031_video_probe(struct i2c_client *client)
714 return ret; 720 return ret;
715} 721}
716 722
723static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
724{
725 struct i2c_client *client = sd->priv;
726 struct mt9t031 *mt9t031 = to_mt9t031(client);
727
728 *lines = mt9t031->y_skip_top;
729
730 return 0;
731}
732
717static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { 733static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
718 .g_ctrl = mt9t031_g_ctrl, 734 .g_ctrl = mt9t031_g_ctrl,
719 .s_ctrl = mt9t031_s_ctrl, 735 .s_ctrl = mt9t031_s_ctrl,
@@ -724,19 +740,35 @@ static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
724#endif 740#endif
725}; 741};
726 742
743static int mt9t031_enum_fmt(struct v4l2_subdev *sd, int index,
744 enum v4l2_mbus_pixelcode *code)
745{
746 if (index)
747 return -EINVAL;
748
749 *code = V4L2_MBUS_FMT_SBGGR10_1X10;
750 return 0;
751}
752
727static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { 753static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
728 .s_stream = mt9t031_s_stream, 754 .s_stream = mt9t031_s_stream,
729 .s_fmt = mt9t031_s_fmt, 755 .s_mbus_fmt = mt9t031_s_fmt,
730 .g_fmt = mt9t031_g_fmt, 756 .g_mbus_fmt = mt9t031_g_fmt,
731 .try_fmt = mt9t031_try_fmt, 757 .try_mbus_fmt = mt9t031_try_fmt,
732 .s_crop = mt9t031_s_crop, 758 .s_crop = mt9t031_s_crop,
733 .g_crop = mt9t031_g_crop, 759 .g_crop = mt9t031_g_crop,
734 .cropcap = mt9t031_cropcap, 760 .cropcap = mt9t031_cropcap,
761 .enum_mbus_fmt = mt9t031_enum_fmt,
762};
763
764static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
765 .g_skip_top_lines = mt9t031_g_skip_top_lines,
735}; 766};
736 767
737static struct v4l2_subdev_ops mt9t031_subdev_ops = { 768static struct v4l2_subdev_ops mt9t031_subdev_ops = {
738 .core = &mt9t031_subdev_core_ops, 769 .core = &mt9t031_subdev_core_ops,
739 .video = &mt9t031_subdev_video_ops, 770 .video = &mt9t031_subdev_video_ops,
771 .sensor = &mt9t031_subdev_sensor_ops,
740}; 772};
741 773
742static int mt9t031_probe(struct i2c_client *client, 774static int mt9t031_probe(struct i2c_client *client,
@@ -745,18 +777,16 @@ static int mt9t031_probe(struct i2c_client *client,
745 struct mt9t031 *mt9t031; 777 struct mt9t031 *mt9t031;
746 struct soc_camera_device *icd = client->dev.platform_data; 778 struct soc_camera_device *icd = client->dev.platform_data;
747 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 779 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
748 struct soc_camera_link *icl;
749 int ret; 780 int ret;
750 781
751 if (!icd) { 782 if (icd) {
752 dev_err(&client->dev, "MT9T031: missing soc-camera data!\n"); 783 struct soc_camera_link *icl = to_soc_camera_link(icd);
753 return -EINVAL; 784 if (!icl) {
754 } 785 dev_err(&client->dev, "MT9T031 driver needs platform data\n");
786 return -EINVAL;
787 }
755 788
756 icl = to_soc_camera_link(icd); 789 icd->ops = &mt9t031_ops;
757 if (!icl) {
758 dev_err(&client->dev, "MT9T031 driver needs platform data\n");
759 return -EINVAL;
760 } 790 }
761 791
762 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { 792 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -771,17 +801,16 @@ static int mt9t031_probe(struct i2c_client *client,
771 801
772 v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops); 802 v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops);
773 803
774 /* Second stage probe - when a capture adapter is there */ 804 mt9t031->y_skip_top = 0;
775 icd->ops = &mt9t031_ops;
776 icd->y_skip_top = 0;
777
778 mt9t031->rect.left = MT9T031_COLUMN_SKIP; 805 mt9t031->rect.left = MT9T031_COLUMN_SKIP;
779 mt9t031->rect.top = MT9T031_ROW_SKIP; 806 mt9t031->rect.top = MT9T031_ROW_SKIP;
780 mt9t031->rect.width = MT9T031_MAX_WIDTH; 807 mt9t031->rect.width = MT9T031_MAX_WIDTH;
781 mt9t031->rect.height = MT9T031_MAX_HEIGHT; 808 mt9t031->rect.height = MT9T031_MAX_HEIGHT;
782 809
783 /* Simulated autoexposure. If enabled, we calculate shutter width 810 /*
784 * ourselves in the driver based on vertical blanking and frame width */ 811 * Simulated autoexposure. If enabled, we calculate shutter width
812 * ourselves in the driver based on vertical blanking and frame width
813 */
785 mt9t031->autoexposure = 1; 814 mt9t031->autoexposure = 1;
786 815
787 mt9t031->xskip = 1; 816 mt9t031->xskip = 1;
@@ -794,7 +823,8 @@ static int mt9t031_probe(struct i2c_client *client,
794 mt9t031_disable(client); 823 mt9t031_disable(client);
795 824
796 if (ret) { 825 if (ret) {
797 icd->ops = NULL; 826 if (icd)
827 icd->ops = NULL;
798 i2c_set_clientdata(client, NULL); 828 i2c_set_clientdata(client, NULL);
799 kfree(mt9t031); 829 kfree(mt9t031);
800 } 830 }
@@ -807,7 +837,8 @@ static int mt9t031_remove(struct i2c_client *client)
807 struct mt9t031 *mt9t031 = to_mt9t031(client); 837 struct mt9t031 *mt9t031 = to_mt9t031(client);
808 struct soc_camera_device *icd = client->dev.platform_data; 838 struct soc_camera_device *icd = client->dev.platform_data;
809 839
810 icd->ops = NULL; 840 if (icd)
841 icd->ops = NULL;
811 i2c_set_clientdata(client, NULL); 842 i2c_set_clientdata(client, NULL);
812 client->driver = NULL; 843 client->driver = NULL;
813 kfree(mt9t031); 844 kfree(mt9t031);
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
new file mode 100644
index 000000000000..fc4dd6045720
--- /dev/null
+++ b/drivers/media/video/mt9t112.c
@@ -0,0 +1,1177 @@
1/*
2 * mt9t112 Camera Driver
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on ov772x driver, mt9m111 driver,
8 *
9 * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
10 * Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr>
11 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
12 * Copyright (C) 2008 Magnus Damm
13 * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 */
19
20#include <linux/delay.h>
21#include <linux/i2c.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/videodev2.h>
26
27#include <media/mt9t112.h>
28#include <media/soc_camera.h>
29#include <media/soc_mediabus.h>
30#include <media/v4l2-chip-ident.h>
31#include <media/v4l2-common.h>
32
33/* define EXT_CLOCK to check PLL/clock info */
34/* #define EXT_CLOCK 24000000 */
35
36/************************************************************************
37
38
39 macro
40
41
42************************************************************************/
43/*
44 * frame size
45 */
46#define MAX_WIDTH 2048
47#define MAX_HEIGHT 1536
48
49#define VGA_WIDTH 640
50#define VGA_HEIGHT 480
51
52/*
53 * macro of read/write
54 */
55#define ECHECKER(ret, x) \
56 do { \
57 (ret) = (x); \
58 if ((ret) < 0) \
59 return (ret); \
60 } while (0)
61
62#define mt9t112_reg_write(ret, client, a, b) \
63 ECHECKER(ret, __mt9t112_reg_write(client, a, b))
64#define mt9t112_mcu_write(ret, client, a, b) \
65 ECHECKER(ret, __mt9t112_mcu_write(client, a, b))
66
67#define mt9t112_reg_mask_set(ret, client, a, b, c) \
68 ECHECKER(ret, __mt9t112_reg_mask_set(client, a, b, c))
69#define mt9t112_mcu_mask_set(ret, client, a, b, c) \
70 ECHECKER(ret, __mt9t112_mcu_mask_set(client, a, b, c))
71
72#define mt9t112_reg_read(ret, client, a) \
73 ECHECKER(ret, __mt9t112_reg_read(client, a))
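Aside (not part of the patch): a standalone sketch of how the ECHECKER()-based wrappers above behave. Each wrapped call stores its result in ret and makes the enclosing function return immediately on a negative value, so long register-write sequences need no per-line error checks. fake_write() is a stand-in, not a driver function.

#include <stdio.h>

#define ECHECKER(ret, x)		\
	do {				\
		(ret) = (x);		\
		if ((ret) < 0)		\
			return (ret);	\
	} while (0)

static int fake_write(unsigned int reg, unsigned int val)
{
	printf("write 0x%04x = 0x%04x\n", reg, val);
	return reg == 0xdead ? -5 : 0;	/* simulate one failing I2C access */
}

static int init_sequence(void)
{
	int ret;

	ECHECKER(ret, fake_write(0x0014, 0x2145));
	ECHECKER(ret, fake_write(0xdead, 0x0000));	/* sequence stops here */
	ECHECKER(ret, fake_write(0x0022, 0x0190));	/* never reached */
	return ret;
}

int main(void)
{
	printf("init_sequence() = %d\n", init_sequence());
	return 0;
}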
74
75/*
76 * Logical address
77 */
78#define _VAR(id, offset, base) (base | (id & 0x1f) << 10 | (offset & 0x3ff))
79#define VAR(id, offset) _VAR(id, offset, 0x0000)
80#define VAR8(id, offset) _VAR(id, offset, 0x8000)
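Aside (not part of the patch): a short standalone sketch of what the logical-address macros above evaluate to. The variable id occupies bits 10..14, the offset bits 0..9, and VAR8() additionally sets bit 15 to select an 8-bit MCU variable.

#include <stdio.h>
#include <stdint.h>

#define _VAR(id, offset, base)	((base) | ((id) & 0x1f) << 10 | ((offset) & 0x3ff))
#define VAR(id, offset)		_VAR(id, offset, 0x0000)
#define VAR8(id, offset)	_VAR(id, offset, 0x8000)

int main(void)
{
	/* Examples used later in the driver */
	printf("VAR(26, 7) = 0x%04x\n", (uint16_t)VAR(26, 7));	/* 0x6807 */
	printf("VAR8(1, 0) = 0x%04x\n", (uint16_t)VAR8(1, 0));	/* 0x8400 */
	return 0;
}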
81
82/************************************************************************
83
84
85 struct
86
87
88************************************************************************/
89struct mt9t112_frame_size {
90 u16 width;
91 u16 height;
92};
93
94struct mt9t112_format {
95 enum v4l2_mbus_pixelcode code;
96 enum v4l2_colorspace colorspace;
97 u16 fmt;
98 u16 order;
99};
100
101struct mt9t112_priv {
102 struct v4l2_subdev subdev;
103 struct mt9t112_camera_info *info;
104 struct i2c_client *client;
105 struct soc_camera_device icd;
106 struct mt9t112_frame_size frame;
107 const struct mt9t112_format *format;
108 int model;
109 u32 flags;
110/* for flags */
111#define INIT_DONE (1<<0)
112};
113
114/************************************************************************
115
116
117 supported format
118
119
120************************************************************************/
121
122static const struct mt9t112_format mt9t112_cfmts[] = {
123 {
124 .code = V4L2_MBUS_FMT_YUYV8_2X8_BE,
125 .colorspace = V4L2_COLORSPACE_JPEG,
126 .fmt = 1,
127 .order = 0,
128 }, {
129 .code = V4L2_MBUS_FMT_YVYU8_2X8_BE,
130 .colorspace = V4L2_COLORSPACE_JPEG,
131 .fmt = 1,
132 .order = 1,
133 }, {
134 .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
135 .colorspace = V4L2_COLORSPACE_JPEG,
136 .fmt = 1,
137 .order = 2,
138 }, {
139 .code = V4L2_MBUS_FMT_YVYU8_2X8_LE,
140 .colorspace = V4L2_COLORSPACE_JPEG,
141 .fmt = 1,
142 .order = 3,
143 }, {
144 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
145 .colorspace = V4L2_COLORSPACE_SRGB,
146 .fmt = 8,
147 .order = 2,
148 }, {
149 .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
150 .colorspace = V4L2_COLORSPACE_SRGB,
151 .fmt = 4,
152 .order = 2,
153 },
154};
155
156/************************************************************************
157
158
159 general function
160
161
162************************************************************************/
163static struct mt9t112_priv *to_mt9t112(const struct i2c_client *client)
164{
165 return container_of(i2c_get_clientdata(client),
166 struct mt9t112_priv,
167 subdev);
168}
169
170static int __mt9t112_reg_read(const struct i2c_client *client, u16 command)
171{
172 struct i2c_msg msg[2];
173 u8 buf[2];
174 int ret;
175
176 command = swab16(command);
177
178 msg[0].addr = client->addr;
179 msg[0].flags = 0;
180 msg[0].len = 2;
181 msg[0].buf = (u8 *)&command;
182
183 msg[1].addr = client->addr;
184 msg[1].flags = I2C_M_RD;
185 msg[1].len = 2;
186 msg[1].buf = buf;
187
188 /*
189 * if the return value of this function is < 0,
190 * it means an error;
191 * otherwise the lower 16 bits are valid data.
192 */
193 ret = i2c_transfer(client->adapter, msg, 2);
194 if (ret < 0)
195 return ret;
196
197 memcpy(&ret, buf, 2);
198 return swab16(ret);
199}
200
201static int __mt9t112_reg_write(const struct i2c_client *client,
202 u16 command, u16 data)
203{
204 struct i2c_msg msg;
205 u8 buf[4];
206 int ret;
207
208 command = swab16(command);
209 data = swab16(data);
210
211 memcpy(buf + 0, &command, 2);
212 memcpy(buf + 2, &data, 2);
213
214 msg.addr = client->addr;
215 msg.flags = 0;
216 msg.len = 4;
217 msg.buf = buf;
218
219 /*
220 * i2c_transfer() returns the number of messages transferred,
221 * but this function should return 0 on success
222 */
223 ret = i2c_transfer(client->adapter, &msg, 1);
224 if (ret >= 0)
225 ret = 0;
226
227 return ret;
228}
229
230static int __mt9t112_reg_mask_set(const struct i2c_client *client,
231 u16 command,
232 u16 mask,
233 u16 set)
234{
235 int val = __mt9t112_reg_read(client, command);
236 if (val < 0)
237 return val;
238
239 val &= ~mask;
240 val |= set & mask;
241
242 return __mt9t112_reg_write(client, command, val);
243}
244
245/* mcu access */
246static int __mt9t112_mcu_read(const struct i2c_client *client, u16 command)
247{
248 int ret;
249
250 ret = __mt9t112_reg_write(client, 0x098E, command);
251 if (ret < 0)
252 return ret;
253
254 return __mt9t112_reg_read(client, 0x0990);
255}
256
257static int __mt9t112_mcu_write(const struct i2c_client *client,
258 u16 command, u16 data)
259{
260 int ret;
261
262 ret = __mt9t112_reg_write(client, 0x098E, command);
263 if (ret < 0)
264 return ret;
265
266 return __mt9t112_reg_write(client, 0x0990, data);
267}
268
269static int __mt9t112_mcu_mask_set(const struct i2c_client *client,
270 u16 command,
271 u16 mask,
272 u16 set)
273{
274 int val = __mt9t112_mcu_read(client, command);
275 if (val < 0)
276 return val;
277
278 val &= ~mask;
279 val |= set & mask;
280
281 return __mt9t112_mcu_write(client, command, val);
282}
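Aside (not part of the patch): a standalone sketch of the indirect MCU variable access implemented above. The logical address built with VAR()/VAR8() is first written to register 0x098E, then the variable's value moves through register 0x0990. reg_read()/reg_write() here are stand-ins for the real I2C helpers.

#include <stdio.h>
#include <stdint.h>

static uint16_t regs[0x10000];	/* fake register file standing in for the chip */

static int reg_write(uint16_t reg, uint16_t val) { regs[reg] = val; return 0; }
static int reg_read(uint16_t reg)                { return regs[reg]; }

static int mcu_write(uint16_t var, uint16_t data)
{
	int ret = reg_write(0x098E, var);	/* select the MCU variable */
	if (ret < 0)
		return ret;
	return reg_write(0x0990, data);		/* write its value */
}

static int mcu_read(uint16_t var)
{
	int ret = reg_write(0x098E, var);	/* select the MCU variable */
	if (ret < 0)
		return ret;
	return reg_read(0x0990);		/* read its value back */
}

int main(void)
{
	/* e.g. the (Context A) output format variable VAR(26, 7) = 0x6807 */
	mcu_write(0x6807, 1);
	printf("VAR(26, 7) reads back %d\n", mcu_read(0x6807));
	return 0;
}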
283
284static int mt9t112_reset(const struct i2c_client *client)
285{
286 int ret;
287
288 mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0001);
289 msleep(1);
290 mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0000);
291
292 return ret;
293}
294
295#ifndef EXT_CLOCK
296#define CLOCK_INFO(a, b)
297#else
298#define CLOCK_INFO(a, b) mt9t112_clock_info(a, b)
299static int mt9t112_clock_info(const struct i2c_client *client, u32 ext)
300{
301 int m, n, p1, p2, p3, p4, p5, p6, p7;
302 u32 vco, clk;
303 char *enable;
304
305 ext /= 1000; /* kHz order */
306
307 mt9t112_reg_read(n, client, 0x0012);
308 p1 = n & 0x000f;
309 n = n >> 4;
310 p2 = n & 0x000f;
311 n = n >> 4;
312 p3 = n & 0x000f;
313
314 mt9t112_reg_read(n, client, 0x002a);
315 p4 = n & 0x000f;
316 n = n >> 4;
317 p5 = n & 0x000f;
318 n = n >> 4;
319 p6 = n & 0x000f;
320
321 mt9t112_reg_read(n, client, 0x002c);
322 p7 = n & 0x000f;
323
324 mt9t112_reg_read(n, client, 0x0010);
325 m = n & 0x00ff;
326 n = (n >> 8) & 0x003f;
327
328 enable = ((6000 > ext) || (54000 < ext)) ? "X" : "";
329 dev_info(&client->dev, "EXTCLK : %10u K %s\n", ext, enable);
330
331 vco = 2 * m * ext / (n+1);
332 enable = ((384000 > vco) || (768000 < vco)) ? "X" : "";
333 dev_info(&client->dev, "VCO : %10u K %s\n", vco, enable);
334
335 clk = vco / (p1+1) / (p2+1);
336 enable = (96000 < clk) ? "X" : "";
337 dev_info(&client->dev, "PIXCLK : %10u K %s\n", clk, enable);
338
339 clk = vco / (p3+1);
340 enable = (768000 < clk) ? "X" : "";
341 dev_info(&client->dev, "MIPICLK : %10u K %s\n", clk, enable);
342
343 clk = vco / (p6+1);
344 enable = (96000 < clk) ? "X" : "";
345 dev_info(&client->dev, "MCU CLK : %10u K %s\n", clk, enable);
346
347 clk = vco / (p5+1);
348 enable = (54000 < clk) ? "X" : "";
349 dev_info(&client->dev, "SOC CLK : %10u K %s\n", clk, enable);
350
351 clk = vco / (p4+1);
352 enable = (70000 < clk) ? "X" : "";
353 dev_info(&client->dev, "Sensor CLK : %10u K %s\n", clk, enable);
354
355 clk = vco / (p7+1);
356 dev_info(&client->dev, "External sensor : %10u K\n", clk);
357
358 clk = ext / (n+1);
359 enable = ((2000 > clk) || (24000 < clk)) ? "X" : "";
360 dev_info(&client->dev, "PFD : %10u K %s\n", clk, enable);
361
362 return 0;
363}
364#endif
365
366static void mt9t112_frame_check(u32 *width, u32 *height)
367{
368 if (*width > MAX_WIDTH)
369 *width = MAX_WIDTH;
370
371 if (*height > MAX_HEIGHT)
372 *height = MAX_HEIGHT;
373}
374
375static int mt9t112_set_a_frame_size(const struct i2c_client *client,
376 u16 width,
377 u16 height)
378{
379 int ret;
380 u16 wstart = (MAX_WIDTH - width) / 2;
381 u16 hstart = (MAX_HEIGHT - height) / 2;
382
383 /* (Context A) Image Width/Height */
384 mt9t112_mcu_write(ret, client, VAR(26, 0), width);
385 mt9t112_mcu_write(ret, client, VAR(26, 2), height);
386
387 /* (Context A) Output Width/Height */
388 mt9t112_mcu_write(ret, client, VAR(18, 43), 8 + width);
389 mt9t112_mcu_write(ret, client, VAR(18, 45), 8 + height);
390
391 /* (Context A) Start Row/Column */
392 mt9t112_mcu_write(ret, client, VAR(18, 2), 4 + hstart);
393 mt9t112_mcu_write(ret, client, VAR(18, 4), 4 + wstart);
394
395 /* (Context A) End Row/Column */
396 mt9t112_mcu_write(ret, client, VAR(18, 6), 11 + height + hstart);
397 mt9t112_mcu_write(ret, client, VAR(18, 8), 11 + width + wstart);
398
399 mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
400
401 return ret;
402}
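Aside (not part of the patch): a standalone sketch of the window arithmetic in mt9t112_set_a_frame_size() above; the requested frame is centred on the 2048x1536 pixel array, with the 4-pixel and 11-pixel offsets taken from the register writes above.

#include <stdio.h>

#define MAX_WIDTH	2048
#define MAX_HEIGHT	1536

static void show_window(unsigned int width, unsigned int height)
{
	unsigned int wstart = (MAX_WIDTH - width) / 2;
	unsigned int hstart = (MAX_HEIGHT - height) / 2;

	printf("%ux%u: rows %u..%u, columns %u..%u\n",
	       width, height,
	       4 + hstart, 11 + height + hstart,
	       4 + wstart, 11 + width + wstart);
}

int main(void)
{
	show_window(640, 480);		/* VGA default */
	show_window(2048, 1536);	/* full frame */
	return 0;
}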
403
404static int mt9t112_set_pll_dividers(const struct i2c_client *client,
405 u8 m, u8 n,
406 u8 p1, u8 p2, u8 p3,
407 u8 p4, u8 p5, u8 p6,
408 u8 p7)
409{
410 int ret;
411 u16 val;
412
413 /* N/M */
414 val = (n << 8) |
415 (m << 0);
416 mt9t112_reg_mask_set(ret, client, 0x0010, 0x3fff, val);
417
418 /* P1/P2/P3 */
419 val = ((p3 & 0x0F) << 8) |
420 ((p2 & 0x0F) << 4) |
421 ((p1 & 0x0F) << 0);
422 mt9t112_reg_mask_set(ret, client, 0x0012, 0x0fff, val);
423
424 /* P4/P5/P6 */
425 val = (0x7 << 12) |
426 ((p6 & 0x0F) << 8) |
427 ((p5 & 0x0F) << 4) |
428 ((p4 & 0x0F) << 0);
429 mt9t112_reg_mask_set(ret, client, 0x002A, 0x7fff, val);
430
431 /* P7 */
432 val = (0x1 << 12) |
433 ((p7 & 0x0F) << 0);
434 mt9t112_reg_mask_set(ret, client, 0x002C, 0x100f, val);
435
436 return ret;
437}
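Aside (not part of the patch): how mt9t112_set_pll_dividers() above packs the divider values into the PLL control registers. The m/n/p* values below are placeholders for illustration only, not a validated clock configuration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t m = 24, n = 1;
	uint8_t p1 = 0, p2 = 7, p3 = 0, p4 = 7, p5 = 0, p6 = 7, p7 = 0;

	uint16_t reg10 = (uint16_t)((n << 8) | m);		/* N/M, mask 0x3fff */
	uint16_t reg12 = (uint16_t)(((p3 & 0xF) << 8) |
				    ((p2 & 0xF) << 4) |
				    ((p1 & 0xF) << 0));		/* P1/P2/P3, mask 0x0fff */
	uint16_t reg2a = (uint16_t)((0x7 << 12) |
				    ((p6 & 0xF) << 8) |
				    ((p5 & 0xF) << 4) |
				    ((p4 & 0xF) << 0));		/* P4/P5/P6, mask 0x7fff */
	uint16_t reg2c = (uint16_t)((0x1 << 12) | (p7 & 0xF));	/* P7, mask 0x100f */

	printf("0x0010 = 0x%04x\n0x0012 = 0x%04x\n0x002A = 0x%04x\n0x002C = 0x%04x\n",
	       reg10, reg12, reg2a, reg2c);
	return 0;
}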
438
439static int mt9t112_init_pll(const struct i2c_client *client)
440{
441 struct mt9t112_priv *priv = to_mt9t112(client);
442 int data, i, ret;
443
444 mt9t112_reg_mask_set(ret, client, 0x0014, 0x003, 0x0001);
445
446 /* PLL control: BYPASS PLL = 8517 */
447 mt9t112_reg_write(ret, client, 0x0014, 0x2145);
448
449 /* Replace these registers when new timing parameters are generated */
450 mt9t112_set_pll_dividers(client,
451 priv->info->divider.m,
452 priv->info->divider.n,
453 priv->info->divider.p1,
454 priv->info->divider.p2,
455 priv->info->divider.p3,
456 priv->info->divider.p4,
457 priv->info->divider.p5,
458 priv->info->divider.p6,
459 priv->info->divider.p7);
460
461 /*
462 * TEST_BYPASS on
463 * PLL_ENABLE on
464 * SEL_LOCK_DET on
465 * TEST_BYPASS off
466 */
467 mt9t112_reg_write(ret, client, 0x0014, 0x2525);
468 mt9t112_reg_write(ret, client, 0x0014, 0x2527);
469 mt9t112_reg_write(ret, client, 0x0014, 0x3427);
470 mt9t112_reg_write(ret, client, 0x0014, 0x3027);
471
472 mdelay(10);
473
474 /*
475 * PLL_BYPASS off
476 * Reference clock count
477 * I2C Master Clock Divider
478 */
479 mt9t112_reg_write(ret, client, 0x0014, 0x3046);
480 mt9t112_reg_write(ret, client, 0x0022, 0x0190);
481 mt9t112_reg_write(ret, client, 0x3B84, 0x0212);
482
483 /* External sensor clock is PLL bypass */
484 mt9t112_reg_write(ret, client, 0x002E, 0x0500);
485
486 mt9t112_reg_mask_set(ret, client, 0x0018, 0x0002, 0x0002);
487 mt9t112_reg_mask_set(ret, client, 0x3B82, 0x0004, 0x0004);
488
489 /* MCU disabled */
490 mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0x0004);
491
492 /* out of standby */
493 mt9t112_reg_mask_set(ret, client, 0x0018, 0x0001, 0);
494
495 mdelay(50);
496
497 /*
498 * Standby Workaround
499 * Disable Secondary I2C Pads
500 */
501 mt9t112_reg_write(ret, client, 0x0614, 0x0001);
502 mdelay(1);
503 mt9t112_reg_write(ret, client, 0x0614, 0x0001);
504 mdelay(1);
505 mt9t112_reg_write(ret, client, 0x0614, 0x0001);
506 mdelay(1);
507 mt9t112_reg_write(ret, client, 0x0614, 0x0001);
508 mdelay(1);
509 mt9t112_reg_write(ret, client, 0x0614, 0x0001);
510 mdelay(1);
511 mt9t112_reg_write(ret, client, 0x0614, 0x0001);
512 mdelay(1);
513
514 /* poll to verify out of standby. Must poll this bit */
515 for (i = 0; i < 100; i++) {
516 mt9t112_reg_read(data, client, 0x0018);
517 if (0x4000 & data)
518 break;
519
520 mdelay(10);
521 }
522
523 return ret;
524}
525
526static int mt9t112_init_setting(const struct i2c_client *client)
527{
528
529 int ret;
530
531 /* Adaptive Output Clock (A) */
532 mt9t112_mcu_mask_set(ret, client, VAR(26, 160), 0x0040, 0x0000);
533
534 /* Read Mode (A) */
535 mt9t112_mcu_write(ret, client, VAR(18, 12), 0x0024);
536
537 /* Fine Correction (A) */
538 mt9t112_mcu_write(ret, client, VAR(18, 15), 0x00CC);
539
540 /* Fine IT Min (A) */
541 mt9t112_mcu_write(ret, client, VAR(18, 17), 0x01f1);
542
543 /* Fine IT Max Margin (A) */
544 mt9t112_mcu_write(ret, client, VAR(18, 19), 0x00fF);
545
546 /* Base Frame Lines (A) */
547 mt9t112_mcu_write(ret, client, VAR(18, 29), 0x032D);
548
549 /* Min Line Length (A) */
550 mt9t112_mcu_write(ret, client, VAR(18, 31), 0x073a);
551
552 /* Line Length (A) */
553 mt9t112_mcu_write(ret, client, VAR(18, 37), 0x07d0);
554
555 /* Adaptive Output Clock (B) */
556 mt9t112_mcu_mask_set(ret, client, VAR(27, 160), 0x0040, 0x0000);
557
558 /* Row Start (B) */
559 mt9t112_mcu_write(ret, client, VAR(18, 74), 0x004);
560
561 /* Column Start (B) */
562 mt9t112_mcu_write(ret, client, VAR(18, 76), 0x004);
563
564 /* Row End (B) */
565 mt9t112_mcu_write(ret, client, VAR(18, 78), 0x60B);
566
567 /* Column End (B) */
568 mt9t112_mcu_write(ret, client, VAR(18, 80), 0x80B);
569
570 /* Fine Correction (B) */
571 mt9t112_mcu_write(ret, client, VAR(18, 87), 0x008C);
572
573 /* Fine IT Min (B) */
574 mt9t112_mcu_write(ret, client, VAR(18, 89), 0x01F1);
575
576 /* Fine IT Max Margin (B) */
577 mt9t112_mcu_write(ret, client, VAR(18, 91), 0x00FF);
578
579 /* Base Frame Lines (B) */
580 mt9t112_mcu_write(ret, client, VAR(18, 101), 0x0668);
581
582 /* Min Line Length (B) */
583 mt9t112_mcu_write(ret, client, VAR(18, 103), 0x0AF0);
584
585 /* Line Length (B) */
586 mt9t112_mcu_write(ret, client, VAR(18, 109), 0x0AF0);
587
588 /*
589 * Flicker detection registers.
590 * This section should be replaced whenever a new timing file is generated;
591 * all of the following registers need to be replaced.
592 * The following registers are generated by the Register Wizard, but the user
593 * can modify them. For details see the auto flicker detection tuning.
594 */
595
596 /* FD_FDPERIOD_SELECT */
597 mt9t112_mcu_write(ret, client, VAR8(8, 5), 0x01);
598
599 /* PRI_B_CONFIG_FD_ALGO_RUN */
600 mt9t112_mcu_write(ret, client, VAR(27, 17), 0x0003);
601
602 /* PRI_A_CONFIG_FD_ALGO_RUN */
603 mt9t112_mcu_write(ret, client, VAR(26, 17), 0x0003);
604
605 /*
606 * AFD range detection tuning registers
607 */
608
609 /* search_f1_50 */
610 mt9t112_mcu_write(ret, client, VAR8(18, 165), 0x25);
611
612 /* search_f2_50 */
613 mt9t112_mcu_write(ret, client, VAR8(18, 166), 0x28);
614
615 /* search_f1_60 */
616 mt9t112_mcu_write(ret, client, VAR8(18, 167), 0x2C);
617
618 /* search_f2_60 */
619 mt9t112_mcu_write(ret, client, VAR8(18, 168), 0x2F);
620
621 /* period_50Hz (A) */
622 mt9t112_mcu_write(ret, client, VAR8(18, 68), 0xBA);
623
624 /* secret register by aptina */
625 /* period_50Hz (A MSB) */
626 mt9t112_mcu_write(ret, client, VAR8(18, 303), 0x00);
627
628 /* period_60Hz (A) */
629 mt9t112_mcu_write(ret, client, VAR8(18, 69), 0x9B);
630
631 /* secret register by aptina */
632 /* period_60Hz (A MSB) */
633 mt9t112_mcu_write(ret, client, VAR8(18, 301), 0x00);
634
635 /* period_50Hz (B) */
636 mt9t112_mcu_write(ret, client, VAR8(18, 140), 0x82);
637
638 /* secret register by aptina */
639 /* period_50Hz (B) MSB */
640 mt9t112_mcu_write(ret, client, VAR8(18, 304), 0x00);
641
642 /* period_60Hz (B) */
643 mt9t112_mcu_write(ret, client, VAR8(18, 141), 0x6D);
644
645 /* secret register by aptina */
646 /* period_60Hz (B) MSB */
647 mt9t112_mcu_write(ret, client, VAR8(18, 302), 0x00);
648
649 /* FD Mode */
650 mt9t112_mcu_write(ret, client, VAR8(8, 2), 0x10);
651
652 /* Stat_min */
653 mt9t112_mcu_write(ret, client, VAR8(8, 9), 0x02);
654
655 /* Stat_max */
656 mt9t112_mcu_write(ret, client, VAR8(8, 10), 0x03);
657
658 /* Min_amplitude */
659 mt9t112_mcu_write(ret, client, VAR8(8, 12), 0x0A);
660
661 /* RX FIFO Watermark (A) */
662 mt9t112_mcu_write(ret, client, VAR(18, 70), 0x0014);
663
664 /* RX FIFO Watermark (B) */
665 mt9t112_mcu_write(ret, client, VAR(18, 142), 0x0014);
666
667 /* MCLK: 16MHz
668 * PCLK: 73MHz
669 * CorePixCLK: 36.5 MHz
670 */
671 mt9t112_mcu_write(ret, client, VAR8(18, 0x0044), 133);
672 mt9t112_mcu_write(ret, client, VAR8(18, 0x0045), 110);
673 mt9t112_mcu_write(ret, client, VAR8(18, 0x008c), 130);
674 mt9t112_mcu_write(ret, client, VAR8(18, 0x008d), 108);
675
676 mt9t112_mcu_write(ret, client, VAR8(18, 0x00A5), 27);
677 mt9t112_mcu_write(ret, client, VAR8(18, 0x00a6), 30);
678 mt9t112_mcu_write(ret, client, VAR8(18, 0x00a7), 32);
679 mt9t112_mcu_write(ret, client, VAR8(18, 0x00a8), 35);
680
681 return ret;
682}
683
684static int mt9t112_auto_focus_setting(const struct i2c_client *client)
685{
686 int ret;
687
688 mt9t112_mcu_write(ret, client, VAR(12, 13), 0x000F);
689 mt9t112_mcu_write(ret, client, VAR(12, 23), 0x0F0F);
690 mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
691
692 mt9t112_reg_write(ret, client, 0x0614, 0x0000);
693
694 mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05);
695 mt9t112_mcu_write(ret, client, VAR8(12, 2), 0x02);
696 mt9t112_mcu_write(ret, client, VAR(12, 3), 0x0002);
697 mt9t112_mcu_write(ret, client, VAR(17, 3), 0x8001);
698 mt9t112_mcu_write(ret, client, VAR(17, 11), 0x0025);
699 mt9t112_mcu_write(ret, client, VAR(17, 13), 0x0193);
700 mt9t112_mcu_write(ret, client, VAR8(17, 33), 0x18);
701 mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05);
702
703 return ret;
704}
705
706static int mt9t112_auto_focus_trigger(const struct i2c_client *client)
707{
708 int ret;
709
710 mt9t112_mcu_write(ret, client, VAR8(12, 25), 0x01);
711
712 return ret;
713}
714
715static int mt9t112_init_camera(const struct i2c_client *client)
716{
717 int ret;
718
719 ECHECKER(ret, mt9t112_reset(client));
720
721 ECHECKER(ret, mt9t112_init_pll(client));
722
723 ECHECKER(ret, mt9t112_init_setting(client));
724
725 ECHECKER(ret, mt9t112_auto_focus_setting(client));
726
727 mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0);
728
729 /* Analog setting B */
730 mt9t112_reg_write(ret, client, 0x3084, 0x2409);
731 mt9t112_reg_write(ret, client, 0x3092, 0x0A49);
732 mt9t112_reg_write(ret, client, 0x3094, 0x4949);
733 mt9t112_reg_write(ret, client, 0x3096, 0x4950);
734
735 /*
736 * Disable adaptive clock
737 * PRI_A_CONFIG_JPEG_OB_TX_CONTROL_VAR
738 * PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR
739 */
740 mt9t112_mcu_write(ret, client, VAR(26, 160), 0x0A2E);
741 mt9t112_mcu_write(ret, client, VAR(27, 160), 0x0A2E);
742
743 /* Configure Status in Status_before_length format and enable header */
744 /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */
745 mt9t112_mcu_write(ret, client, VAR(27, 144), 0x0CB4);
746
747 /* Enable JPEG in context B */
748 /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */
749 mt9t112_mcu_write(ret, client, VAR8(27, 142), 0x01);
750
751 /* Disable Dac_TXLO */
752 mt9t112_reg_write(ret, client, 0x316C, 0x350F);
753
754 /* Set max slew rates */
755 mt9t112_reg_write(ret, client, 0x1E, 0x777);
756
757 return ret;
758}
759
760/************************************************************************
761
762
763 soc_camera_ops
764
765
766************************************************************************/
767static int mt9t112_set_bus_param(struct soc_camera_device *icd,
768 unsigned long flags)
769{
770 return 0;
771}
772
773static unsigned long mt9t112_query_bus_param(struct soc_camera_device *icd)
774{
775 struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
776 struct mt9t112_priv *priv = to_mt9t112(client);
777 struct soc_camera_link *icl = to_soc_camera_link(icd);
778 unsigned long flags = SOCAM_MASTER | SOCAM_VSYNC_ACTIVE_HIGH |
779 SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH;
780
781 flags |= (priv->info->flags & MT9T112_FLAG_PCLK_RISING_EDGE) ?
782 SOCAM_PCLK_SAMPLE_RISING : SOCAM_PCLK_SAMPLE_FALLING;
783
784 if (priv->info->flags & MT9T112_FLAG_DATAWIDTH_8)
785 flags |= SOCAM_DATAWIDTH_8;
786 else
787 flags |= SOCAM_DATAWIDTH_10;
788
789 return soc_camera_apply_sensor_flags(icl, flags);
790}
791
792static struct soc_camera_ops mt9t112_ops = {
793 .set_bus_param = mt9t112_set_bus_param,
794 .query_bus_param = mt9t112_query_bus_param,
795};
796
797/************************************************************************
798
799
800 v4l2_subdev_core_ops
801
802
803************************************************************************/
804static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
805 struct v4l2_dbg_chip_ident *id)
806{
807 struct i2c_client *client = sd->priv;
808 struct mt9t112_priv *priv = to_mt9t112(client);
809
810 id->ident = priv->model;
811 id->revision = 0;
812
813 return 0;
814}
815
816#ifdef CONFIG_VIDEO_ADV_DEBUG
817static int mt9t112_g_register(struct v4l2_subdev *sd,
818 struct v4l2_dbg_register *reg)
819{
820 struct i2c_client *client = sd->priv;
821 int ret;
822
823 reg->size = 2;
824 mt9t112_reg_read(ret, client, reg->reg);
825
826 reg->val = (__u64)ret;
827
828 return 0;
829}
830
831static int mt9t112_s_register(struct v4l2_subdev *sd,
832 struct v4l2_dbg_register *reg)
833{
834 struct i2c_client *client = sd->priv;
835 int ret;
836
837 mt9t112_reg_write(ret, client, reg->reg, reg->val);
838
839 return ret;
840}
841#endif
842
843static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
844 .g_chip_ident = mt9t112_g_chip_ident,
845#ifdef CONFIG_VIDEO_ADV_DEBUG
846 .g_register = mt9t112_g_register,
847 .s_register = mt9t112_s_register,
848#endif
849};
850
851
852/************************************************************************
853
854
855 v4l2_subdev_video_ops
856
857
858************************************************************************/
859static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
860{
861 struct i2c_client *client = sd->priv;
862 struct mt9t112_priv *priv = to_mt9t112(client);
863 int ret = 0;
864
865 if (!enable) {
866 /* FIXME
867 *
868 * If the user selects a large output size
869 * and uses it for a long time,
870 * the mt9t112 camera gets very warm.
871 *
872 * But the current driver cannot stop the mt9t112 camera,
873 * so set a small size here to work around the problem.
874 */
875 mt9t112_set_a_frame_size(client, VGA_WIDTH, VGA_HEIGHT);
876 return ret;
877 }
878
879 if (!(priv->flags & INIT_DONE)) {
880 u16 param = (MT9T112_FLAG_PCLK_RISING_EDGE &
881 priv->info->flags) ? 0x0001 : 0x0000;
882
883 ECHECKER(ret, mt9t112_init_camera(client));
884
885 /* Invert PCLK (Data sampled on falling edge of pixclk) */
886 mt9t112_reg_write(ret, client, 0x3C20, param);
887
888 mdelay(5);
889
890 priv->flags |= INIT_DONE;
891 }
892
893 mt9t112_mcu_write(ret, client, VAR(26, 7), priv->format->fmt);
894 mt9t112_mcu_write(ret, client, VAR(26, 9), priv->format->order);
895 mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
896
897 mt9t112_set_a_frame_size(client,
898 priv->frame.width,
899 priv->frame.height);
900
901 ECHECKER(ret, mt9t112_auto_focus_trigger(client));
902
903 dev_dbg(&client->dev, "format : %d\n", priv->format->code);
904 dev_dbg(&client->dev, "size : %d x %d\n",
905 priv->frame.width,
906 priv->frame.height);
907
908 CLOCK_INFO(client, EXT_CLOCK);
909
910 return ret;
911}
912
913static int mt9t112_set_params(struct i2c_client *client, u32 width, u32 height,
914 enum v4l2_mbus_pixelcode code)
915{
916 struct mt9t112_priv *priv = to_mt9t112(client);
917 int i;
918
919 priv->format = NULL;
920
921 /*
922 * frame size check
923 */
924 mt9t112_frame_check(&width, &height);
925
926 /*
927 * get color format
928 */
929 for (i = 0; i < ARRAY_SIZE(mt9t112_cfmts); i++)
930 if (mt9t112_cfmts[i].code == code)
931 break;
932
933 if (i == ARRAY_SIZE(mt9t112_cfmts))
934 return -EINVAL;
935
936 priv->frame.width = (u16)width;
937 priv->frame.height = (u16)height;
938
939 priv->format = mt9t112_cfmts + i;
940
941 return 0;
942}
943
944static int mt9t112_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
945{
946 a->bounds.left = 0;
947 a->bounds.top = 0;
948 a->bounds.width = VGA_WIDTH;
949 a->bounds.height = VGA_HEIGHT;
950 a->defrect = a->bounds;
951 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
952 a->pixelaspect.numerator = 1;
953 a->pixelaspect.denominator = 1;
954
955 return 0;
956}
957
958static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
959{
960 a->c.left = 0;
961 a->c.top = 0;
962 a->c.width = VGA_WIDTH;
963 a->c.height = VGA_HEIGHT;
964 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
965
966 return 0;
967}
968
969static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
970{
971 struct i2c_client *client = sd->priv;
972 struct v4l2_rect *rect = &a->c;
973
974 return mt9t112_set_params(client, rect->width, rect->height,
975 V4L2_MBUS_FMT_YUYV8_2X8_BE);
976}
977
978static int mt9t112_g_fmt(struct v4l2_subdev *sd,
979 struct v4l2_mbus_framefmt *mf)
980{
981 struct i2c_client *client = sd->priv;
982 struct mt9t112_priv *priv = to_mt9t112(client);
983
984 if (!priv->format) {
985 int ret = mt9t112_set_params(client, VGA_WIDTH, VGA_HEIGHT,
986 V4L2_MBUS_FMT_YUYV8_2X8_BE);
987 if (ret < 0)
988 return ret;
989 }
990
991 mf->width = priv->frame.width;
992 mf->height = priv->frame.height;
993 /* TODO: set colorspace */
994 mf->code = priv->format->code;
995 mf->field = V4L2_FIELD_NONE;
996
997 return 0;
998}
999
1000static int mt9t112_s_fmt(struct v4l2_subdev *sd,
1001 struct v4l2_mbus_framefmt *mf)
1002{
1003 struct i2c_client *client = sd->priv;
1004
1005 /* TODO: set colorspace */
1006 return mt9t112_set_params(client, mf->width, mf->height, mf->code);
1007}
1008
1009static int mt9t112_try_fmt(struct v4l2_subdev *sd,
1010 struct v4l2_mbus_framefmt *mf)
1011{
1012 mt9t112_frame_check(&mf->width, &mf->height);
1013
1014 /* TODO: set colorspace */
1015 mf->field = V4L2_FIELD_NONE;
1016
1017 return 0;
1018}
1019
1020static int mt9t112_enum_fmt(struct v4l2_subdev *sd, int index,
1021 enum v4l2_mbus_pixelcode *code)
1022{
1023 if ((unsigned int)index >= ARRAY_SIZE(mt9t112_cfmts))
1024 return -EINVAL;
1025
1026 *code = mt9t112_cfmts[index].code;
1027 return 0;
1028}
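Aside (not part of the patch): a standalone sketch of the enum_mbus_fmt contract added throughout this series; the bridge driver increments the index until the sensor answers -EINVAL, collecting one media-bus code per step. The table and codes here are reduced stand-ins for mt9t112_cfmts[].

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

enum fake_mbus_code { FMT_YUYV8 = 1, FMT_YVYU8, FMT_RGB565 };

static const enum fake_mbus_code cfmts[] = { FMT_YUYV8, FMT_YVYU8, FMT_RGB565 };

static int enum_fmt(int index, enum fake_mbus_code *code)
{
	if ((unsigned int)index >= ARRAY_SIZE(cfmts))
		return -22;	/* -EINVAL */
	*code = cfmts[index];
	return 0;
}

int main(void)
{
	enum fake_mbus_code code;
	int i;

	for (i = 0; enum_fmt(i, &code) == 0; i++)
		printf("format %d: code %d\n", i, code);
	return 0;
}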
1029
1030static struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
1031 .s_stream = mt9t112_s_stream,
1032 .g_mbus_fmt = mt9t112_g_fmt,
1033 .s_mbus_fmt = mt9t112_s_fmt,
1034 .try_mbus_fmt = mt9t112_try_fmt,
1035 .cropcap = mt9t112_cropcap,
1036 .g_crop = mt9t112_g_crop,
1037 .s_crop = mt9t112_s_crop,
1038 .enum_mbus_fmt = mt9t112_enum_fmt,
1039};
1040
1041/************************************************************************
1042
1043
1044 i2c driver
1045
1046
1047************************************************************************/
1048static struct v4l2_subdev_ops mt9t112_subdev_ops = {
1049 .core = &mt9t112_subdev_core_ops,
1050 .video = &mt9t112_subdev_video_ops,
1051};
1052
1053static int mt9t112_camera_probe(struct soc_camera_device *icd,
1054 struct i2c_client *client)
1055{
1056 struct mt9t112_priv *priv = to_mt9t112(client);
1057 const char *devname;
1058 int chipid;
1059
1060 /*
1061 * We must have a parent by now. And it cannot be a wrong one.
1062 * So this entire test is completely redundant.
1063 */
1064 if (!icd->dev.parent ||
1065 to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
1066 return -ENODEV;
1067
1068 /*
1069 * check and show chip ID
1070 */
1071 mt9t112_reg_read(chipid, client, 0x0000);
1072
1073 switch (chipid) {
1074 case 0x2680:
1075 devname = "mt9t111";
1076 priv->model = V4L2_IDENT_MT9T111;
1077 break;
1078 case 0x2682:
1079 devname = "mt9t112";
1080 priv->model = V4L2_IDENT_MT9T112;
1081 break;
1082 default:
1083 dev_err(&client->dev, "Product ID error %04x\n", chipid);
1084 return -ENODEV;
1085 }
1086
1087 dev_info(&client->dev, "%s chip ID %04x\n", devname, chipid);
1088
1089 return 0;
1090}
1091
1092static int mt9t112_probe(struct i2c_client *client,
1093 const struct i2c_device_id *did)
1094{
1095 struct mt9t112_priv *priv;
1096 struct soc_camera_device *icd = client->dev.platform_data;
1097 struct soc_camera_link *icl;
1098 int ret;
1099
1100 if (!icd) {
1101 dev_err(&client->dev, "mt9t112: missing soc-camera data!\n");
1102 return -EINVAL;
1103 }
1104
1105 icl = to_soc_camera_link(icd);
1106 if (!icl || !icl->priv)
1107 return -EINVAL;
1108
1109 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1110 if (!priv)
1111 return -ENOMEM;
1112
1113 priv->info = icl->priv;
1114
1115 v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
1116
1117 icd->ops = &mt9t112_ops;
1118
1119 ret = mt9t112_camera_probe(icd, client);
1120 if (ret) {
1121 icd->ops = NULL;
1122 i2c_set_clientdata(client, NULL);
1123 kfree(priv);
1124 }
1125
1126 return ret;
1127}
1128
1129static int mt9t112_remove(struct i2c_client *client)
1130{
1131 struct mt9t112_priv *priv = to_mt9t112(client);
1132 struct soc_camera_device *icd = client->dev.platform_data;
1133
1134 icd->ops = NULL;
1135 i2c_set_clientdata(client, NULL);
1136 kfree(priv);
1137 return 0;
1138}
1139
1140static const struct i2c_device_id mt9t112_id[] = {
1141 { "mt9t112", 0 },
1142 { }
1143};
1144MODULE_DEVICE_TABLE(i2c, mt9t112_id);
1145
1146static struct i2c_driver mt9t112_i2c_driver = {
1147 .driver = {
1148 .name = "mt9t112",
1149 },
1150 .probe = mt9t112_probe,
1151 .remove = mt9t112_remove,
1152 .id_table = mt9t112_id,
1153};
1154
1155/************************************************************************
1156
1157
1158 module function
1159
1160
1161************************************************************************/
1162static int __init mt9t112_module_init(void)
1163{
1164 return i2c_add_driver(&mt9t112_i2c_driver);
1165}
1166
1167static void __exit mt9t112_module_exit(void)
1168{
1169 i2c_del_driver(&mt9t112_i2c_driver);
1170}
1171
1172module_init(mt9t112_module_init);
1173module_exit(mt9t112_module_exit);
1174
1175MODULE_DESCRIPTION("SoC Camera driver for mt9t112");
1176MODULE_AUTHOR("Kuninori Morimoto");
1177MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 995607f9d3ba..91df7ec91fb6 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -18,9 +18,11 @@
18#include <media/v4l2-chip-ident.h> 18#include <media/v4l2-chip-ident.h>
19#include <media/soc_camera.h> 19#include <media/soc_camera.h>
20 20
21/* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c 21/*
22 * mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
22 * The platform has to define struct i2c_board_info objects and link to them 23 * The platform has to define struct i2c_board_info objects and link to them
23 * from struct soc_camera_link */ 24 * from struct soc_camera_link
25 */
24 26
25static char *sensor_type; 27static char *sensor_type;
26module_param(sensor_type, charp, S_IRUGO); 28module_param(sensor_type, charp, S_IRUGO);
@@ -62,41 +64,49 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
62#define MT9V022_COLUMN_SKIP 1 64#define MT9V022_COLUMN_SKIP 1
63#define MT9V022_ROW_SKIP 4 65#define MT9V022_ROW_SKIP 4
64 66
65static const struct soc_camera_data_format mt9v022_colour_formats[] = { 67/* MT9V022 has only one fixed colorspace per pixelcode */
66 /* Order important: first natively supported, 68struct mt9v022_datafmt {
67 * second supported with a GPIO extender */ 69 enum v4l2_mbus_pixelcode code;
68 { 70 enum v4l2_colorspace colorspace;
69 .name = "Bayer (sRGB) 10 bit", 71};
70 .depth = 10, 72
71 .fourcc = V4L2_PIX_FMT_SBGGR16, 73/* Find a data format by a pixel code in an array */
72 .colorspace = V4L2_COLORSPACE_SRGB, 74static const struct mt9v022_datafmt *mt9v022_find_datafmt(
73 }, { 75 enum v4l2_mbus_pixelcode code, const struct mt9v022_datafmt *fmt,
74 .name = "Bayer (sRGB) 8 bit", 76 int n)
75 .depth = 8, 77{
76 .fourcc = V4L2_PIX_FMT_SBGGR8, 78 int i;
77 .colorspace = V4L2_COLORSPACE_SRGB, 79 for (i = 0; i < n; i++)
78 } 80 if (fmt[i].code == code)
81 return fmt + i;
82
83 return NULL;
84}
85
86static const struct mt9v022_datafmt mt9v022_colour_fmts[] = {
87 /*
88 * Order important: first natively supported,
89 * second supported with a GPIO extender
90 */
91 {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
92 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
79}; 93};
80 94
81static const struct soc_camera_data_format mt9v022_monochrome_formats[] = { 95static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = {
82 /* Order important - see above */ 96 /* Order important - see above */
83 { 97 {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
84 .name = "Monochrome 10 bit", 98 {V4L2_MBUS_FMT_GREY8_1X8, V4L2_COLORSPACE_JPEG},
85 .depth = 10,
86 .fourcc = V4L2_PIX_FMT_Y16,
87 }, {
88 .name = "Monochrome 8 bit",
89 .depth = 8,
90 .fourcc = V4L2_PIX_FMT_GREY,
91 },
92}; 99};
93 100
94struct mt9v022 { 101struct mt9v022 {
95 struct v4l2_subdev subdev; 102 struct v4l2_subdev subdev;
96 struct v4l2_rect rect; /* Sensor window */ 103 struct v4l2_rect rect; /* Sensor window */
97 __u32 fourcc; 104 const struct mt9v022_datafmt *fmt;
105 const struct mt9v022_datafmt *fmts;
106 int num_fmts;
98 int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */ 107 int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */
99 u16 chip_control; 108 u16 chip_control;
109 unsigned short y_skip_top; /* Lines to skip at the top */
100}; 110};
101 111
102static struct mt9v022 *to_mt9v022(const struct i2c_client *client) 112static struct mt9v022 *to_mt9v022(const struct i2c_client *client)
@@ -143,9 +153,11 @@ static int mt9v022_init(struct i2c_client *client)
143 struct mt9v022 *mt9v022 = to_mt9v022(client); 153 struct mt9v022 *mt9v022 = to_mt9v022(client);
144 int ret; 154 int ret;
145 155
146 /* Almost the default mode: master, parallel, simultaneous, and an 156 /*
157 * Almost the default mode: master, parallel, simultaneous, and an
147 * undocumented bit 0x200, which is present in table 7, but not in 8, 158 * undocumented bit 0x200, which is present in table 7, but not in 8,
148 * plus snapshot mode to disable scan for now */ 159 * plus snapshot mode to disable scan for now
160 */
149 mt9v022->chip_control |= 0x10; 161 mt9v022->chip_control |= 0x10;
150 ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control); 162 ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
151 if (!ret) 163 if (!ret)
@@ -265,12 +277,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
265 struct i2c_client *client = sd->priv; 277 struct i2c_client *client = sd->priv;
266 struct mt9v022 *mt9v022 = to_mt9v022(client); 278 struct mt9v022 *mt9v022 = to_mt9v022(client);
267 struct v4l2_rect rect = a->c; 279 struct v4l2_rect rect = a->c;
268 struct soc_camera_device *icd = client->dev.platform_data;
269 int ret; 280 int ret;
270 281
271 /* Bayer format - even size lengths */ 282 /* Bayer format - even size lengths */
272 if (mt9v022->fourcc == V4L2_PIX_FMT_SBGGR8 || 283 if (mt9v022->fmts == mt9v022_colour_fmts) {
273 mt9v022->fourcc == V4L2_PIX_FMT_SBGGR16) {
274 rect.width = ALIGN(rect.width, 2); 284 rect.width = ALIGN(rect.width, 2);
275 rect.height = ALIGN(rect.height, 2); 285 rect.height = ALIGN(rect.height, 2);
276 /* Let the user play with the starting pixel */ 286 /* Let the user play with the starting pixel */
@@ -287,10 +297,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
287 if (ret >= 0) { 297 if (ret >= 0) {
288 if (ret & 1) /* Autoexposure */ 298 if (ret & 1) /* Autoexposure */
289 ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, 299 ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH,
290 rect.height + icd->y_skip_top + 43); 300 rect.height + mt9v022->y_skip_top + 43);
291 else 301 else
292 ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH, 302 ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
293 rect.height + icd->y_skip_top + 43); 303 rect.height + mt9v022->y_skip_top + 43);
294 } 304 }
295 /* Setup frame format: defaults apart from width and height */ 305 /* Setup frame format: defaults apart from width and height */
296 if (!ret) 306 if (!ret)
@@ -298,8 +308,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
298 if (!ret) 308 if (!ret)
299 ret = reg_write(client, MT9V022_ROW_START, rect.top); 309 ret = reg_write(client, MT9V022_ROW_START, rect.top);
300 if (!ret) 310 if (!ret)
301 /* Default 94, Phytec driver says: 311 /*
302 * "width + horizontal blank >= 660" */ 312 * Default 94, Phytec driver says:
313 * "width + horizontal blank >= 660"
314 */
303 ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING, 315 ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING,
304 rect.width > 660 - 43 ? 43 : 316 rect.width > 660 - 43 ? 43 :
305 660 - rect.width); 317 660 - rect.width);
@@ -309,7 +321,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
309 ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width); 321 ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width);
310 if (!ret) 322 if (!ret)
311 ret = reg_write(client, MT9V022_WINDOW_HEIGHT, 323 ret = reg_write(client, MT9V022_WINDOW_HEIGHT,
312 rect.height + icd->y_skip_top); 324 rect.height + mt9v022->y_skip_top);
313 325
314 if (ret < 0) 326 if (ret < 0)
315 return ret; 327 return ret;
@@ -346,46 +358,48 @@ static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
346 return 0; 358 return 0;
347} 359}
348 360
349static int mt9v022_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 361static int mt9v022_g_fmt(struct v4l2_subdev *sd,
362 struct v4l2_mbus_framefmt *mf)
350{ 363{
351 struct i2c_client *client = sd->priv; 364 struct i2c_client *client = sd->priv;
352 struct mt9v022 *mt9v022 = to_mt9v022(client); 365 struct mt9v022 *mt9v022 = to_mt9v022(client);
353 struct v4l2_pix_format *pix = &f->fmt.pix;
354 366
355 pix->width = mt9v022->rect.width; 367 mf->width = mt9v022->rect.width;
356 pix->height = mt9v022->rect.height; 368 mf->height = mt9v022->rect.height;
357 pix->pixelformat = mt9v022->fourcc; 369 mf->code = mt9v022->fmt->code;
358 pix->field = V4L2_FIELD_NONE; 370 mf->colorspace = mt9v022->fmt->colorspace;
359 pix->colorspace = V4L2_COLORSPACE_SRGB; 371 mf->field = V4L2_FIELD_NONE;
360 372
361 return 0; 373 return 0;
362} 374}
363 375
364static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 376static int mt9v022_s_fmt(struct v4l2_subdev *sd,
377 struct v4l2_mbus_framefmt *mf)
365{ 378{
366 struct i2c_client *client = sd->priv; 379 struct i2c_client *client = sd->priv;
367 struct mt9v022 *mt9v022 = to_mt9v022(client); 380 struct mt9v022 *mt9v022 = to_mt9v022(client);
368 struct v4l2_pix_format *pix = &f->fmt.pix;
369 struct v4l2_crop a = { 381 struct v4l2_crop a = {
370 .c = { 382 .c = {
371 .left = mt9v022->rect.left, 383 .left = mt9v022->rect.left,
372 .top = mt9v022->rect.top, 384 .top = mt9v022->rect.top,
373 .width = pix->width, 385 .width = mf->width,
374 .height = pix->height, 386 .height = mf->height,
375 }, 387 },
376 }; 388 };
377 int ret; 389 int ret;
378 390
379 /* The caller provides a supported format, as verified per call to 391 /*
380 * icd->try_fmt(), datawidth is from our supported format list */ 392 * The caller provides a supported format, as verified per call to
381 switch (pix->pixelformat) { 393 * icd->try_fmt(), datawidth is from our supported format list
382 case V4L2_PIX_FMT_GREY: 394 */
383 case V4L2_PIX_FMT_Y16: 395 switch (mf->code) {
396 case V4L2_MBUS_FMT_GREY8_1X8:
397 case V4L2_MBUS_FMT_Y10_1X10:
384 if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATM) 398 if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATM)
385 return -EINVAL; 399 return -EINVAL;
386 break; 400 break;
387 case V4L2_PIX_FMT_SBGGR8: 401 case V4L2_MBUS_FMT_SBGGR8_1X8:
388 case V4L2_PIX_FMT_SBGGR16: 402 case V4L2_MBUS_FMT_SBGGR10_1X10:
389 if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC) 403 if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC)
390 return -EINVAL; 404 return -EINVAL;
391 break; 405 break;
@@ -399,26 +413,38 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
399 /* No support for scaling on this camera, just crop. */ 413 /* No support for scaling on this camera, just crop. */
400 ret = mt9v022_s_crop(sd, &a); 414 ret = mt9v022_s_crop(sd, &a);
401 if (!ret) { 415 if (!ret) {
402 pix->width = mt9v022->rect.width; 416 mf->width = mt9v022->rect.width;
403 pix->height = mt9v022->rect.height; 417 mf->height = mt9v022->rect.height;
404 mt9v022->fourcc = pix->pixelformat; 418 mt9v022->fmt = mt9v022_find_datafmt(mf->code,
419 mt9v022->fmts, mt9v022->num_fmts);
420 mf->colorspace = mt9v022->fmt->colorspace;
405 } 421 }
406 422
407 return ret; 423 return ret;
408} 424}
409 425
410static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 426static int mt9v022_try_fmt(struct v4l2_subdev *sd,
427 struct v4l2_mbus_framefmt *mf)
411{ 428{
412 struct i2c_client *client = sd->priv; 429 struct i2c_client *client = sd->priv;
413 struct soc_camera_device *icd = client->dev.platform_data; 430 struct mt9v022 *mt9v022 = to_mt9v022(client);
414 struct v4l2_pix_format *pix = &f->fmt.pix; 431 const struct mt9v022_datafmt *fmt;
415 int align = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || 432 int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
416 pix->pixelformat == V4L2_PIX_FMT_SBGGR16; 433 mf->code == V4L2_MBUS_FMT_SBGGR10_1X10;
417 434
418 v4l_bound_align_image(&pix->width, MT9V022_MIN_WIDTH, 435 v4l_bound_align_image(&mf->width, MT9V022_MIN_WIDTH,
419 MT9V022_MAX_WIDTH, align, 436 MT9V022_MAX_WIDTH, align,
420 &pix->height, MT9V022_MIN_HEIGHT + icd->y_skip_top, 437 &mf->height, MT9V022_MIN_HEIGHT + mt9v022->y_skip_top,
421 MT9V022_MAX_HEIGHT + icd->y_skip_top, align, 0); 438 MT9V022_MAX_HEIGHT + mt9v022->y_skip_top, align, 0);
439
440 fmt = mt9v022_find_datafmt(mf->code, mt9v022->fmts,
441 mt9v022->num_fmts);
442 if (!fmt) {
443 fmt = mt9v022->fmt;
444 mf->code = fmt->code;
445 }
446
447 mf->colorspace = fmt->colorspace;
422 448
423 return 0; 449 return 0;
424} 450}
@@ -635,8 +661,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
635 48 + range / 2) / range + 16; 661 48 + range / 2) / range + 16;
636 if (gain >= 32) 662 if (gain >= 32)
637 gain &= ~1; 663 gain &= ~1;
638	/* The user wants to set gain manually, hope she		 664		/*
639	 * knows what she's doing... Switch AGC off. */			 665		 * The user wants to set gain manually, hope she
									 666		 * knows what she's doing... Switch AGC off.
667 */
640 668
641 if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0) 669 if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
642 return -EIO; 670 return -EIO;
@@ -655,8 +683,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
655 unsigned long range = qctrl->maximum - qctrl->minimum; 683 unsigned long range = qctrl->maximum - qctrl->minimum;
656 unsigned long shutter = ((ctrl->value - qctrl->minimum) * 684 unsigned long shutter = ((ctrl->value - qctrl->minimum) *
657 479 + range / 2) / range + 1; 685 479 + range / 2) / range + 1;
658	/* The user wants to set shutter width manually, hope		 686		/*
659	 * she knows what she's doing... Switch AEC off. */		 687		 * The user wants to set shutter width manually, hope
									 688		 * she knows what she's doing... Switch AEC off.
689 */
660 690
661 if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0) 691 if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0)
662 return -EIO; 692 return -EIO;
@@ -689,8 +719,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
689 return 0; 719 return 0;
690} 720}
691 721
692/* Interface active, can use i2c. If it fails, it can indeed mean that	 722/*
693 * this wasn't our capture interface, so we wait for the right one */	 723 * Interface active, can use i2c. If it fails, it can indeed mean that
										 724 * this wasn't our capture interface, so we wait for the right one
725 */
694static int mt9v022_video_probe(struct soc_camera_device *icd, 726static int mt9v022_video_probe(struct soc_camera_device *icd,
695 struct i2c_client *client) 727 struct i2c_client *client)
696{ 728{
@@ -733,17 +765,17 @@ static int mt9v022_video_probe(struct soc_camera_device *icd,
733 !strcmp("color", sensor_type))) { 765 !strcmp("color", sensor_type))) {
734 ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11); 766 ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11);
735 mt9v022->model = V4L2_IDENT_MT9V022IX7ATC; 767 mt9v022->model = V4L2_IDENT_MT9V022IX7ATC;
736 icd->formats = mt9v022_colour_formats; 768 mt9v022->fmts = mt9v022_colour_fmts;
737 } else { 769 } else {
738 ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 0x11); 770 ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 0x11);
739 mt9v022->model = V4L2_IDENT_MT9V022IX7ATM; 771 mt9v022->model = V4L2_IDENT_MT9V022IX7ATM;
740 icd->formats = mt9v022_monochrome_formats; 772 mt9v022->fmts = mt9v022_monochrome_fmts;
741 } 773 }
742 774
743 if (ret < 0) 775 if (ret < 0)
744 goto ei2c; 776 goto ei2c;
745 777
746 icd->num_formats = 0; 778 mt9v022->num_fmts = 0;
747 779
748 /* 780 /*
749 * This is a 10bit sensor, so by default we only allow 10bit. 781 * This is a 10bit sensor, so by default we only allow 10bit.
@@ -756,14 +788,14 @@ static int mt9v022_video_probe(struct soc_camera_device *icd,
756 flags = SOCAM_DATAWIDTH_10; 788 flags = SOCAM_DATAWIDTH_10;
757 789
758 if (flags & SOCAM_DATAWIDTH_10) 790 if (flags & SOCAM_DATAWIDTH_10)
759 icd->num_formats++; 791 mt9v022->num_fmts++;
760 else 792 else
761 icd->formats++; 793 mt9v022->fmts++;
762 794
763 if (flags & SOCAM_DATAWIDTH_8) 795 if (flags & SOCAM_DATAWIDTH_8)
764 icd->num_formats++; 796 mt9v022->num_fmts++;
765 797
766 mt9v022->fourcc = icd->formats->fourcc; 798 mt9v022->fmt = &mt9v022->fmts[0];
767 799
768 dev_info(&client->dev, "Detected a MT9V022 chip ID %x, %s sensor\n", 800 dev_info(&client->dev, "Detected a MT9V022 chip ID %x, %s sensor\n",
769 data, mt9v022->model == V4L2_IDENT_MT9V022IX7ATM ? 801 data, mt9v022->model == V4L2_IDENT_MT9V022IX7ATM ?
@@ -787,6 +819,16 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
787 icl->free_bus(icl); 819 icl->free_bus(icl);
788} 820}
789 821
822static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
823{
824 struct i2c_client *client = sd->priv;
825 struct mt9v022 *mt9v022 = to_mt9v022(client);
826
827 *lines = mt9v022->y_skip_top;
828
829 return 0;
830}
831
790static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = { 832static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
791 .g_ctrl = mt9v022_g_ctrl, 833 .g_ctrl = mt9v022_g_ctrl,
792 .s_ctrl = mt9v022_s_ctrl, 834 .s_ctrl = mt9v022_s_ctrl,
@@ -797,19 +839,38 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
797#endif 839#endif
798}; 840};
799 841
842static int mt9v022_enum_fmt(struct v4l2_subdev *sd, int index,
843 enum v4l2_mbus_pixelcode *code)
844{
845 struct i2c_client *client = sd->priv;
846 struct mt9v022 *mt9v022 = to_mt9v022(client);
847
848 if ((unsigned int)index >= mt9v022->num_fmts)
849 return -EINVAL;
850
851 *code = mt9v022->fmts[index].code;
852 return 0;
853}
854
800static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { 855static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
801 .s_stream = mt9v022_s_stream, 856 .s_stream = mt9v022_s_stream,
802 .s_fmt = mt9v022_s_fmt, 857 .s_mbus_fmt = mt9v022_s_fmt,
803 .g_fmt = mt9v022_g_fmt, 858 .g_mbus_fmt = mt9v022_g_fmt,
804 .try_fmt = mt9v022_try_fmt, 859 .try_mbus_fmt = mt9v022_try_fmt,
805 .s_crop = mt9v022_s_crop, 860 .s_crop = mt9v022_s_crop,
806 .g_crop = mt9v022_g_crop, 861 .g_crop = mt9v022_g_crop,
807 .cropcap = mt9v022_cropcap, 862 .cropcap = mt9v022_cropcap,
863 .enum_mbus_fmt = mt9v022_enum_fmt,
864};
865
866static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
867 .g_skip_top_lines = mt9v022_g_skip_top_lines,
808}; 868};
809 869
810static struct v4l2_subdev_ops mt9v022_subdev_ops = { 870static struct v4l2_subdev_ops mt9v022_subdev_ops = {
811 .core = &mt9v022_subdev_core_ops, 871 .core = &mt9v022_subdev_core_ops,
812 .video = &mt9v022_subdev_video_ops, 872 .video = &mt9v022_subdev_video_ops,
873 .sensor = &mt9v022_subdev_sensor_ops,
813}; 874};
814 875
815static int mt9v022_probe(struct i2c_client *client, 876static int mt9v022_probe(struct i2c_client *client,
@@ -851,8 +912,7 @@ static int mt9v022_probe(struct i2c_client *client,
851 * MT9V022 _really_ corrupts the first read out line. 912 * MT9V022 _really_ corrupts the first read out line.
852 * TODO: verify on i.MX31 913 * TODO: verify on i.MX31
853 */ 914 */
854 icd->y_skip_top = 1; 915 mt9v022->y_skip_top = 1;
855
856 mt9v022->rect.left = MT9V022_COLUMN_SKIP; 916 mt9v022->rect.left = MT9V022_COLUMN_SKIP;
857 mt9v022->rect.top = MT9V022_ROW_SKIP; 917 mt9v022->rect.top = MT9V022_ROW_SKIP;
858 mt9v022->rect.width = MT9V022_MAX_WIDTH; 918 mt9v022->rect.width = MT9V022_MAX_WIDTH;
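
Note: the mt9v022 portion of this patch replaces the driver's private soc_camera_data_format table (fourcc plus depth) with a table of media-bus codes and colorspaces, and routes negotiation through the new g_mbus_fmt/s_mbus_fmt/try_mbus_fmt operations. The fragment below is a minimal user-space sketch of the lookup-and-fallback pattern that mt9v022_try_fmt() now follows; the enum values, table contents and function names are illustrative stand-ins, not the kernel definitions.

    /* Sketch: look the requested code up, fall back to the active format. */
    #include <stdio.h>
    #include <stddef.h>

    enum pixelcode { CODE_SBGGR10, CODE_SBGGR8, CODE_GREY8 };

    struct datafmt {
        enum pixelcode code;
        int colorspace;
    };

    static const struct datafmt colour_fmts[] = {
        { CODE_SBGGR10, 1 },    /* natively supported */
        { CODE_SBGGR8,  1 },    /* supported with a GPIO extender */
    };

    /* Same role as mt9v022_find_datafmt(): NULL when the code is unknown */
    static const struct datafmt *find_datafmt(enum pixelcode code,
                                              const struct datafmt *fmt, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            if (fmt[i].code == code)
                return &fmt[i];
        return NULL;
    }

    int main(void)
    {
        const struct datafmt *cur = &colour_fmts[0];    /* currently set format */
        const struct datafmt *fmt;

        fmt = find_datafmt(CODE_GREY8, colour_fmts, 2);
        if (!fmt)
            fmt = cur;    /* try_fmt falls back to the active format */

        printf("negotiated code %d, colorspace %d\n", fmt->code, fmt->colorspace);
        return 0;
    }
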
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 72802291e812..2ba14fb5b031 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -37,6 +37,7 @@
37#include <media/v4l2-common.h> 37#include <media/v4l2-common.h>
38#include <media/v4l2-dev.h> 38#include <media/v4l2-dev.h>
39#include <media/videobuf-dma-contig.h> 39#include <media/videobuf-dma-contig.h>
40#include <media/soc_mediabus.h>
40 41
41#include <asm/dma.h> 42#include <asm/dma.h>
42#include <asm/fiq.h> 43#include <asm/fiq.h>
@@ -94,14 +95,16 @@
94/* buffer for one video frame */ 95/* buffer for one video frame */
95struct mx1_buffer { 96struct mx1_buffer {
96 /* common v4l buffer stuff -- must be first */ 97 /* common v4l buffer stuff -- must be first */
97 struct videobuf_buffer vb; 98 struct videobuf_buffer vb;
98 const struct soc_camera_data_format *fmt; 99 enum v4l2_mbus_pixelcode code;
99 int inwork; 100 int inwork;
100}; 101};
101 102
102/* i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor 103/*
104 * i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor
103 * Interface. If anyone ever builds hardware to enable more than 105 * Interface. If anyone ever builds hardware to enable more than
104 * one camera, they will have to modify this driver too */ 106 * one camera, they will have to modify this driver too
107 */
105struct mx1_camera_dev { 108struct mx1_camera_dev {
106 struct soc_camera_host soc_host; 109 struct soc_camera_host soc_host;
107 struct soc_camera_device *icd; 110 struct soc_camera_device *icd;
@@ -126,9 +129,13 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
126 unsigned int *size) 129 unsigned int *size)
127{ 130{
128 struct soc_camera_device *icd = vq->priv_data; 131 struct soc_camera_device *icd = vq->priv_data;
132 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
133 icd->current_fmt->host_fmt);
134
135 if (bytes_per_line < 0)
136 return bytes_per_line;
129 137
130 *size = icd->user_width * icd->user_height * 138 *size = bytes_per_line * icd->user_height;
131 ((icd->current_fmt->depth + 7) >> 3);
132 139
133 if (!*count) 140 if (!*count)
134 *count = 32; 141 *count = 32;
@@ -151,8 +158,10 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
151 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 158 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
152 vb, vb->baddr, vb->bsize); 159 vb, vb->baddr, vb->bsize);
153 160
154 /* This waits until this buffer is out of danger, i.e., until it is no 161 /*
155 * longer in STATE_QUEUED or STATE_ACTIVE */ 162 * This waits until this buffer is out of danger, i.e., until it is no
163 * longer in STATE_QUEUED or STATE_ACTIVE
164 */
156 videobuf_waiton(vb, 0, 0); 165 videobuf_waiton(vb, 0, 0);
157 videobuf_dma_contig_free(vq, vb); 166 videobuf_dma_contig_free(vq, vb);
158 167
@@ -165,6 +174,11 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
165 struct soc_camera_device *icd = vq->priv_data; 174 struct soc_camera_device *icd = vq->priv_data;
166 struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb); 175 struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
167 int ret; 176 int ret;
177 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
178 icd->current_fmt->host_fmt);
179
180 if (bytes_per_line < 0)
181 return bytes_per_line;
168 182
169 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 183 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
170 vb, vb->baddr, vb->bsize); 184 vb, vb->baddr, vb->bsize);
@@ -174,22 +188,24 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
174 188
175 BUG_ON(NULL == icd->current_fmt); 189 BUG_ON(NULL == icd->current_fmt);
176 190
177 /* I think, in buf_prepare you only have to protect global data, 191 /*
178 * the actual buffer is yours */ 192 * I think, in buf_prepare you only have to protect global data,
193 * the actual buffer is yours
194 */
179 buf->inwork = 1; 195 buf->inwork = 1;
180 196
181 if (buf->fmt != icd->current_fmt || 197 if (buf->code != icd->current_fmt->code ||
182 vb->width != icd->user_width || 198 vb->width != icd->user_width ||
183 vb->height != icd->user_height || 199 vb->height != icd->user_height ||
184 vb->field != field) { 200 vb->field != field) {
185 buf->fmt = icd->current_fmt; 201 buf->code = icd->current_fmt->code;
186 vb->width = icd->user_width; 202 vb->width = icd->user_width;
187 vb->height = icd->user_height; 203 vb->height = icd->user_height;
188 vb->field = field; 204 vb->field = field;
189 vb->state = VIDEOBUF_NEEDS_INIT; 205 vb->state = VIDEOBUF_NEEDS_INIT;
190 } 206 }
191 207
192 vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3); 208 vb->size = bytes_per_line * vb->height;
193 if (0 != vb->baddr && vb->bsize < vb->size) { 209 if (0 != vb->baddr && vb->bsize < vb->size) {
194 ret = -EINVAL; 210 ret = -EINVAL;
195 goto out; 211 goto out;
@@ -381,8 +397,10 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
381 397
382 lcdclk = clk_get_rate(pcdev->clk); 398 lcdclk = clk_get_rate(pcdev->clk);
383 399
384 /* We verify platform_mclk_10khz != 0, so if anyone breaks it, here 400 /*
385 * they get a nice Oops */ 401 * We verify platform_mclk_10khz != 0, so if anyone breaks it, here
402 * they get a nice Oops
403 */
386 div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1; 404 div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
387 405
388 dev_dbg(pcdev->icd->dev.parent, 406 dev_dbg(pcdev->icd->dev.parent,
@@ -420,8 +438,10 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
420 clk_disable(pcdev->clk); 438 clk_disable(pcdev->clk);
421} 439}
422 440
423/* The following two functions absolutely depend on the fact, that 441/*
424 * there can be only one camera on i.MX1/i.MXL camera sensor interface */ 442 * The following two functions absolutely depend on the fact, that
443 * there can be only one camera on i.MX1/i.MXL camera sensor interface
444 */
425static int mx1_camera_add_device(struct soc_camera_device *icd) 445static int mx1_camera_add_device(struct soc_camera_device *icd)
426{ 446{
427 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 447 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
@@ -487,12 +507,10 @@ static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
487 507
488 /* MX1 supports only 8bit buswidth */ 508 /* MX1 supports only 8bit buswidth */
489 common_flags = soc_camera_bus_param_compatible(camera_flags, 509 common_flags = soc_camera_bus_param_compatible(camera_flags,
490 CSI_BUS_FLAGS); 510 CSI_BUS_FLAGS);
491 if (!common_flags) 511 if (!common_flags)
492 return -EINVAL; 512 return -EINVAL;
493 513
494 icd->buswidth = 8;
495
496	/* Make choices based on platform choice */			 514	/* Make choices based on platform choice */
497 if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) && 515 if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
498 (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) { 516 (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
@@ -545,7 +563,8 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
545 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 563 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
546 const struct soc_camera_format_xlate *xlate; 564 const struct soc_camera_format_xlate *xlate;
547 struct v4l2_pix_format *pix = &f->fmt.pix; 565 struct v4l2_pix_format *pix = &f->fmt.pix;
548 int ret; 566 struct v4l2_mbus_framefmt mf;
567 int ret, buswidth;
549 568
550 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 569 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
551 if (!xlate) { 570 if (!xlate) {
@@ -554,12 +573,33 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
554 return -EINVAL; 573 return -EINVAL;
555 } 574 }
556 575
557 ret = v4l2_subdev_call(sd, video, s_fmt, f); 576 buswidth = xlate->host_fmt->bits_per_sample;
558 if (!ret) { 577 if (buswidth > 8) {
559 icd->buswidth = xlate->buswidth; 578 dev_warn(icd->dev.parent,
560 icd->current_fmt = xlate->host_fmt; 579 "bits-per-sample %d for format %x unsupported\n",
580 buswidth, pix->pixelformat);
581 return -EINVAL;
561 } 582 }
562 583
584 mf.width = pix->width;
585 mf.height = pix->height;
586 mf.field = pix->field;
587 mf.colorspace = pix->colorspace;
588 mf.code = xlate->code;
589
590 ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
591 if (ret < 0)
592 return ret;
593
594 if (mf.code != xlate->code)
595 return -EINVAL;
596
597 pix->width = mf.width;
598 pix->height = mf.height;
599 pix->field = mf.field;
600 pix->colorspace = mf.colorspace;
601 icd->current_fmt = xlate;
602
563 return ret; 603 return ret;
564} 604}
565 605
@@ -567,10 +607,36 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd,
567 struct v4l2_format *f) 607 struct v4l2_format *f)
568{ 608{
569 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 609 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
610 const struct soc_camera_format_xlate *xlate;
611 struct v4l2_pix_format *pix = &f->fmt.pix;
612 struct v4l2_mbus_framefmt mf;
613 int ret;
570 /* TODO: limit to mx1 hardware capabilities */ 614 /* TODO: limit to mx1 hardware capabilities */
571 615
616 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
617 if (!xlate) {
618 dev_warn(icd->dev.parent, "Format %x not found\n",
619 pix->pixelformat);
620 return -EINVAL;
621 }
622
623 mf.width = pix->width;
624 mf.height = pix->height;
625 mf.field = pix->field;
626 mf.colorspace = pix->colorspace;
627 mf.code = xlate->code;
628
572 /* limit to sensor capabilities */ 629 /* limit to sensor capabilities */
573 return v4l2_subdev_call(sd, video, try_fmt, f); 630 ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
631 if (ret < 0)
632 return ret;
633
634 pix->width = mf.width;
635 pix->height = mf.height;
636 pix->field = mf.field;
637 pix->colorspace = mf.colorspace;
638
639 return 0;
574} 640}
575 641
576static int mx1_camera_reqbufs(struct soc_camera_file *icf, 642static int mx1_camera_reqbufs(struct soc_camera_file *icf,
@@ -578,10 +644,12 @@ static int mx1_camera_reqbufs(struct soc_camera_file *icf,
578{ 644{
579 int i; 645 int i;
580 646
581 /* This is for locking debugging only. I removed spinlocks and now I 647 /*
648 * This is for locking debugging only. I removed spinlocks and now I
582 * check whether .prepare is ever called on a linked buffer, or whether 649 * check whether .prepare is ever called on a linked buffer, or whether
583 * a dma IRQ can occur for an in-work or unlinked buffer. Until now 650 * a dma IRQ can occur for an in-work or unlinked buffer. Until now
584 * it hadn't triggered */ 651 * it hadn't triggered
652 */
585 for (i = 0; i < p->count; i++) { 653 for (i = 0; i < p->count; i++) {
586 struct mx1_buffer *buf = container_of(icf->vb_vidq.bufs[i], 654 struct mx1_buffer *buf = container_of(icf->vb_vidq.bufs[i],
587 struct mx1_buffer, vb); 655 struct mx1_buffer, vb);
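
Note: in mx1_camera.c (and again in mx3_camera.c below) buffer sizes are no longer derived from the fourcc depth; the host asks soc_mbus_bytes_per_line() for the line size that the chosen media-bus format produces in memory. The user-space sketch below models that helper's role under the packing rules implied by this patch (full-sample packing, or one sample padded into two bytes); it is an illustration of the behaviour assumed here, not the kernel implementation.

    /* Sketch: line size follows media-bus sample width and packing. */
    #include <stdio.h>

    enum packing {
        PACKING_NONE,
        PACKING_2X8_PADHI,
        PACKING_2X8_PADLO,
        PACKING_EXTEND16,
    };

    struct mbus_pixelfmt {
        int bits_per_sample;
        enum packing packing;
    };

    static int bytes_per_line(unsigned int width, const struct mbus_pixelfmt *f)
    {
        switch (f->packing) {
        case PACKING_NONE:
            return width * f->bits_per_sample / 8;    /* e.g. 8-bit grey */
        case PACKING_2X8_PADHI:
        case PACKING_2X8_PADLO:
        case PACKING_EXTEND16:
            return width * 2;    /* one sample stored in two bytes */
        }
        return -1;    /* unknown packing */
    }

    int main(void)
    {
        struct mbus_pixelfmt grey8   = { 8,  PACKING_NONE };
        struct mbus_pixelfmt bayer10 = { 10, PACKING_EXTEND16 };

        /* For 640-pixel lines: 640 and 1280 bytes respectively */
        printf("%d %d\n", bytes_per_line(640, &grey8),
               bytes_per_line(640, &bayer10));
        return 0;
    }
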
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 7db82bdf6f31..bd297f567dc7 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -23,6 +23,7 @@
23#include <media/v4l2-dev.h> 23#include <media/v4l2-dev.h>
24#include <media/videobuf-dma-contig.h> 24#include <media/videobuf-dma-contig.h>
25#include <media/soc_camera.h> 25#include <media/soc_camera.h>
26#include <media/soc_mediabus.h>
26 27
27#include <mach/ipu.h> 28#include <mach/ipu.h>
28#include <mach/mx3_camera.h> 29#include <mach/mx3_camera.h>
@@ -63,7 +64,7 @@
63struct mx3_camera_buffer { 64struct mx3_camera_buffer {
64 /* common v4l buffer stuff -- must be first */ 65 /* common v4l buffer stuff -- must be first */
65 struct videobuf_buffer vb; 66 struct videobuf_buffer vb;
66 const struct soc_camera_data_format *fmt; 67 enum v4l2_mbus_pixelcode code;
67 68
68 /* One descriptot per scatterlist (per frame) */ 69 /* One descriptot per scatterlist (per frame) */
69 struct dma_async_tx_descriptor *txd; 70 struct dma_async_tx_descriptor *txd;
@@ -118,8 +119,6 @@ struct dma_chan_request {
118 enum ipu_channel id; 119 enum ipu_channel id;
119}; 120};
120 121
121static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt);
122
123static u32 csi_reg_read(struct mx3_camera_dev *mx3, off_t reg) 122static u32 csi_reg_read(struct mx3_camera_dev *mx3, off_t reg)
124{ 123{
125 return __raw_readl(mx3->base + reg); 124 return __raw_readl(mx3->base + reg);
@@ -211,17 +210,16 @@ static int mx3_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
211 struct soc_camera_device *icd = vq->priv_data; 210 struct soc_camera_device *icd = vq->priv_data;
212 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 211 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
213 struct mx3_camera_dev *mx3_cam = ici->priv; 212 struct mx3_camera_dev *mx3_cam = ici->priv;
214 /* 213 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
215 * bits-per-pixel (depth) as specified in camera's pixel format does 214 icd->current_fmt->host_fmt);
216 * not necessarily match what the camera interface writes to RAM, but 215
217 * it should be good enough for now. 216 if (bytes_per_line < 0)
218 */ 217 return bytes_per_line;
219 unsigned int bpp = DIV_ROUND_UP(icd->current_fmt->depth, 8);
220 218
221 if (!mx3_cam->idmac_channel[0]) 219 if (!mx3_cam->idmac_channel[0])
222 return -EINVAL; 220 return -EINVAL;
223 221
224 *size = icd->user_width * icd->user_height * bpp; 222 *size = bytes_per_line * icd->user_height;
225 223
226 if (!*count) 224 if (!*count)
227 *count = 32; 225 *count = 32;
@@ -241,21 +239,26 @@ static int mx3_videobuf_prepare(struct videobuf_queue *vq,
241 struct mx3_camera_dev *mx3_cam = ici->priv; 239 struct mx3_camera_dev *mx3_cam = ici->priv;
242 struct mx3_camera_buffer *buf = 240 struct mx3_camera_buffer *buf =
243 container_of(vb, struct mx3_camera_buffer, vb); 241 container_of(vb, struct mx3_camera_buffer, vb);
244 /* current_fmt _must_ always be set */ 242 size_t new_size;
245 size_t new_size = icd->user_width * icd->user_height *
246 ((icd->current_fmt->depth + 7) >> 3);
247 int ret; 243 int ret;
244 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
245 icd->current_fmt->host_fmt);
246
247 if (bytes_per_line < 0)
248 return bytes_per_line;
249
250 new_size = bytes_per_line * icd->user_height;
248 251
249 /* 252 /*
250 * I think, in buf_prepare you only have to protect global data, 253 * I think, in buf_prepare you only have to protect global data,
251 * the actual buffer is yours 254 * the actual buffer is yours
252 */ 255 */
253 256
254 if (buf->fmt != icd->current_fmt || 257 if (buf->code != icd->current_fmt->code ||
255 vb->width != icd->user_width || 258 vb->width != icd->user_width ||
256 vb->height != icd->user_height || 259 vb->height != icd->user_height ||
257 vb->field != field) { 260 vb->field != field) {
258 buf->fmt = icd->current_fmt; 261 buf->code = icd->current_fmt->code;
259 vb->width = icd->user_width; 262 vb->width = icd->user_width;
260 vb->height = icd->user_height; 263 vb->height = icd->user_height;
261 vb->field = field; 264 vb->field = field;
@@ -348,13 +351,13 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq,
348 struct dma_async_tx_descriptor *txd = buf->txd; 351 struct dma_async_tx_descriptor *txd = buf->txd;
349 struct idmac_channel *ichan = to_idmac_chan(txd->chan); 352 struct idmac_channel *ichan = to_idmac_chan(txd->chan);
350 struct idmac_video_param *video = &ichan->params.video; 353 struct idmac_video_param *video = &ichan->params.video;
351 const struct soc_camera_data_format *data_fmt = icd->current_fmt;
352 dma_cookie_t cookie; 354 dma_cookie_t cookie;
355 u32 fourcc = icd->current_fmt->host_fmt->fourcc;
353 356
354 BUG_ON(!irqs_disabled()); 357 BUG_ON(!irqs_disabled());
355 358
356 /* This is the configuration of one sg-element */ 359 /* This is the configuration of one sg-element */
357 video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc); 360 video->out_pixel_fmt = fourcc_to_ipu_pix(fourcc);
358 video->out_width = icd->user_width; 361 video->out_width = icd->user_width;
359 video->out_height = icd->user_height; 362 video->out_height = icd->user_height;
360 video->out_stride = icd->user_width; 363 video->out_stride = icd->user_width;
@@ -564,30 +567,37 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
564 SOCAM_DATA_ACTIVE_HIGH | 567 SOCAM_DATA_ACTIVE_HIGH |
565 SOCAM_DATA_ACTIVE_LOW; 568 SOCAM_DATA_ACTIVE_LOW;
566 569
567 /* If requested data width is supported by the platform, use it or any 570 /*
568	 * possible lower value - i.MX31 is smart enough to shift bits */	 571	 * possible lower value - i.MX31 is smart enough to shift bits
										 572	 * possible lower value - i.MX31 is smart enough to shift bits
573 */
574 if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
575 *flags |= SOCAM_DATAWIDTH_15 | SOCAM_DATAWIDTH_10 |
576 SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
577 else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
578 *flags |= SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8 |
579 SOCAM_DATAWIDTH_4;
580 else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
581 *flags |= SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
582 else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)
583 *flags |= SOCAM_DATAWIDTH_4;
584
569 switch (buswidth) { 585 switch (buswidth) {
570 case 15: 586 case 15:
571 if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)) 587 if (!(*flags & SOCAM_DATAWIDTH_15))
572 return -EINVAL; 588 return -EINVAL;
573 *flags |= SOCAM_DATAWIDTH_15 | SOCAM_DATAWIDTH_10 |
574 SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
575 break; 589 break;
576 case 10: 590 case 10:
577 if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)) 591 if (!(*flags & SOCAM_DATAWIDTH_10))
578 return -EINVAL; 592 return -EINVAL;
579 *flags |= SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8 |
580 SOCAM_DATAWIDTH_4;
581 break; 593 break;
582 case 8: 594 case 8:
583 if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)) 595 if (!(*flags & SOCAM_DATAWIDTH_8))
584 return -EINVAL; 596 return -EINVAL;
585 *flags |= SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
586 break; 597 break;
587 case 4: 598 case 4:
588 if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)) 599 if (!(*flags & SOCAM_DATAWIDTH_4))
589 return -EINVAL; 600 return -EINVAL;
590 *flags |= SOCAM_DATAWIDTH_4;
591 break; 601 break;
592 default: 602 default:
593 dev_warn(mx3_cam->soc_host.v4l2_dev.dev, 603 dev_warn(mx3_cam->soc_host.v4l2_dev.dev,
@@ -636,91 +646,92 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
636 pdata->dma_dev == chan->device->dev; 646 pdata->dma_dev == chan->device->dev;
637} 647}
638 648
639static const struct soc_camera_data_format mx3_camera_formats[] = { 649static const struct soc_mbus_pixelfmt mx3_camera_formats[] = {
640 { 650 {
641 .name = "Bayer (sRGB) 8 bit", 651 .fourcc = V4L2_PIX_FMT_SBGGR8,
642 .depth = 8, 652 .name = "Bayer BGGR (sRGB) 8 bit",
643 .fourcc = V4L2_PIX_FMT_SBGGR8, 653 .bits_per_sample = 8,
644 .colorspace = V4L2_COLORSPACE_SRGB, 654 .packing = SOC_MBUS_PACKING_NONE,
655 .order = SOC_MBUS_ORDER_LE,
645 }, { 656 }, {
646 .name = "Monochrome 8 bit", 657 .fourcc = V4L2_PIX_FMT_GREY,
647 .depth = 8, 658 .name = "Monochrome 8 bit",
648 .fourcc = V4L2_PIX_FMT_GREY, 659 .bits_per_sample = 8,
649 .colorspace = V4L2_COLORSPACE_JPEG, 660 .packing = SOC_MBUS_PACKING_NONE,
661 .order = SOC_MBUS_ORDER_LE,
650 }, 662 },
651}; 663};
652 664
653static bool buswidth_supported(struct soc_camera_host *ici, int depth) 665/* This will be corrected as we get more formats */
666static bool mx3_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
654{ 667{
655 struct mx3_camera_dev *mx3_cam = ici->priv; 668 return fmt->packing == SOC_MBUS_PACKING_NONE ||
656 669 (fmt->bits_per_sample == 8 &&
657 switch (depth) { 670 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
658 case 4: 671 (fmt->bits_per_sample > 8 &&
659 return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4); 672 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
660 case 8:
661 return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8);
662 case 10:
663 return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10);
664 case 15:
665 return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15);
666 }
667 return false;
668} 673}
669 674
670static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx, 675static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx,
671 struct soc_camera_format_xlate *xlate) 676 struct soc_camera_format_xlate *xlate)
672{ 677{
673 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 678 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
674 int formats = 0, buswidth, ret; 679 struct device *dev = icd->dev.parent;
680 int formats = 0, ret;
681 enum v4l2_mbus_pixelcode code;
682 const struct soc_mbus_pixelfmt *fmt;
675 683
676 buswidth = icd->formats[idx].depth; 684 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
685 if (ret < 0)
686 /* No more formats */
687 return 0;
677 688
678 if (!buswidth_supported(ici, buswidth)) 689 fmt = soc_mbus_get_fmtdesc(code);
690 if (!fmt) {
691 dev_err(icd->dev.parent,
692 "Invalid format code #%d: %d\n", idx, code);
679 return 0; 693 return 0;
694 }
680 695
681 ret = mx3_camera_try_bus_param(icd, buswidth); 696 /* This also checks support for the requested bits-per-sample */
697 ret = mx3_camera_try_bus_param(icd, fmt->bits_per_sample);
682 if (ret < 0) 698 if (ret < 0)
683 return 0; 699 return 0;
684 700
685 switch (icd->formats[idx].fourcc) { 701 switch (code) {
686 case V4L2_PIX_FMT_SGRBG10: 702 case V4L2_MBUS_FMT_SBGGR10_1X10:
687 formats++; 703 formats++;
688 if (xlate) { 704 if (xlate) {
689 xlate->host_fmt = &mx3_camera_formats[0]; 705 xlate->host_fmt = &mx3_camera_formats[0];
690 xlate->cam_fmt = icd->formats + idx; 706 xlate->code = code;
691 xlate->buswidth = buswidth;
692 xlate++; 707 xlate++;
693 dev_dbg(icd->dev.parent, 708 dev_dbg(dev, "Providing format %s using code %d\n",
694 "Providing format %s using %s\n", 709 mx3_camera_formats[0].name, code);
695 mx3_camera_formats[0].name,
696 icd->formats[idx].name);
697 } 710 }
698 goto passthrough; 711 break;
699 case V4L2_PIX_FMT_Y16: 712 case V4L2_MBUS_FMT_Y10_1X10:
700 formats++; 713 formats++;
701 if (xlate) { 714 if (xlate) {
702 xlate->host_fmt = &mx3_camera_formats[1]; 715 xlate->host_fmt = &mx3_camera_formats[1];
703 xlate->cam_fmt = icd->formats + idx; 716 xlate->code = code;
704 xlate->buswidth = buswidth;
705 xlate++; 717 xlate++;
706 dev_dbg(icd->dev.parent, 718 dev_dbg(dev, "Providing format %s using code %d\n",
707 "Providing format %s using %s\n", 719 mx3_camera_formats[1].name, code);
708 mx3_camera_formats[0].name,
709 icd->formats[idx].name);
710 } 720 }
721 break;
711 default: 722 default:
712passthrough: 723 if (!mx3_camera_packing_supported(fmt))
713 /* Generic pass-through */ 724 return 0;
714 formats++; 725 }
715 if (xlate) { 726
716 xlate->host_fmt = icd->formats + idx; 727 /* Generic pass-through */
717 xlate->cam_fmt = icd->formats + idx; 728 formats++;
718 xlate->buswidth = buswidth; 729 if (xlate) {
719 xlate++; 730 xlate->host_fmt = fmt;
720 dev_dbg(icd->dev.parent, 731 xlate->code = code;
721 "Providing format %s in pass-through mode\n", 732 xlate++;
722 icd->formats[idx].name); 733 dev_dbg(dev, "Providing format %x in pass-through mode\n",
723 } 734 xlate->host_fmt->fourcc);
724 } 735 }
725 736
726 return formats; 737 return formats;
@@ -804,8 +815,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
804 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 815 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
805 struct mx3_camera_dev *mx3_cam = ici->priv; 816 struct mx3_camera_dev *mx3_cam = ici->priv;
806 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 817 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
807 struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE}; 818 struct v4l2_mbus_framefmt mf;
808 struct v4l2_pix_format *pix = &f.fmt.pix;
809 int ret; 819 int ret;
810 820
811 soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096); 821 soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096);
@@ -816,19 +826,19 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
816 return ret; 826 return ret;
817 827
818 /* The capture device might have changed its output */ 828 /* The capture device might have changed its output */
819 ret = v4l2_subdev_call(sd, video, g_fmt, &f); 829 ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
820 if (ret < 0) 830 if (ret < 0)
821 return ret; 831 return ret;
822 832
823 if (pix->width & 7) { 833 if (mf.width & 7) {
824 /* Ouch! We can only handle 8-byte aligned width... */ 834 /* Ouch! We can only handle 8-byte aligned width... */
825 stride_align(&pix->width); 835 stride_align(&mf.width);
826 ret = v4l2_subdev_call(sd, video, s_fmt, &f); 836 ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
827 if (ret < 0) 837 if (ret < 0)
828 return ret; 838 return ret;
829 } 839 }
830 840
831 if (pix->width != icd->user_width || pix->height != icd->user_height) { 841 if (mf.width != icd->user_width || mf.height != icd->user_height) {
832 /* 842 /*
833 * We now know pixel formats and can decide upon DMA-channel(s) 843 * We now know pixel formats and can decide upon DMA-channel(s)
834 * So far only direct camera-to-memory is supported 844 * So far only direct camera-to-memory is supported
@@ -839,14 +849,14 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
839 return ret; 849 return ret;
840 } 850 }
841 851
842 configure_geometry(mx3_cam, pix->width, pix->height); 852 configure_geometry(mx3_cam, mf.width, mf.height);
843 } 853 }
844 854
845 dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n", 855 dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n",
846 pix->width, pix->height); 856 mf.width, mf.height);
847 857
848 icd->user_width = pix->width; 858 icd->user_width = mf.width;
849 icd->user_height = pix->height; 859 icd->user_height = mf.height;
850 860
851 return ret; 861 return ret;
852} 862}
@@ -859,6 +869,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
859 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 869 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
860 const struct soc_camera_format_xlate *xlate; 870 const struct soc_camera_format_xlate *xlate;
861 struct v4l2_pix_format *pix = &f->fmt.pix; 871 struct v4l2_pix_format *pix = &f->fmt.pix;
872 struct v4l2_mbus_framefmt mf;
862 int ret; 873 int ret;
863 874
864 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 875 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
@@ -883,11 +894,24 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
883 894
884 configure_geometry(mx3_cam, pix->width, pix->height); 895 configure_geometry(mx3_cam, pix->width, pix->height);
885 896
886 ret = v4l2_subdev_call(sd, video, s_fmt, f); 897 mf.width = pix->width;
887 if (!ret) { 898 mf.height = pix->height;
888 icd->buswidth = xlate->buswidth; 899 mf.field = pix->field;
889 icd->current_fmt = xlate->host_fmt; 900 mf.colorspace = pix->colorspace;
890 } 901 mf.code = xlate->code;
902
903 ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
904 if (ret < 0)
905 return ret;
906
907 if (mf.code != xlate->code)
908 return -EINVAL;
909
910 pix->width = mf.width;
911 pix->height = mf.height;
912 pix->field = mf.field;
913 pix->colorspace = mf.colorspace;
914 icd->current_fmt = xlate;
891 915
892 dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height); 916 dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height);
893 917
@@ -900,8 +924,8 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
900 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 924 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
901 const struct soc_camera_format_xlate *xlate; 925 const struct soc_camera_format_xlate *xlate;
902 struct v4l2_pix_format *pix = &f->fmt.pix; 926 struct v4l2_pix_format *pix = &f->fmt.pix;
927 struct v4l2_mbus_framefmt mf;
903 __u32 pixfmt = pix->pixelformat; 928 __u32 pixfmt = pix->pixelformat;
904 enum v4l2_field field;
905 int ret; 929 int ret;
906 930
907 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 931 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
@@ -916,23 +940,37 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
916 if (pix->width > 4096) 940 if (pix->width > 4096)
917 pix->width = 4096; 941 pix->width = 4096;
918 942
919 pix->bytesperline = pix->width * 943 pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
920 DIV_ROUND_UP(xlate->host_fmt->depth, 8); 944 xlate->host_fmt);
945 if (pix->bytesperline < 0)
946 return pix->bytesperline;
921 pix->sizeimage = pix->height * pix->bytesperline; 947 pix->sizeimage = pix->height * pix->bytesperline;
922 948
923 /* camera has to see its format, but the user the original one */
924 pix->pixelformat = xlate->cam_fmt->fourcc;
925 /* limit to sensor capabilities */ 949 /* limit to sensor capabilities */
926 ret = v4l2_subdev_call(sd, video, try_fmt, f); 950 mf.width = pix->width;
927 pix->pixelformat = xlate->host_fmt->fourcc; 951 mf.height = pix->height;
952 mf.field = pix->field;
953 mf.colorspace = pix->colorspace;
954 mf.code = xlate->code;
955
956 ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
957 if (ret < 0)
958 return ret;
928 959
929 field = pix->field; 960 pix->width = mf.width;
961 pix->height = mf.height;
962 pix->colorspace = mf.colorspace;
930 963
931 if (field == V4L2_FIELD_ANY) { 964 switch (mf.field) {
965 case V4L2_FIELD_ANY:
932 pix->field = V4L2_FIELD_NONE; 966 pix->field = V4L2_FIELD_NONE;
933 } else if (field != V4L2_FIELD_NONE) { 967 break;
934 dev_err(icd->dev.parent, "Field type %d unsupported.\n", field); 968 case V4L2_FIELD_NONE:
935 return -EINVAL; 969 break;
970 default:
971 dev_err(icd->dev.parent, "Field type %d unsupported.\n",
972 mf.field);
973 ret = -EINVAL;
936 } 974 }
937 975
938 return ret; 976 return ret;
@@ -968,18 +1006,26 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
968 struct mx3_camera_dev *mx3_cam = ici->priv; 1006 struct mx3_camera_dev *mx3_cam = ici->priv;
969 unsigned long bus_flags, camera_flags, common_flags; 1007 unsigned long bus_flags, camera_flags, common_flags;
970 u32 dw, sens_conf; 1008 u32 dw, sens_conf;
971 int ret = test_platform_param(mx3_cam, icd->buswidth, &bus_flags); 1009 const struct soc_mbus_pixelfmt *fmt;
1010 int buswidth;
1011 int ret;
972 const struct soc_camera_format_xlate *xlate; 1012 const struct soc_camera_format_xlate *xlate;
973 struct device *dev = icd->dev.parent; 1013 struct device *dev = icd->dev.parent;
974 1014
1015 fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
1016 if (!fmt)
1017 return -EINVAL;
1018
1019 buswidth = fmt->bits_per_sample;
1020 ret = test_platform_param(mx3_cam, buswidth, &bus_flags);
1021
975 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 1022 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
976 if (!xlate) { 1023 if (!xlate) {
977 dev_warn(dev, "Format %x not found\n", pixfmt); 1024 dev_warn(dev, "Format %x not found\n", pixfmt);
978 return -EINVAL; 1025 return -EINVAL;
979 } 1026 }
980 1027
981 dev_dbg(dev, "requested bus width %d bit: %d\n", 1028 dev_dbg(dev, "requested bus width %d bit: %d\n", buswidth, ret);
982 icd->buswidth, ret);
983 1029
984 if (ret < 0) 1030 if (ret < 0)
985 return ret; 1031 return ret;
@@ -1027,8 +1073,10 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1027 common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING; 1073 common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
1028 } 1074 }
1029 1075
1030	/* Make the camera work in widest common mode; we'll take care of	 1076	/*
1031	 * the rest */								 1077	 * Make the camera work in widest common mode; we'll take care of
										 1078	 * the rest
1079 */
1032 if (common_flags & SOCAM_DATAWIDTH_15) 1080 if (common_flags & SOCAM_DATAWIDTH_15)
1033 common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) | 1081 common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) |
1034 SOCAM_DATAWIDTH_15; 1082 SOCAM_DATAWIDTH_15;
@@ -1078,7 +1126,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1078 sens_conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT; 1126 sens_conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT;
1079 1127
1080 /* Just do what we're asked to do */ 1128 /* Just do what we're asked to do */
1081 switch (xlate->host_fmt->depth) { 1129 switch (xlate->host_fmt->bits_per_sample) {
1082 case 4: 1130 case 4:
1083 dw = 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT; 1131 dw = 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
1084 break; 1132 break;
@@ -1152,8 +1200,10 @@ static int __devinit mx3_camera_probe(struct platform_device *pdev)
1152 if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 | 1200 if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 |
1153 MX3_CAMERA_DATAWIDTH_8 | MX3_CAMERA_DATAWIDTH_10 | 1201 MX3_CAMERA_DATAWIDTH_8 | MX3_CAMERA_DATAWIDTH_10 |
1154 MX3_CAMERA_DATAWIDTH_15))) { 1202 MX3_CAMERA_DATAWIDTH_15))) {
1155 /* Platform hasn't set available data widths. This is bad. 1203 /*
1156 * Warn and use a default. */ 1204 * Platform hasn't set available data widths. This is bad.
1205 * Warn and use a default.
1206 */
1157 dev_warn(&pdev->dev, "WARNING! Platform hasn't set available " 1207 dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
1158 "data widths, using default 8 bit\n"); 1208 "data widths, using default 8 bit\n");
1159 mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8; 1209 mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8;
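
Note: the mx3 host now discovers formats by enumerating the sensor's media-bus codes and building a translation (xlate) table that pairs each code with a host memory format, adding a generic pass-through entry when the host can store the samples directly. The stand-alone sketch below mirrors that flow with made-up names; the real driver uses v4l2_subdev_call(sd, video, enum_mbus_fmt, ...) and soc_mbus_get_fmtdesc(), and also checks bus-width and packing support before accepting a code.

    /* Sketch: enumerate sensor codes, build code -> host-format entries. */
    #include <stdio.h>

    enum pixelcode { CODE_SBGGR10, CODE_Y10, CODE_GREY8 };

    struct host_fmt { const char *name; };
    struct xlate { enum pixelcode code; const struct host_fmt *host_fmt; };

    static const struct host_fmt host_bayer8      = { "Bayer BGGR 8 bit" };
    static const struct host_fmt host_grey8       = { "Monochrome 8 bit" };
    static const struct host_fmt host_passthrough = { "pass-through" };

    /* Stand-in for the subdev's enum_mbus_fmt operation */
    static int sensor_enum_code(int idx, enum pixelcode *code)
    {
        static const enum pixelcode codes[] = { CODE_SBGGR10, CODE_GREY8 };

        if (idx >= 2)
            return -1;    /* no more formats */
        *code = codes[idx];
        return 0;
    }

    int main(void)
    {
        struct xlate xlate[8];
        enum pixelcode code;
        int idx, n = 0;

        for (idx = 0; sensor_enum_code(idx, &code) == 0; idx++) {
            if (code == CODE_SBGGR10)      /* host repacks 10-bit Bayer to 8 bit */
                xlate[n++] = (struct xlate){ code, &host_bayer8 };
            else if (code == CODE_Y10)     /* host repacks 10-bit grey to 8 bit */
                xlate[n++] = (struct xlate){ code, &host_grey8 };
            /*
             * Every accepted code also gets a generic pass-through entry;
             * the real driver first checks that the packing is supported.
             */
            xlate[n++] = (struct xlate){ code, &host_passthrough };
        }

        for (idx = 0; idx < n; idx++)
            printf("code %d -> %s\n", xlate[idx].code, xlate[idx].host_fmt->name);
        return 0;
    }
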
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 5fc4ac0d88f0..7400eacb4d64 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1450,12 +1450,11 @@ static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
1450 1450
1451static int omap24xxcam_open(struct file *file) 1451static int omap24xxcam_open(struct file *file)
1452{ 1452{
1453 int minor = video_devdata(file)->minor;
1454 struct omap24xxcam_device *cam = omap24xxcam.priv; 1453 struct omap24xxcam_device *cam = omap24xxcam.priv;
1455 struct omap24xxcam_fh *fh; 1454 struct omap24xxcam_fh *fh;
1456 struct v4l2_format format; 1455 struct v4l2_format format;
1457 1456
1458 if (!cam || !cam->vfd || (cam->vfd->minor != minor)) 1457 if (!cam || !cam->vfd)
1459 return -ENODEV; 1458 return -ENODEV;
1460 1459
1461 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 1460 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
@@ -1660,7 +1659,6 @@ static int omap24xxcam_device_register(struct v4l2_int_device *s)
1660 1659
1661 strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name)); 1660 strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name));
1662 vfd->fops = &omap24xxcam_fops; 1661 vfd->fops = &omap24xxcam_fops;
1663 vfd->minor = -1;
1664 vfd->ioctl_ops = &omap24xxcam_ioctl_fops; 1662 vfd->ioctl_ops = &omap24xxcam_ioctl_fops;
1665 1663
1666 omap24xxcam_hwinit(cam); 1664 omap24xxcam_hwinit(cam);
@@ -1671,14 +1669,14 @@ static int omap24xxcam_device_register(struct v4l2_int_device *s)
1671 1669
1672 if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) { 1670 if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) {
1673 dev_err(cam->dev, "could not register V4L device\n"); 1671 dev_err(cam->dev, "could not register V4L device\n");
1674 vfd->minor = -1;
1675 rval = -EBUSY; 1672 rval = -EBUSY;
1676 goto err; 1673 goto err;
1677 } 1674 }
1678 1675
1679 omap24xxcam_poweron_reset(cam); 1676 omap24xxcam_poweron_reset(cam);
1680 1677
1681 dev_info(cam->dev, "registered device video%d\n", vfd->minor); 1678 dev_info(cam->dev, "registered device %s\n",
1679 video_device_node_name(vfd));
1682 1680
1683 return 0; 1681 return 0;
1684 1682
@@ -1695,7 +1693,7 @@ static void omap24xxcam_device_unregister(struct v4l2_int_device *s)
1695 omap24xxcam_sensor_exit(cam); 1693 omap24xxcam_sensor_exit(cam);
1696 1694
1697 if (cam->vfd) { 1695 if (cam->vfd) {
1698 if (cam->vfd->minor == -1) { 1696 if (!video_is_registered(cam->vfd)) {
1699 /* 1697 /*
1700 * The device was never registered, so release the 1698 * The device was never registered, so release the
1701 * video_device struct directly. 1699 * video_device struct directly.
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 0bc2cf573c76..e0bce8dc74bf 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -4674,7 +4674,6 @@ static struct video_device vdev_template = {
4674 .name = "OV511 USB Camera", 4674 .name = "OV511 USB Camera",
4675 .fops = &ov511_fops, 4675 .fops = &ov511_fops,
4676 .release = video_device_release, 4676 .release = video_device_release,
4677 .minor = -1,
4678}; 4677};
4679 4678
4680/**************************************************************************** 4679/****************************************************************************
@@ -5867,8 +5866,8 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
5867 ov511_devused |= 1 << nr; 5866 ov511_devused |= 1 << nr;
5868 ov->nr = nr; 5867 ov->nr = nr;
5869 5868
5870 dev_info(&intf->dev, "Device at %s registered to minor %d\n", 5869 dev_info(&intf->dev, "Device at %s registered to %s\n",
5871 ov->usb_path, ov->vdev->minor); 5870 ov->usb_path, video_device_node_name(ov->vdev));
5872 5871
5873 usb_set_intfdata(intf, ov); 5872 usb_set_intfdata(intf, ov);
5874 if (ov_create_sysfs(ov->vdev)) { 5873 if (ov_create_sysfs(ov->vdev)) {
@@ -5878,13 +5877,13 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
5878 goto error; 5877 goto error;
5879 } 5878 }
5880 5879
5881 mutex_lock(&ov->lock); 5880 mutex_unlock(&ov->lock);
5882 5881
5883 return 0; 5882 return 0;
5884 5883
5885error: 5884error:
5886 if (ov->vdev) { 5885 if (ov->vdev) {
5887 if (-1 == ov->vdev->minor) 5886 if (!video_is_registered(ov->vdev))
5888 video_device_release(ov->vdev); 5887 video_device_release(ov->vdev);
5889 else 5888 else
5890 video_unregister_device(ov->vdev); 5889 video_unregister_device(ov->vdev);
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 205229333466..3a45e945a528 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -24,6 +24,7 @@
24#include <media/v4l2-chip-ident.h> 24#include <media/v4l2-chip-ident.h>
25#include <media/v4l2-subdev.h> 25#include <media/v4l2-subdev.h>
26#include <media/soc_camera.h> 26#include <media/soc_camera.h>
27#include <media/soc_mediabus.h>
27#include <media/ov772x.h> 28#include <media/ov772x.h>
28 29
29/* 30/*
@@ -382,7 +383,8 @@ struct regval_list {
382}; 383};
383 384
384struct ov772x_color_format { 385struct ov772x_color_format {
385 const struct soc_camera_data_format *format; 386 enum v4l2_mbus_pixelcode code;
387 enum v4l2_colorspace colorspace;
386 u8 dsp3; 388 u8 dsp3;
387 u8 com3; 389 u8 com3;
388 u8 com7; 390 u8 com7;
@@ -399,7 +401,7 @@ struct ov772x_win_size {
399struct ov772x_priv { 401struct ov772x_priv {
400 struct v4l2_subdev subdev; 402 struct v4l2_subdev subdev;
401 struct ov772x_camera_info *info; 403 struct ov772x_camera_info *info;
402 const struct ov772x_color_format *fmt; 404 const struct ov772x_color_format *cfmt;
403 const struct ov772x_win_size *win; 405 const struct ov772x_win_size *win;
404 int model; 406 int model;
405 unsigned short flag_vflip:1; 407 unsigned short flag_vflip:1;
@@ -434,93 +436,57 @@ static const struct regval_list ov772x_vga_regs[] = {
434}; 436};
435 437
436/* 438/*
437 * supported format list 439 * supported color format list
438 */
439
440#define SETFOURCC(type) .name = (#type), .fourcc = (V4L2_PIX_FMT_ ## type)
441static const struct soc_camera_data_format ov772x_fmt_lists[] = {
442 {
443 SETFOURCC(YUYV),
444 .depth = 16,
445 .colorspace = V4L2_COLORSPACE_JPEG,
446 },
447 {
448 SETFOURCC(YVYU),
449 .depth = 16,
450 .colorspace = V4L2_COLORSPACE_JPEG,
451 },
452 {
453 SETFOURCC(UYVY),
454 .depth = 16,
455 .colorspace = V4L2_COLORSPACE_JPEG,
456 },
457 {
458 SETFOURCC(RGB555),
459 .depth = 16,
460 .colorspace = V4L2_COLORSPACE_SRGB,
461 },
462 {
463 SETFOURCC(RGB555X),
464 .depth = 16,
465 .colorspace = V4L2_COLORSPACE_SRGB,
466 },
467 {
468 SETFOURCC(RGB565),
469 .depth = 16,
470 .colorspace = V4L2_COLORSPACE_SRGB,
471 },
472 {
473 SETFOURCC(RGB565X),
474 .depth = 16,
475 .colorspace = V4L2_COLORSPACE_SRGB,
476 },
477};
478
479/*
480 * color format list
481 */ 440 */
482static const struct ov772x_color_format ov772x_cfmts[] = { 441static const struct ov772x_color_format ov772x_cfmts[] = {
483 { 442 {
484 .format = &ov772x_fmt_lists[0], 443 .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
485 .dsp3 = 0x0, 444 .colorspace = V4L2_COLORSPACE_JPEG,
486 .com3 = SWAP_YUV, 445 .dsp3 = 0x0,
487 .com7 = OFMT_YUV, 446 .com3 = SWAP_YUV,
447 .com7 = OFMT_YUV,
488 }, 448 },
489 { 449 {
490 .format = &ov772x_fmt_lists[1], 450 .code = V4L2_MBUS_FMT_YVYU8_2X8_LE,
491 .dsp3 = UV_ON, 451 .colorspace = V4L2_COLORSPACE_JPEG,
492 .com3 = SWAP_YUV, 452 .dsp3 = UV_ON,
493 .com7 = OFMT_YUV, 453 .com3 = SWAP_YUV,
454 .com7 = OFMT_YUV,
494 }, 455 },
495 { 456 {
496 .format = &ov772x_fmt_lists[2], 457 .code = V4L2_MBUS_FMT_YUYV8_2X8_BE,
497 .dsp3 = 0x0, 458 .colorspace = V4L2_COLORSPACE_JPEG,
498 .com3 = 0x0, 459 .dsp3 = 0x0,
499 .com7 = OFMT_YUV, 460 .com3 = 0x0,
461 .com7 = OFMT_YUV,
500 }, 462 },
501 { 463 {
502 .format = &ov772x_fmt_lists[3], 464 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
503 .dsp3 = 0x0, 465 .colorspace = V4L2_COLORSPACE_SRGB,
504 .com3 = SWAP_RGB, 466 .dsp3 = 0x0,
505 .com7 = FMT_RGB555 | OFMT_RGB, 467 .com3 = SWAP_RGB,
468 .com7 = FMT_RGB555 | OFMT_RGB,
506 }, 469 },
507 { 470 {
508 .format = &ov772x_fmt_lists[4], 471 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
509 .dsp3 = 0x0, 472 .colorspace = V4L2_COLORSPACE_SRGB,
510 .com3 = 0x0, 473 .dsp3 = 0x0,
511 .com7 = FMT_RGB555 | OFMT_RGB, 474 .com3 = 0x0,
475 .com7 = FMT_RGB555 | OFMT_RGB,
512 }, 476 },
513 { 477 {
514 .format = &ov772x_fmt_lists[5], 478 .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
515 .dsp3 = 0x0, 479 .colorspace = V4L2_COLORSPACE_SRGB,
516 .com3 = SWAP_RGB, 480 .dsp3 = 0x0,
517 .com7 = FMT_RGB565 | OFMT_RGB, 481 .com3 = SWAP_RGB,
482 .com7 = FMT_RGB565 | OFMT_RGB,
518 }, 483 },
519 { 484 {
520 .format = &ov772x_fmt_lists[6], 485 .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
521 .dsp3 = 0x0, 486 .colorspace = V4L2_COLORSPACE_SRGB,
522 .com3 = 0x0, 487 .dsp3 = 0x0,
523 .com7 = FMT_RGB565 | OFMT_RGB, 488 .com3 = 0x0,
489 .com7 = FMT_RGB565 | OFMT_RGB,
524 }, 490 },
525}; 491};
526 492
@@ -642,15 +608,15 @@ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
642 return 0; 608 return 0;
643 } 609 }
644 610
645 if (!priv->win || !priv->fmt) { 611 if (!priv->win || !priv->cfmt) {
646 dev_err(&client->dev, "norm or win select error\n"); 612 dev_err(&client->dev, "norm or win select error\n");
647 return -EPERM; 613 return -EPERM;
648 } 614 }
649 615
650 ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0); 616 ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0);
651 617
652 dev_dbg(&client->dev, "format %s, win %s\n", 618 dev_dbg(&client->dev, "format %d, win %s\n",
653 priv->fmt->format->name, priv->win->name); 619 priv->cfmt->code, priv->win->name);
654 620
655 return 0; 621 return 0;
656} 622}
@@ -806,8 +772,8 @@ static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height)
806 return win; 772 return win;
807} 773}
808 774
809static int ov772x_set_params(struct i2c_client *client, 775static int ov772x_set_params(struct i2c_client *client, u32 *width, u32 *height,
810 u32 *width, u32 *height, u32 pixfmt) 776 enum v4l2_mbus_pixelcode code)
811{ 777{
812 struct ov772x_priv *priv = to_ov772x(client); 778 struct ov772x_priv *priv = to_ov772x(client);
813 int ret = -EINVAL; 779 int ret = -EINVAL;
@@ -817,14 +783,14 @@ static int ov772x_set_params(struct i2c_client *client,
817 /* 783 /*
818 * select format 784 * select format
819 */ 785 */
820 priv->fmt = NULL; 786 priv->cfmt = NULL;
821 for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) { 787 for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) {
822 if (pixfmt == ov772x_cfmts[i].format->fourcc) { 788 if (code == ov772x_cfmts[i].code) {
823 priv->fmt = ov772x_cfmts + i; 789 priv->cfmt = ov772x_cfmts + i;
824 break; 790 break;
825 } 791 }
826 } 792 }
827 if (!priv->fmt) 793 if (!priv->cfmt)
828 goto ov772x_set_fmt_error; 794 goto ov772x_set_fmt_error;
829 795
830 /* 796 /*
@@ -894,7 +860,7 @@ static int ov772x_set_params(struct i2c_client *client,
894 /* 860 /*
895 * set DSP_CTRL3 861 * set DSP_CTRL3
896 */ 862 */
897 val = priv->fmt->dsp3; 863 val = priv->cfmt->dsp3;
898 if (val) { 864 if (val) {
899 ret = ov772x_mask_set(client, 865 ret = ov772x_mask_set(client,
900 DSP_CTRL3, UV_MASK, val); 866 DSP_CTRL3, UV_MASK, val);
@@ -905,7 +871,7 @@ static int ov772x_set_params(struct i2c_client *client,
905 /* 871 /*
906 * set COM3 872 * set COM3
907 */ 873 */
908 val = priv->fmt->com3; 874 val = priv->cfmt->com3;
909 if (priv->info->flags & OV772X_FLAG_VFLIP) 875 if (priv->info->flags & OV772X_FLAG_VFLIP)
910 val |= VFLIP_IMG; 876 val |= VFLIP_IMG;
911 if (priv->info->flags & OV772X_FLAG_HFLIP) 877 if (priv->info->flags & OV772X_FLAG_HFLIP)
@@ -923,9 +889,9 @@ static int ov772x_set_params(struct i2c_client *client,
923 /* 889 /*
924 * set COM7 890 * set COM7
925 */ 891 */
926 val = priv->win->com7_bit | priv->fmt->com7; 892 val = priv->win->com7_bit | priv->cfmt->com7;
927 ret = ov772x_mask_set(client, 893 ret = ov772x_mask_set(client,
928 COM7, (SLCT_MASK | FMT_MASK | OFMT_MASK), 894 COM7, SLCT_MASK | FMT_MASK | OFMT_MASK,
929 val); 895 val);
930 if (ret < 0) 896 if (ret < 0)
931 goto ov772x_set_fmt_error; 897 goto ov772x_set_fmt_error;
@@ -951,7 +917,7 @@ ov772x_set_fmt_error:
951 917
952 ov772x_reset(client); 918 ov772x_reset(client);
953 priv->win = NULL; 919 priv->win = NULL;
954 priv->fmt = NULL; 920 priv->cfmt = NULL;
955 921
956 return ret; 922 return ret;
957} 923}
@@ -981,54 +947,79 @@ static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
981 return 0; 947 return 0;
982} 948}
983 949
984static int ov772x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 950static int ov772x_g_fmt(struct v4l2_subdev *sd,
951 struct v4l2_mbus_framefmt *mf)
985{ 952{
986 struct i2c_client *client = sd->priv; 953 struct i2c_client *client = sd->priv;
987 struct ov772x_priv *priv = to_ov772x(client); 954 struct ov772x_priv *priv = to_ov772x(client);
988 struct v4l2_pix_format *pix = &f->fmt.pix;
989 955
990 if (!priv->win || !priv->fmt) { 956 if (!priv->win || !priv->cfmt) {
991 u32 width = VGA_WIDTH, height = VGA_HEIGHT; 957 u32 width = VGA_WIDTH, height = VGA_HEIGHT;
992 int ret = ov772x_set_params(client, &width, &height, 958 int ret = ov772x_set_params(client, &width, &height,
993 V4L2_PIX_FMT_YUYV); 959 V4L2_MBUS_FMT_YUYV8_2X8_LE);
994 if (ret < 0) 960 if (ret < 0)
995 return ret; 961 return ret;
996 } 962 }
997 963
998 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 964 mf->width = priv->win->width;
999 965 mf->height = priv->win->height;
1000 pix->width = priv->win->width; 966 mf->code = priv->cfmt->code;
1001 pix->height = priv->win->height; 967 mf->colorspace = priv->cfmt->colorspace;
1002 pix->pixelformat = priv->fmt->format->fourcc; 968 mf->field = V4L2_FIELD_NONE;
1003 pix->colorspace = priv->fmt->format->colorspace;
1004 pix->field = V4L2_FIELD_NONE;
1005 969
1006 return 0; 970 return 0;
1007} 971}
1008 972
1009static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 973static int ov772x_s_fmt(struct v4l2_subdev *sd,
974 struct v4l2_mbus_framefmt *mf)
1010{ 975{
1011 struct i2c_client *client = sd->priv; 976 struct i2c_client *client = sd->priv;
1012 struct v4l2_pix_format *pix = &f->fmt.pix; 977 struct ov772x_priv *priv = to_ov772x(client);
978 int ret = ov772x_set_params(client, &mf->width, &mf->height,
979 mf->code);
980
981 if (!ret)
982 mf->colorspace = priv->cfmt->colorspace;
1013 983
1014 return ov772x_set_params(client, &pix->width, &pix->height, 984 return ret;
1015 pix->pixelformat);
1016} 985}
1017 986
1018static int ov772x_try_fmt(struct v4l2_subdev *sd, 987static int ov772x_try_fmt(struct v4l2_subdev *sd,
1019 struct v4l2_format *f) 988 struct v4l2_mbus_framefmt *mf)
1020{ 989{
1021 struct v4l2_pix_format *pix = &f->fmt.pix; 990 struct i2c_client *client = sd->priv;
991 struct ov772x_priv *priv = to_ov772x(client);
1022 const struct ov772x_win_size *win; 992 const struct ov772x_win_size *win;
993 int i;
1023 994
1024 /* 995 /*
1025 * select suitable win 996 * select suitable win
1026 */ 997 */
1027 win = ov772x_select_win(pix->width, pix->height); 998 win = ov772x_select_win(mf->width, mf->height);
999
1000 mf->width = win->width;
1001 mf->height = win->height;
1002 mf->field = V4L2_FIELD_NONE;
1028 1003
1029 pix->width = win->width; 1004 for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++)
1030 pix->height = win->height; 1005 if (mf->code == ov772x_cfmts[i].code)
1031 pix->field = V4L2_FIELD_NONE; 1006 break;
1007
1008 if (i == ARRAY_SIZE(ov772x_cfmts)) {
1009 /* Unsupported format requested. Propose either */
1010 if (priv->cfmt) {
1011 /* the current one or */
1012 mf->colorspace = priv->cfmt->colorspace;
1013 mf->code = priv->cfmt->code;
1014 } else {
1015 /* the default one */
1016 mf->colorspace = ov772x_cfmts[0].colorspace;
1017 mf->code = ov772x_cfmts[0].code;
1018 }
1019 } else {
1020 /* Also return the colorspace */
1021 mf->colorspace = ov772x_cfmts[i].colorspace;
1022 }
1032 1023
1033 return 0; 1024 return 0;
1034} 1025}
@@ -1057,9 +1048,6 @@ static int ov772x_video_probe(struct soc_camera_device *icd,
1057 return -ENODEV; 1048 return -ENODEV;
1058 } 1049 }
1059 1050
1060 icd->formats = ov772x_fmt_lists;
1061 icd->num_formats = ARRAY_SIZE(ov772x_fmt_lists);
1062
1063 /* 1051 /*
1064 * check and show product ID and manufacturer ID 1052 * check and show product ID and manufacturer ID
1065 */ 1053 */
@@ -1109,13 +1097,24 @@ static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
1109#endif 1097#endif
1110}; 1098};
1111 1099
1100static int ov772x_enum_fmt(struct v4l2_subdev *sd, int index,
1101 enum v4l2_mbus_pixelcode *code)
1102{
1103 if ((unsigned int)index >= ARRAY_SIZE(ov772x_cfmts))
1104 return -EINVAL;
1105
1106 *code = ov772x_cfmts[index].code;
1107 return 0;
1108}
1109
1112static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = { 1110static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
1113 .s_stream = ov772x_s_stream, 1111 .s_stream = ov772x_s_stream,
1114 .g_fmt = ov772x_g_fmt, 1112 .g_mbus_fmt = ov772x_g_fmt,
1115 .s_fmt = ov772x_s_fmt, 1113 .s_mbus_fmt = ov772x_s_fmt,
1116 .try_fmt = ov772x_try_fmt, 1114 .try_mbus_fmt = ov772x_try_fmt,
1117 .cropcap = ov772x_cropcap, 1115 .cropcap = ov772x_cropcap,
1118 .g_crop = ov772x_g_crop, 1116 .g_crop = ov772x_g_crop,
1117 .enum_mbus_fmt = ov772x_enum_fmt,
1119}; 1118};
1120 1119
1121static struct v4l2_subdev_ops ov772x_subdev_ops = { 1120static struct v4l2_subdev_ops ov772x_subdev_ops = {
@@ -1143,10 +1142,10 @@ static int ov772x_probe(struct i2c_client *client,
1143 } 1142 }
1144 1143
1145 icl = to_soc_camera_link(icd); 1144 icl = to_soc_camera_link(icd);
1146 if (!icl) 1145 if (!icl || !icl->priv)
1147 return -EINVAL; 1146 return -EINVAL;
1148 1147
1149 info = container_of(icl, struct ov772x_camera_info, link); 1148 info = icl->priv;
1150 1149
1151 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1150 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1152 dev_err(&adapter->dev, 1151 dev_err(&adapter->dev,
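
The ov772x hunks above establish the subdev-side pattern that the rest of this series repeats: a table pairing media-bus pixel codes with colorspaces, walked by .enum_mbus_fmt and used by .try_mbus_fmt to validate or default the requested code. The following is a minimal illustrative sketch of that pattern, not part of this patch; all example_* names are hypothetical, while the types, ops and macros are the ones the converted driver uses.

#include <linux/kernel.h>
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>

/* Hypothetical format table; a real driver lists its supported codes here */
struct example_datafmt {
        enum v4l2_mbus_pixelcode code;
        enum v4l2_colorspace colorspace;
};

static const struct example_datafmt example_fmts[] = {
        {V4L2_MBUS_FMT_YUYV8_2X8_LE,  V4L2_COLORSPACE_JPEG},
        {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
};

/* .enum_mbus_fmt: report the index-th supported media-bus code */
static int example_enum_fmt(struct v4l2_subdev *sd, int index,
                            enum v4l2_mbus_pixelcode *code)
{
        if ((unsigned int)index >= ARRAY_SIZE(example_fmts))
                return -EINVAL;

        *code = example_fmts[index].code;
        return 0;
}

/* .try_mbus_fmt: validate the requested code, fall back to the default one */
static int example_try_fmt(struct v4l2_subdev *sd,
                           struct v4l2_mbus_framefmt *mf)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(example_fmts); i++)
                if (mf->code == example_fmts[i].code)
                        break;

        if (i == ARRAY_SIZE(example_fmts))
                i = 0;

        mf->code       = example_fmts[i].code;
        mf->colorspace = example_fmts[i].colorspace;
        mf->field      = V4L2_FIELD_NONE;

        return 0;
}

In ov772x_try_fmt above the fallback prefers the currently configured format when one exists; the sketch simply takes entry 0, which matches the driver's behaviour before any format has been set.
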
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index c81ae2192887..47bf60ceb7a2 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -154,19 +154,10 @@ static const struct ov9640_reg ov9640_regs_rgb[] = {
154 { OV9640_MTXS, 0x65 }, 154 { OV9640_MTXS, 0x65 },
155}; 155};
156 156
157/* 157static enum v4l2_mbus_pixelcode ov9640_codes[] = {
158 * TODO: this sensor also supports RGB555 and RGB565 formats, but support for 158 V4L2_MBUS_FMT_YUYV8_2X8_BE,
159 * them has not yet been sufficiently tested and so it is not included with 159 V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
160 * this version of the driver. To test and debug these formats add two entries 160 V4L2_MBUS_FMT_RGB565_2X8_LE,
161 * to the below array, see ov772x.c for an example.
162 */
163static const struct soc_camera_data_format ov9640_fmt_lists[] = {
164 {
165 .name = "UYVY",
166 .fourcc = V4L2_PIX_FMT_UYVY,
167 .depth = 16,
168 .colorspace = V4L2_COLORSPACE_JPEG,
169 },
170}; 161};
171 162
172static const struct v4l2_queryctrl ov9640_controls[] = { 163static const struct v4l2_queryctrl ov9640_controls[] = {
@@ -434,20 +425,22 @@ static void ov9640_res_roundup(u32 *width, u32 *height)
434} 425}
435 426
436/* Prepare necessary register changes depending on color encoding */ 427/* Prepare necessary register changes depending on color encoding */
437static void ov9640_alter_regs(u32 pixfmt, struct ov9640_reg_alt *alt) 428static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code,
429 struct ov9640_reg_alt *alt)
438{ 430{
439 switch (pixfmt) { 431 switch (code) {
440 case V4L2_PIX_FMT_UYVY: 432 default:
433 case V4L2_MBUS_FMT_YUYV8_2X8_BE:
441 alt->com12 = OV9640_COM12_YUV_AVG; 434 alt->com12 = OV9640_COM12_YUV_AVG;
442 alt->com13 = OV9640_COM13_Y_DELAY_EN | 435 alt->com13 = OV9640_COM13_Y_DELAY_EN |
443 OV9640_COM13_YUV_DLY(0x01); 436 OV9640_COM13_YUV_DLY(0x01);
444 break; 437 break;
445 case V4L2_PIX_FMT_RGB555: 438 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
446 alt->com7 = OV9640_COM7_RGB; 439 alt->com7 = OV9640_COM7_RGB;
447 alt->com13 = OV9640_COM13_RGB_AVG; 440 alt->com13 = OV9640_COM13_RGB_AVG;
448 alt->com15 = OV9640_COM15_RGB_555; 441 alt->com15 = OV9640_COM15_RGB_555;
449 break; 442 break;
450 case V4L2_PIX_FMT_RGB565: 443 case V4L2_MBUS_FMT_RGB565_2X8_LE:
451 alt->com7 = OV9640_COM7_RGB; 444 alt->com7 = OV9640_COM7_RGB;
452 alt->com13 = OV9640_COM13_RGB_AVG; 445 alt->com13 = OV9640_COM13_RGB_AVG;
453 alt->com15 = OV9640_COM15_RGB_565; 446 alt->com15 = OV9640_COM15_RGB_565;
@@ -456,8 +449,8 @@ static void ov9640_alter_regs(u32 pixfmt, struct ov9640_reg_alt *alt)
456} 449}
457 450
458/* Setup registers according to resolution and color encoding */ 451/* Setup registers according to resolution and color encoding */
459static int ov9640_write_regs(struct i2c_client *client, 452static int ov9640_write_regs(struct i2c_client *client, u32 width,
460 u32 width, u32 pixfmt, struct ov9640_reg_alt *alts) 453 enum v4l2_mbus_pixelcode code, struct ov9640_reg_alt *alts)
461{ 454{
462 const struct ov9640_reg *ov9640_regs, *matrix_regs; 455 const struct ov9640_reg *ov9640_regs, *matrix_regs;
463 int ov9640_regs_len, matrix_regs_len; 456 int ov9640_regs_len, matrix_regs_len;
@@ -500,7 +493,7 @@ static int ov9640_write_regs(struct i2c_client *client,
500 } 493 }
501 494
502 /* select color matrix configuration for given color encoding */ 495 /* select color matrix configuration for given color encoding */
503 if (pixfmt == V4L2_PIX_FMT_UYVY) { 496 if (code == V4L2_MBUS_FMT_YUYV8_2X8_BE) {
504 matrix_regs = ov9640_regs_yuv; 497 matrix_regs = ov9640_regs_yuv;
505 matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv); 498 matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv);
506 } else { 499 } else {
@@ -562,15 +555,17 @@ static int ov9640_prog_dflt(struct i2c_client *client)
562} 555}
563 556
564/* set the format we will capture in */ 557/* set the format we will capture in */
565static int ov9640_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 558static int ov9640_s_fmt(struct v4l2_subdev *sd,
559 struct v4l2_mbus_framefmt *mf)
566{ 560{
567 struct i2c_client *client = sd->priv; 561 struct i2c_client *client = sd->priv;
568 struct v4l2_pix_format *pix = &f->fmt.pix;
569 struct ov9640_reg_alt alts = {0}; 562 struct ov9640_reg_alt alts = {0};
563 enum v4l2_colorspace cspace;
564 enum v4l2_mbus_pixelcode code = mf->code;
570 int ret; 565 int ret;
571 566
572 ov9640_res_roundup(&pix->width, &pix->height); 567 ov9640_res_roundup(&mf->width, &mf->height);
573 ov9640_alter_regs(pix->pixelformat, &alts); 568 ov9640_alter_regs(mf->code, &alts);
574 569
575 ov9640_reset(client); 570 ov9640_reset(client);
576 571
@@ -578,19 +573,57 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
578 if (ret) 573 if (ret)
579 return ret; 574 return ret;
580 575
581 return ov9640_write_regs(client, pix->width, pix->pixelformat, &alts); 576 switch (code) {
577 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
578 case V4L2_MBUS_FMT_RGB565_2X8_LE:
579 cspace = V4L2_COLORSPACE_SRGB;
580 break;
581 default:
582 code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
583 case V4L2_MBUS_FMT_YUYV8_2X8_BE:
584 cspace = V4L2_COLORSPACE_JPEG;
585 }
586
587 ret = ov9640_write_regs(client, mf->width, code, &alts);
588 if (!ret) {
589 mf->code = code;
590 mf->colorspace = cspace;
591 }
592
593 return ret;
582} 594}
583 595
584static int ov9640_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 596static int ov9640_try_fmt(struct v4l2_subdev *sd,
597 struct v4l2_mbus_framefmt *mf)
585{ 598{
586 struct v4l2_pix_format *pix = &f->fmt.pix; 599 ov9640_res_roundup(&mf->width, &mf->height);
587 600
588 ov9640_res_roundup(&pix->width, &pix->height); 601 mf->field = V4L2_FIELD_NONE;
589 pix->field = V4L2_FIELD_NONE; 602
603 switch (mf->code) {
604 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
605 case V4L2_MBUS_FMT_RGB565_2X8_LE:
606 mf->colorspace = V4L2_COLORSPACE_SRGB;
607 break;
608 default:
609 mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
610 case V4L2_MBUS_FMT_YUYV8_2X8_BE:
611 mf->colorspace = V4L2_COLORSPACE_JPEG;
612 }
590 613
591 return 0; 614 return 0;
592} 615}
593 616
617static int ov9640_enum_fmt(struct v4l2_subdev *sd, int index,
618 enum v4l2_mbus_pixelcode *code)
619{
620 if ((unsigned int)index >= ARRAY_SIZE(ov9640_codes))
621 return -EINVAL;
622
623 *code = ov9640_codes[index];
624 return 0;
625}
626
594static int ov9640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) 627static int ov9640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
595{ 628{
596 a->c.left = 0; 629 a->c.left = 0;
@@ -637,9 +670,6 @@ static int ov9640_video_probe(struct soc_camera_device *icd,
637 goto err; 670 goto err;
638 } 671 }
639 672
640 icd->formats = ov9640_fmt_lists;
641 icd->num_formats = ARRAY_SIZE(ov9640_fmt_lists);
642
643 /* 673 /*
644 * check and show product ID and manufacturer ID 674 * check and show product ID and manufacturer ID
645 */ 675 */
@@ -702,11 +732,12 @@ static struct v4l2_subdev_core_ops ov9640_core_ops = {
702}; 732};
703 733
704static struct v4l2_subdev_video_ops ov9640_video_ops = { 734static struct v4l2_subdev_video_ops ov9640_video_ops = {
705 .s_stream = ov9640_s_stream, 735 .s_stream = ov9640_s_stream,
706 .s_fmt = ov9640_s_fmt, 736 .s_mbus_fmt = ov9640_s_fmt,
707 .try_fmt = ov9640_try_fmt, 737 .try_mbus_fmt = ov9640_try_fmt,
708 .cropcap = ov9640_cropcap, 738 .enum_mbus_fmt = ov9640_enum_fmt,
709 .g_crop = ov9640_g_crop, 739 .cropcap = ov9640_cropcap,
740 .g_crop = ov9640_g_crop,
710 741
711}; 742};
712 743
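
With .enum_mbus_fmt in place on the sensor side (ov772x and ov9640 above), the host no longer reads a static icd->formats array; it queries the subdev by index until the call fails, as pxa_camera_get_formats does further down. A hedged sketch of that calling convention, with example_count_codes being a hypothetical helper:

#include <media/v4l2-subdev.h>

/* Walk a sensor subdev's media-bus codes; stops at the first error return */
static int example_count_codes(struct v4l2_subdev *sd)
{
        enum v4l2_mbus_pixelcode code;
        int idx = 0;

        while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code))
                idx++;

        return idx;
}
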
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 73ec970ca5ca..11a2c26399b5 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -31,7 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/version.h> 32#include <linux/version.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <asm/uaccess.h> 34#include <linux/uaccess.h>
35#include <asm/io.h> 35#include <asm/io.h>
36 36
37#include <linux/videodev2.h> 37#include <linux/videodev2.h>
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 6aa48e0ae731..cc8ddb2d2382 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -151,17 +151,6 @@ static struct v4l2_format pvr_format [] = {
151}; 151};
152 152
153 153
154static const char *get_v4l_name(int v4l_type)
155{
156 switch (v4l_type) {
157 case VFL_TYPE_GRABBER: return "video";
158 case VFL_TYPE_RADIO: return "radio";
159 case VFL_TYPE_VBI: return "vbi";
160 default: return "?";
161 }
162}
163
164
165/* 154/*
166 * pvr_ioctl() 155 * pvr_ioctl()
167 * 156 *
@@ -891,10 +880,8 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
891 880
892static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) 881static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
893{ 882{
894 int num = dip->devbase.num;
895 struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw; 883 struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw;
896 enum pvr2_config cfg = dip->config; 884 enum pvr2_config cfg = dip->config;
897 int v4l_type = dip->v4l_type;
898 885
899 pvr2_hdw_v4l_store_minor_number(hdw,dip->minor_type,-1); 886 pvr2_hdw_v4l_store_minor_number(hdw,dip->minor_type,-1);
900 887
@@ -906,8 +893,8 @@ static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
906 are gone. */ 893 are gone. */
907 video_unregister_device(&dip->devbase); 894 video_unregister_device(&dip->devbase);
908 895
909 printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n", 896 printk(KERN_INFO "pvrusb2: unregistered device %s [%s]\n",
910 get_v4l_name(v4l_type), num, 897 video_device_node_name(&dip->devbase),
911 pvr2_config_get_name(cfg)); 898 pvr2_config_get_name(cfg));
912 899
913} 900}
@@ -1317,8 +1304,8 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
1317 ": Failed to register pvrusb2 v4l device\n"); 1304 ": Failed to register pvrusb2 v4l device\n");
1318 } 1305 }
1319 1306
1320 printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n", 1307 printk(KERN_INFO "pvrusb2: registered device %s [%s]\n",
1321 get_v4l_name(dip->v4l_type), dip->devbase.num, 1308 video_device_node_name(&dip->devbase),
1322 pvr2_config_get_name(dip->config)); 1309 pvr2_config_get_name(dip->config));
1323 1310
1324 pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw, 1311 pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
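
The pvrusb2 hunks drop the private get_v4l_name()/minor-number formatting in favour of the core helper video_device_node_name(), which covers video, radio and VBI nodes alike. A minimal sketch of the replacement usage, assuming only a registered struct video_device and the header shown elsewhere in this series:

#include <linux/kernel.h>
#include <media/v4l2-dev.h>

static void example_announce(struct video_device *vdev)
{
        /* video_device_node_name() yields the node name, e.g. "video0" */
        printk(KERN_INFO "example: registered %s\n",
               video_device_node_name(vdev));
}
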
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 89b620f6db7b..aea7e224cef6 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -169,7 +169,6 @@ static struct video_device pwc_template = {
169 .name = "Philips Webcam", /* Filled in later */ 169 .name = "Philips Webcam", /* Filled in later */
170 .release = video_device_release, 170 .release = video_device_release,
171 .fops = &pwc_fops, 171 .fops = &pwc_fops,
172 .minor = -1,
173}; 172};
174 173
175/***************************************************************************/ 174/***************************************************************************/
@@ -1807,7 +1806,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1807 goto err_video_release; 1806 goto err_video_release;
1808 } 1807 }
1809 1808
1810 PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->num); 1809 PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
1811 1810
1812 /* occupy slot */ 1811 /* occupy slot */
1813 if (hint < MAX_DEV_HINTS) 1812 if (hint < MAX_DEV_HINTS)
@@ -1948,7 +1947,9 @@ MODULE_PARM_DESC(size, "Initial image size. One of sqcif, qsif, qcif, sif, cif,
1948MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30"); 1947MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
1949MODULE_PARM_DESC(fbufs, "Number of internal frame buffers to reserve"); 1948MODULE_PARM_DESC(fbufs, "Number of internal frame buffers to reserve");
1950MODULE_PARM_DESC(mbufs, "Number of external (mmap()ed) image buffers"); 1949MODULE_PARM_DESC(mbufs, "Number of external (mmap()ed) image buffers");
1950#ifdef CONFIG_USB_PWC_DEBUG
1951MODULE_PARM_DESC(trace, "For debugging purposes"); 1951MODULE_PARM_DESC(trace, "For debugging purposes");
1952#endif
1952MODULE_PARM_DESC(power_save, "Turn power save feature in camera on or off"); 1953MODULE_PARM_DESC(power_save, "Turn power save feature in camera on or off");
1953MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)"); 1954MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)");
1954MODULE_PARM_DESC(leds, "LED on,off time in milliseconds"); 1955MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 51b683c63b70..294f860ce2b0 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -32,6 +32,7 @@
32#include <media/v4l2-dev.h> 32#include <media/v4l2-dev.h>
33#include <media/videobuf-dma-sg.h> 33#include <media/videobuf-dma-sg.h>
34#include <media/soc_camera.h> 34#include <media/soc_camera.h>
35#include <media/soc_mediabus.h>
35 36
36#include <linux/videodev2.h> 37#include <linux/videodev2.h>
37 38
@@ -183,23 +184,21 @@ struct pxa_cam_dma {
183/* buffer for one video frame */ 184/* buffer for one video frame */
184struct pxa_buffer { 185struct pxa_buffer {
185 /* common v4l buffer stuff -- must be first */ 186 /* common v4l buffer stuff -- must be first */
186 struct videobuf_buffer vb; 187 struct videobuf_buffer vb;
187 188 enum v4l2_mbus_pixelcode code;
188 const struct soc_camera_data_format *fmt;
189
190 /* our descriptor lists for Y, U and V channels */ 189 /* our descriptor lists for Y, U and V channels */
191 struct pxa_cam_dma dmas[3]; 190 struct pxa_cam_dma dmas[3];
192 191 int inwork;
193 int inwork; 192 enum pxa_camera_active_dma active_dma;
194
195 enum pxa_camera_active_dma active_dma;
196}; 193};
197 194
198struct pxa_camera_dev { 195struct pxa_camera_dev {
199 struct soc_camera_host soc_host; 196 struct soc_camera_host soc_host;
200 /* PXA27x is only supposed to handle one camera on its Quick Capture 197 /*
198 * PXA27x is only supposed to handle one camera on its Quick Capture
201 * interface. If anyone ever builds hardware to enable more than 199 * interface. If anyone ever builds hardware to enable more than
202 * one camera, they will have to modify this driver too */ 200 * one camera, they will have to modify this driver too
201 */
203 struct soc_camera_device *icd; 202 struct soc_camera_device *icd;
204 struct clk *clk; 203 struct clk *clk;
205 204
@@ -241,11 +240,15 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
241 unsigned int *size) 240 unsigned int *size)
242{ 241{
243 struct soc_camera_device *icd = vq->priv_data; 242 struct soc_camera_device *icd = vq->priv_data;
243 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
244 icd->current_fmt->host_fmt);
245
246 if (bytes_per_line < 0)
247 return bytes_per_line;
244 248
245 dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size); 249 dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
246 250
247 *size = roundup(icd->user_width * icd->user_height * 251 *size = bytes_per_line * icd->user_height;
248 ((icd->current_fmt->depth + 7) >> 3), 8);
249 252
250 if (0 == *count) 253 if (0 == *count)
251 *count = 32; 254 *count = 32;
@@ -267,8 +270,10 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
267 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 270 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
268 &buf->vb, buf->vb.baddr, buf->vb.bsize); 271 &buf->vb, buf->vb.baddr, buf->vb.bsize);
269 272
270 /* This waits until this buffer is out of danger, i.e., until it is no 273 /*
271 * longer in STATE_QUEUED or STATE_ACTIVE */ 274 * This waits until this buffer is out of danger, i.e., until it is no
275 * longer in STATE_QUEUED or STATE_ACTIVE
276 */
272 videobuf_waiton(&buf->vb, 0, 0); 277 videobuf_waiton(&buf->vb, 0, 0);
273 videobuf_dma_unmap(vq, dma); 278 videobuf_dma_unmap(vq, dma);
274 videobuf_dma_free(dma); 279 videobuf_dma_free(dma);
@@ -429,6 +434,11 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
429 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); 434 struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
430 int ret; 435 int ret;
431 int size_y, size_u = 0, size_v = 0; 436 int size_y, size_u = 0, size_v = 0;
437 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
438 icd->current_fmt->host_fmt);
439
440 if (bytes_per_line < 0)
441 return bytes_per_line;
432 442
433 dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 443 dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
434 vb, vb->baddr, vb->bsize); 444 vb, vb->baddr, vb->bsize);
@@ -437,29 +447,33 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
437 WARN_ON(!list_empty(&vb->queue)); 447 WARN_ON(!list_empty(&vb->queue));
438 448
439#ifdef DEBUG 449#ifdef DEBUG
440 /* This can be useful if you want to see if we actually fill 450 /*
441 * the buffer with something */ 451 * This can be useful if you want to see if we actually fill
452 * the buffer with something
453 */
442 memset((void *)vb->baddr, 0xaa, vb->bsize); 454 memset((void *)vb->baddr, 0xaa, vb->bsize);
443#endif 455#endif
444 456
445 BUG_ON(NULL == icd->current_fmt); 457 BUG_ON(NULL == icd->current_fmt);
446 458
447 /* I think, in buf_prepare you only have to protect global data, 459 /*
448 * the actual buffer is yours */ 460 * I think, in buf_prepare you only have to protect global data,
461 * the actual buffer is yours
462 */
449 buf->inwork = 1; 463 buf->inwork = 1;
450 464
451 if (buf->fmt != icd->current_fmt || 465 if (buf->code != icd->current_fmt->code ||
452 vb->width != icd->user_width || 466 vb->width != icd->user_width ||
453 vb->height != icd->user_height || 467 vb->height != icd->user_height ||
454 vb->field != field) { 468 vb->field != field) {
455 buf->fmt = icd->current_fmt; 469 buf->code = icd->current_fmt->code;
456 vb->width = icd->user_width; 470 vb->width = icd->user_width;
457 vb->height = icd->user_height; 471 vb->height = icd->user_height;
458 vb->field = field; 472 vb->field = field;
459 vb->state = VIDEOBUF_NEEDS_INIT; 473 vb->state = VIDEOBUF_NEEDS_INIT;
460 } 474 }
461 475
462 vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3); 476 vb->size = bytes_per_line * vb->height;
463 if (0 != vb->baddr && vb->bsize < vb->size) { 477 if (0 != vb->baddr && vb->bsize < vb->size) {
464 ret = -EINVAL; 478 ret = -EINVAL;
465 goto out; 479 goto out;
@@ -834,8 +848,10 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q,
834 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 848 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
835 struct pxa_camera_dev *pcdev = ici->priv; 849 struct pxa_camera_dev *pcdev = ici->priv;
836 850
837 /* We must pass NULL as dev pointer, then all pci_* dma operations 851 /*
838 * transform to normal dma_* ones. */ 852 * We must pass NULL as dev pointer, then all pci_* dma operations
853 * transform to normal dma_* ones.
854 */
839 videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock, 855 videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
840 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, 856 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
841 sizeof(struct pxa_buffer), icd); 857 sizeof(struct pxa_buffer), icd);
@@ -1051,11 +1067,18 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
1051{ 1067{
1052 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1068 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1053 struct pxa_camera_dev *pcdev = ici->priv; 1069 struct pxa_camera_dev *pcdev = ici->priv;
1070 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1054 unsigned long dw, bpp; 1071 unsigned long dw, bpp;
1055 u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0; 1072 u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
1073 int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top);
1074
1075 if (ret < 0)
1076 y_skip_top = 0;
1056 1077
1057 /* Datawidth is now guaranteed to be equal to one of the three values. 1078 /*
1058 * We fix bit-per-pixel equal to data-width... */ 1079 * Datawidth is now guaranteed to be equal to one of the three values.
1080 * We fix bit-per-pixel equal to data-width...
1081 */
1059 switch (flags & SOCAM_DATAWIDTH_MASK) { 1082 switch (flags & SOCAM_DATAWIDTH_MASK) {
1060 case SOCAM_DATAWIDTH_10: 1083 case SOCAM_DATAWIDTH_10:
1061 dw = 4; 1084 dw = 4;
@@ -1066,8 +1089,10 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
1066 bpp = 0x20; 1089 bpp = 0x20;
1067 break; 1090 break;
1068 default: 1091 default:
1069 /* Actually it can only be 8 now, 1092 /*
1070 * default is just to silence compiler warnings */ 1093 * Actually it can only be 8 now,
1094 * default is just to silence compiler warnings
1095 */
1071 case SOCAM_DATAWIDTH_8: 1096 case SOCAM_DATAWIDTH_8:
1072 dw = 2; 1097 dw = 2;
1073 bpp = 0; 1098 bpp = 0;
@@ -1118,7 +1143,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
1118 1143
1119 cicr2 = 0; 1144 cicr2 = 0;
1120 cicr3 = CICR3_LPF_VAL(icd->user_height - 1) | 1145 cicr3 = CICR3_LPF_VAL(icd->user_height - 1) |
1121 CICR3_BFW_VAL(min((unsigned short)255, icd->y_skip_top)); 1146 CICR3_BFW_VAL(min((u32)255, y_skip_top));
1122 cicr4 |= pcdev->mclk_divisor; 1147 cicr4 |= pcdev->mclk_divisor;
1123 1148
1124 __raw_writel(cicr1, pcdev->base + CICR1); 1149 __raw_writel(cicr1, pcdev->base + CICR1);
@@ -1138,9 +1163,15 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1138 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1163 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1139 struct pxa_camera_dev *pcdev = ici->priv; 1164 struct pxa_camera_dev *pcdev = ici->priv;
1140 unsigned long bus_flags, camera_flags, common_flags; 1165 unsigned long bus_flags, camera_flags, common_flags;
1141 int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags); 1166 const struct soc_mbus_pixelfmt *fmt;
1167 int ret;
1142 struct pxa_cam *cam = icd->host_priv; 1168 struct pxa_cam *cam = icd->host_priv;
1143 1169
1170 fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
1171 if (!fmt)
1172 return -EINVAL;
1173
1174 ret = test_platform_param(pcdev, fmt->bits_per_sample, &bus_flags);
1144 if (ret < 0) 1175 if (ret < 0)
1145 return ret; 1176 return ret;
1146 1177
@@ -1204,59 +1235,49 @@ static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
1204 return soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL; 1235 return soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL;
1205} 1236}
1206 1237
1207static const struct soc_camera_data_format pxa_camera_formats[] = { 1238static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
1208 { 1239 {
1209 .name = "Planar YUV422 16 bit", 1240 .fourcc = V4L2_PIX_FMT_YUV422P,
1210 .depth = 16, 1241 .name = "Planar YUV422 16 bit",
1211 .fourcc = V4L2_PIX_FMT_YUV422P, 1242 .bits_per_sample = 8,
1212 .colorspace = V4L2_COLORSPACE_JPEG, 1243 .packing = SOC_MBUS_PACKING_2X8_PADHI,
1244 .order = SOC_MBUS_ORDER_LE,
1213 }, 1245 },
1214}; 1246};
1215 1247
1216static bool buswidth_supported(struct soc_camera_device *icd, int depth) 1248/* This will be corrected as we get more formats */
1249static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
1217{ 1250{
1218 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 1251 return fmt->packing == SOC_MBUS_PACKING_NONE ||
1219 struct pxa_camera_dev *pcdev = ici->priv; 1252 (fmt->bits_per_sample == 8 &&
1220 1253 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
1221 switch (depth) { 1254 (fmt->bits_per_sample > 8 &&
1222 case 8: 1255 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
1223 return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8);
1224 case 9:
1225 return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9);
1226 case 10:
1227 return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10);
1228 }
1229 return false;
1230}
1231
1232static int required_buswidth(const struct soc_camera_data_format *fmt)
1233{
1234 switch (fmt->fourcc) {
1235 case V4L2_PIX_FMT_UYVY:
1236 case V4L2_PIX_FMT_VYUY:
1237 case V4L2_PIX_FMT_YUYV:
1238 case V4L2_PIX_FMT_YVYU:
1239 case V4L2_PIX_FMT_RGB565:
1240 case V4L2_PIX_FMT_RGB555:
1241 return 8;
1242 default:
1243 return fmt->depth;
1244 }
1245} 1256}
1246 1257
1247static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, 1258static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
1248 struct soc_camera_format_xlate *xlate) 1259 struct soc_camera_format_xlate *xlate)
1249{ 1260{
1261 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1250 struct device *dev = icd->dev.parent; 1262 struct device *dev = icd->dev.parent;
1251 int formats = 0, buswidth, ret; 1263 int formats = 0, ret;
1252 struct pxa_cam *cam; 1264 struct pxa_cam *cam;
1265 enum v4l2_mbus_pixelcode code;
1266 const struct soc_mbus_pixelfmt *fmt;
1253 1267
1254 buswidth = required_buswidth(icd->formats + idx); 1268 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
1269 if (ret < 0)
1270 /* No more formats */
1271 return 0;
1255 1272
1256 if (!buswidth_supported(icd, buswidth)) 1273 fmt = soc_mbus_get_fmtdesc(code);
1274 if (!fmt) {
1275 dev_err(dev, "Invalid format code #%d: %d\n", idx, code);
1257 return 0; 1276 return 0;
1277 }
1258 1278
1259 ret = pxa_camera_try_bus_param(icd, buswidth); 1279 /* This also checks support for the requested bits-per-sample */
1280 ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample);
1260 if (ret < 0) 1281 if (ret < 0)
1261 return 0; 1282 return 0;
1262 1283
@@ -1270,45 +1291,40 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
1270 cam = icd->host_priv; 1291 cam = icd->host_priv;
1271 } 1292 }
1272 1293
1273 switch (icd->formats[idx].fourcc) { 1294 switch (code) {
1274 case V4L2_PIX_FMT_UYVY: 1295 case V4L2_MBUS_FMT_YUYV8_2X8_BE:
1275 formats++; 1296 formats++;
1276 if (xlate) { 1297 if (xlate) {
1277 xlate->host_fmt = &pxa_camera_formats[0]; 1298 xlate->host_fmt = &pxa_camera_formats[0];
1278 xlate->cam_fmt = icd->formats + idx; 1299 xlate->code = code;
1279 xlate->buswidth = buswidth;
1280 xlate++; 1300 xlate++;
1281 dev_dbg(dev, "Providing format %s using %s\n", 1301 dev_dbg(dev, "Providing format %s using code %d\n",
1282 pxa_camera_formats[0].name, 1302 pxa_camera_formats[0].name, code);
1283 icd->formats[idx].name);
1284 } 1303 }
1285 case V4L2_PIX_FMT_VYUY: 1304 case V4L2_MBUS_FMT_YVYU8_2X8_BE:
1286 case V4L2_PIX_FMT_YUYV: 1305 case V4L2_MBUS_FMT_YUYV8_2X8_LE:
1287 case V4L2_PIX_FMT_YVYU: 1306 case V4L2_MBUS_FMT_YVYU8_2X8_LE:
1288 case V4L2_PIX_FMT_RGB565: 1307 case V4L2_MBUS_FMT_RGB565_2X8_LE:
1289 case V4L2_PIX_FMT_RGB555: 1308 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
1290 formats++; 1309 if (xlate)
1291 if (xlate) {
1292 xlate->host_fmt = icd->formats + idx;
1293 xlate->cam_fmt = icd->formats + idx;
1294 xlate->buswidth = buswidth;
1295 xlate++;
1296 dev_dbg(dev, "Providing format %s packed\n", 1310 dev_dbg(dev, "Providing format %s packed\n",
1297 icd->formats[idx].name); 1311 fmt->name);
1298 }
1299 break; 1312 break;
1300 default: 1313 default:
1301 /* Generic pass-through */ 1314 if (!pxa_camera_packing_supported(fmt))
1302 formats++; 1315 return 0;
1303 if (xlate) { 1316 if (xlate)
1304 xlate->host_fmt = icd->formats + idx;
1305 xlate->cam_fmt = icd->formats + idx;
1306 xlate->buswidth = icd->formats[idx].depth;
1307 xlate++;
1308 dev_dbg(dev, 1317 dev_dbg(dev,
1309 "Providing format %s in pass-through mode\n", 1318 "Providing format %s in pass-through mode\n",
1310 icd->formats[idx].name); 1319 fmt->name);
1311 } 1320 }
1321
1322 /* Generic pass-through */
1323 formats++;
1324 if (xlate) {
1325 xlate->host_fmt = fmt;
1326 xlate->code = code;
1327 xlate++;
1312 } 1328 }
1313 1329
1314 return formats; 1330 return formats;
@@ -1320,11 +1336,11 @@ static void pxa_camera_put_formats(struct soc_camera_device *icd)
1320 icd->host_priv = NULL; 1336 icd->host_priv = NULL;
1321} 1337}
1322 1338
1323static int pxa_camera_check_frame(struct v4l2_pix_format *pix) 1339static int pxa_camera_check_frame(u32 width, u32 height)
1324{ 1340{
1325 /* limit to pxa hardware capabilities */ 1341 /* limit to pxa hardware capabilities */
1326 return pix->height < 32 || pix->height > 2048 || pix->width < 48 || 1342 return height < 32 || height > 2048 || width < 48 || width > 2048 ||
1327 pix->width > 2048 || (pix->width & 0x01); 1343 (width & 0x01);
1328} 1344}
1329 1345
1330static int pxa_camera_set_crop(struct soc_camera_device *icd, 1346static int pxa_camera_set_crop(struct soc_camera_device *icd,
@@ -1339,9 +1355,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
1339 .master_clock = pcdev->mclk, 1355 .master_clock = pcdev->mclk,
1340 .pixel_clock_max = pcdev->ciclk / 4, 1356 .pixel_clock_max = pcdev->ciclk / 4,
1341 }; 1357 };
1342 struct v4l2_format f; 1358 struct v4l2_mbus_framefmt mf;
1343 struct v4l2_pix_format *pix = &f.fmt.pix, pix_tmp;
1344 struct pxa_cam *cam = icd->host_priv; 1359 struct pxa_cam *cam = icd->host_priv;
1360 u32 fourcc = icd->current_fmt->host_fmt->fourcc;
1345 int ret; 1361 int ret;
1346 1362
1347 /* If PCLK is used to latch data from the sensor, check sense */ 1363 /* If PCLK is used to latch data from the sensor, check sense */
@@ -1358,27 +1374,23 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
1358 return ret; 1374 return ret;
1359 } 1375 }
1360 1376
1361 f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1377 ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
1362
1363 ret = v4l2_subdev_call(sd, video, g_fmt, &f);
1364 if (ret < 0) 1378 if (ret < 0)
1365 return ret; 1379 return ret;
1366 1380
1367 pix_tmp = *pix; 1381 if (pxa_camera_check_frame(mf.width, mf.height)) {
1368 if (pxa_camera_check_frame(pix)) {
1369 /* 1382 /*
1370 * Camera cropping produced a frame beyond our capabilities. 1383 * Camera cropping produced a frame beyond our capabilities.
1371 * FIXME: just extract a subframe, that we can process. 1384 * FIXME: just extract a subframe, that we can process.
1372 */ 1385 */
1373 v4l_bound_align_image(&pix->width, 48, 2048, 1, 1386 v4l_bound_align_image(&mf.width, 48, 2048, 1,
1374 &pix->height, 32, 2048, 0, 1387 &mf.height, 32, 2048, 0,
1375 icd->current_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? 1388 fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
1376 4 : 0); 1389 ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
1377 ret = v4l2_subdev_call(sd, video, s_fmt, &f);
1378 if (ret < 0) 1390 if (ret < 0)
1379 return ret; 1391 return ret;
1380 1392
1381 if (pxa_camera_check_frame(pix)) { 1393 if (pxa_camera_check_frame(mf.width, mf.height)) {
1382 dev_warn(icd->dev.parent, 1394 dev_warn(icd->dev.parent,
1383 "Inconsistent state. Use S_FMT to repair\n"); 1395 "Inconsistent state. Use S_FMT to repair\n");
1384 return -EINVAL; 1396 return -EINVAL;
@@ -1395,10 +1407,10 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
1395 recalculate_fifo_timeout(pcdev, sense.pixel_clock); 1407 recalculate_fifo_timeout(pcdev, sense.pixel_clock);
1396 } 1408 }
1397 1409
1398 icd->user_width = pix->width; 1410 icd->user_width = mf.width;
1399 icd->user_height = pix->height; 1411 icd->user_height = mf.height;
1400 1412
1401 pxa_camera_setup_cicr(icd, cam->flags, icd->current_fmt->fourcc); 1413 pxa_camera_setup_cicr(icd, cam->flags, fourcc);
1402 1414
1403 return ret; 1415 return ret;
1404} 1416}
@@ -1410,14 +1422,13 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1410 struct pxa_camera_dev *pcdev = ici->priv; 1422 struct pxa_camera_dev *pcdev = ici->priv;
1411 struct device *dev = icd->dev.parent; 1423 struct device *dev = icd->dev.parent;
1412 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1424 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1413 const struct soc_camera_data_format *cam_fmt = NULL;
1414 const struct soc_camera_format_xlate *xlate = NULL; 1425 const struct soc_camera_format_xlate *xlate = NULL;
1415 struct soc_camera_sense sense = { 1426 struct soc_camera_sense sense = {
1416 .master_clock = pcdev->mclk, 1427 .master_clock = pcdev->mclk,
1417 .pixel_clock_max = pcdev->ciclk / 4, 1428 .pixel_clock_max = pcdev->ciclk / 4,
1418 }; 1429 };
1419 struct v4l2_pix_format *pix = &f->fmt.pix; 1430 struct v4l2_pix_format *pix = &f->fmt.pix;
1420 struct v4l2_format cam_f = *f; 1431 struct v4l2_mbus_framefmt mf;
1421 int ret; 1432 int ret;
1422 1433
1423 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 1434 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
@@ -1426,26 +1437,31 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1426 return -EINVAL; 1437 return -EINVAL;
1427 } 1438 }
1428 1439
1429 cam_fmt = xlate->cam_fmt;
1430
1431 /* If PCLK is used to latch data from the sensor, check sense */ 1440 /* If PCLK is used to latch data from the sensor, check sense */
1432 if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) 1441 if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
1442 /* The caller holds a mutex. */
1433 icd->sense = &sense; 1443 icd->sense = &sense;
1434 1444
1435 cam_f.fmt.pix.pixelformat = cam_fmt->fourcc; 1445 mf.width = pix->width;
1436 ret = v4l2_subdev_call(sd, video, s_fmt, &cam_f); 1446 mf.height = pix->height;
1437 cam_f.fmt.pix.pixelformat = pix->pixelformat; 1447 mf.field = pix->field;
1438 *pix = cam_f.fmt.pix; 1448 mf.colorspace = pix->colorspace;
1449 mf.code = xlate->code;
1450
1451 ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
1452
1453 if (mf.code != xlate->code)
1454 return -EINVAL;
1439 1455
1440 icd->sense = NULL; 1456 icd->sense = NULL;
1441 1457
1442 if (ret < 0) { 1458 if (ret < 0) {
1443 dev_warn(dev, "Failed to configure for format %x\n", 1459 dev_warn(dev, "Failed to configure for format %x\n",
1444 pix->pixelformat); 1460 pix->pixelformat);
1445 } else if (pxa_camera_check_frame(pix)) { 1461 } else if (pxa_camera_check_frame(mf.width, mf.height)) {
1446 dev_warn(dev, 1462 dev_warn(dev,
1447 "Camera driver produced an unsupported frame %dx%d\n", 1463 "Camera driver produced an unsupported frame %dx%d\n",
1448 pix->width, pix->height); 1464 mf.width, mf.height);
1449 ret = -EINVAL; 1465 ret = -EINVAL;
1450 } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { 1466 } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
1451 if (sense.pixel_clock > sense.pixel_clock_max) { 1467 if (sense.pixel_clock > sense.pixel_clock_max) {
@@ -1457,10 +1473,14 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1457 recalculate_fifo_timeout(pcdev, sense.pixel_clock); 1473 recalculate_fifo_timeout(pcdev, sense.pixel_clock);
1458 } 1474 }
1459 1475
1460 if (!ret) { 1476 if (ret < 0)
1461 icd->buswidth = xlate->buswidth; 1477 return ret;
1462 icd->current_fmt = xlate->host_fmt; 1478
1463 } 1479 pix->width = mf.width;
1480 pix->height = mf.height;
1481 pix->field = mf.field;
1482 pix->colorspace = mf.colorspace;
1483 icd->current_fmt = xlate;
1464 1484
1465 return ret; 1485 return ret;
1466} 1486}
@@ -1468,17 +1488,16 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1468static int pxa_camera_try_fmt(struct soc_camera_device *icd, 1488static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1469 struct v4l2_format *f) 1489 struct v4l2_format *f)
1470{ 1490{
1471 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
1472 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1491 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1473 const struct soc_camera_format_xlate *xlate; 1492 const struct soc_camera_format_xlate *xlate;
1474 struct v4l2_pix_format *pix = &f->fmt.pix; 1493 struct v4l2_pix_format *pix = &f->fmt.pix;
1494 struct v4l2_mbus_framefmt mf;
1475 __u32 pixfmt = pix->pixelformat; 1495 __u32 pixfmt = pix->pixelformat;
1476 enum v4l2_field field;
1477 int ret; 1496 int ret;
1478 1497
1479 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 1498 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1480 if (!xlate) { 1499 if (!xlate) {
1481 dev_warn(ici->v4l2_dev.dev, "Format %x not found\n", pixfmt); 1500 dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
1482 return -EINVAL; 1501 return -EINVAL;
1483 } 1502 }
1484 1503
@@ -1492,22 +1511,36 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1492 &pix->height, 32, 2048, 0, 1511 &pix->height, 32, 2048, 0,
1493 pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0); 1512 pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
1494 1513
1495 pix->bytesperline = pix->width * 1514 pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
1496 DIV_ROUND_UP(xlate->host_fmt->depth, 8); 1515 xlate->host_fmt);
1516 if (pix->bytesperline < 0)
1517 return pix->bytesperline;
1497 pix->sizeimage = pix->height * pix->bytesperline; 1518 pix->sizeimage = pix->height * pix->bytesperline;
1498 1519
1499 /* camera has to see its format, but the user the original one */
1500 pix->pixelformat = xlate->cam_fmt->fourcc;
1501 /* limit to sensor capabilities */ 1520 /* limit to sensor capabilities */
1502 ret = v4l2_subdev_call(sd, video, try_fmt, f); 1521 mf.width = pix->width;
1503 pix->pixelformat = pixfmt; 1522 mf.height = pix->height;
1523 mf.field = pix->field;
1524 mf.colorspace = pix->colorspace;
1525 mf.code = xlate->code;
1504 1526
1505 field = pix->field; 1527 ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
1528 if (ret < 0)
1529 return ret;
1506 1530
1507 if (field == V4L2_FIELD_ANY) { 1531 pix->width = mf.width;
1508 pix->field = V4L2_FIELD_NONE; 1532 pix->height = mf.height;
1509 } else if (field != V4L2_FIELD_NONE) { 1533 pix->colorspace = mf.colorspace;
1510 dev_err(icd->dev.parent, "Field type %d unsupported.\n", field); 1534
1535 switch (mf.field) {
1536 case V4L2_FIELD_ANY:
1537 case V4L2_FIELD_NONE:
1538 pix->field = V4L2_FIELD_NONE;
1539 break;
1540 default:
1541 /* TODO: support interlaced at least in pass-through mode */
1542 dev_err(icd->dev.parent, "Field type %d unsupported.\n",
1543 mf.field);
1511 return -EINVAL; 1544 return -EINVAL;
1512 } 1545 }
1513 1546
@@ -1519,10 +1552,12 @@ static int pxa_camera_reqbufs(struct soc_camera_file *icf,
1519{ 1552{
1520 int i; 1553 int i;
1521 1554
1522 /* This is for locking debugging only. I removed spinlocks and now I 1555 /*
1556 * This is for locking debugging only. I removed spinlocks and now I
1523 * check whether .prepare is ever called on a linked buffer, or whether 1557 * check whether .prepare is ever called on a linked buffer, or whether
1524 * a dma IRQ can occur for an in-work or unlinked buffer. Until now 1558 * a dma IRQ can occur for an in-work or unlinked buffer. Until now
1525 * it hadn't triggered */ 1559 * it hadn't triggered
1560 */
1526 for (i = 0; i < p->count; i++) { 1561 for (i = 0; i < p->count; i++) {
1527 struct pxa_buffer *buf = container_of(icf->vb_vidq.bufs[i], 1562 struct pxa_buffer *buf = container_of(icf->vb_vidq.bufs[i],
1528 struct pxa_buffer, vb); 1563 struct pxa_buffer, vb);
@@ -1657,8 +1692,10 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev)
1657 pcdev->platform_flags = pcdev->pdata->flags; 1692 pcdev->platform_flags = pcdev->pdata->flags;
1658 if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 | 1693 if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
1659 PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) { 1694 PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
1660 /* Platform hasn't set available data widths. This is bad. 1695 /*
1661 * Warn and use a default. */ 1696 * Platform hasn't set available data widths. This is bad.
1697 * Warn and use a default.
1698 */
1662 dev_warn(&pdev->dev, "WARNING! Platform hasn't set available " 1699 dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
1663 "data widths, using default 10 bit\n"); 1700 "data widths, using default 10 bit\n");
1664 pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10; 1701 pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
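
The pxa_camera conversion above shows the host-side half of the negotiation: the user's v4l2_pix_format is translated into a v4l2_mbus_framefmt keyed by the xlate code, handed to the subdev, and the line length now comes from soc_mbus_bytes_per_line() instead of a per-format depth. The helper below is a hypothetical condensation of that flow (example_negotiate is not in the patch); it uses try_mbus_fmt, whereas the actual set path in pxa_camera_set_fmt calls s_mbus_fmt.

#include <linux/videodev2.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>

static int example_negotiate(struct v4l2_subdev *sd,
                             const struct soc_mbus_pixelfmt *host_fmt,
                             enum v4l2_mbus_pixelcode code,
                             struct v4l2_pix_format *pix)
{
        struct v4l2_mbus_framefmt mf = {
                .width      = pix->width,
                .height     = pix->height,
                .field      = pix->field,
                .colorspace = pix->colorspace,
                .code       = code,
        };
        int bytes_per_line, ret;

        /* Let the sensor adjust size, field and colorspace for this code */
        ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
        if (ret < 0)
                return ret;

        pix->width      = mf.width;
        pix->height     = mf.height;
        pix->field      = mf.field;
        pix->colorspace = mf.colorspace;

        /* Line pitch is derived from the host-side format description */
        bytes_per_line = soc_mbus_bytes_per_line(pix->width, host_fmt);
        if (bytes_per_line < 0)
                return bytes_per_line;

        pix->bytesperline = bytes_per_line;
        pix->sizeimage    = pix->height * bytes_per_line;

        return 0;
}
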
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 373f2a30a677..7e42989ce0e4 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -13,9 +13,11 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/videodev2.h> 14#include <linux/videodev2.h>
15 15
16#include <media/rj54n1cb0c.h>
17#include <media/soc_camera.h>
18#include <media/soc_mediabus.h>
16#include <media/v4l2-subdev.h> 19#include <media/v4l2-subdev.h>
17#include <media/v4l2-chip-ident.h> 20#include <media/v4l2-chip-ident.h>
18#include <media/soc_camera.h>
19 21
20#define RJ54N1_DEV_CODE 0x0400 22#define RJ54N1_DEV_CODE 0x0400
21#define RJ54N1_DEV_CODE2 0x0401 23#define RJ54N1_DEV_CODE2 0x0401
@@ -38,6 +40,7 @@
38#define RJ54N1_H_OBEN_OFS 0x0413 40#define RJ54N1_H_OBEN_OFS 0x0413
39#define RJ54N1_V_OBEN_OFS 0x0414 41#define RJ54N1_V_OBEN_OFS 0x0414
40#define RJ54N1_RESIZE_CONTROL 0x0415 42#define RJ54N1_RESIZE_CONTROL 0x0415
43#define RJ54N1_STILL_CONTROL 0x0417
41#define RJ54N1_INC_USE_SEL_H 0x0425 44#define RJ54N1_INC_USE_SEL_H 0x0425
42#define RJ54N1_INC_USE_SEL_L 0x0426 45#define RJ54N1_INC_USE_SEL_L 0x0426
43#define RJ54N1_MIRROR_STILL_MODE 0x0427 46#define RJ54N1_MIRROR_STILL_MODE 0x0427
@@ -49,10 +52,21 @@
49#define RJ54N1_RA_SEL_UL 0x0530 52#define RJ54N1_RA_SEL_UL 0x0530
50#define RJ54N1_BYTE_SWAP 0x0531 53#define RJ54N1_BYTE_SWAP 0x0531
51#define RJ54N1_OUT_SIGPO 0x053b 54#define RJ54N1_OUT_SIGPO 0x053b
55#define RJ54N1_WB_SEL_WEIGHT_I 0x054e
56#define RJ54N1_BIT8_WB 0x0569
57#define RJ54N1_HCAPS_WB 0x056a
58#define RJ54N1_VCAPS_WB 0x056b
59#define RJ54N1_HCAPE_WB 0x056c
60#define RJ54N1_VCAPE_WB 0x056d
61#define RJ54N1_EXPOSURE_CONTROL 0x058c
52#define RJ54N1_FRAME_LENGTH_S_H 0x0595 62#define RJ54N1_FRAME_LENGTH_S_H 0x0595
53#define RJ54N1_FRAME_LENGTH_S_L 0x0596 63#define RJ54N1_FRAME_LENGTH_S_L 0x0596
54#define RJ54N1_FRAME_LENGTH_P_H 0x0597 64#define RJ54N1_FRAME_LENGTH_P_H 0x0597
55#define RJ54N1_FRAME_LENGTH_P_L 0x0598 65#define RJ54N1_FRAME_LENGTH_P_L 0x0598
66#define RJ54N1_PEAK_H 0x05b7
67#define RJ54N1_PEAK_50 0x05b8
68#define RJ54N1_PEAK_60 0x05b9
69#define RJ54N1_PEAK_DIFF 0x05ba
56#define RJ54N1_IOC 0x05ef 70#define RJ54N1_IOC 0x05ef
57#define RJ54N1_TG_BYPASS 0x0700 71#define RJ54N1_TG_BYPASS 0x0700
58#define RJ54N1_PLL_L 0x0701 72#define RJ54N1_PLL_L 0x0701
@@ -68,6 +82,7 @@
68#define RJ54N1_OCLK_SEL_EN 0x0713 82#define RJ54N1_OCLK_SEL_EN 0x0713
69#define RJ54N1_CLK_RST 0x0717 83#define RJ54N1_CLK_RST 0x0717
70#define RJ54N1_RESET_STANDBY 0x0718 84#define RJ54N1_RESET_STANDBY 0x0718
85#define RJ54N1_FWFLG 0x07fe
71 86
72#define E_EXCLK (1 << 7) 87#define E_EXCLK (1 << 7)
73#define SOFT_STDBY (1 << 4) 88#define SOFT_STDBY (1 << 4)
@@ -78,29 +93,53 @@
78#define RESIZE_HOLD_SEL (1 << 2) 93#define RESIZE_HOLD_SEL (1 << 2)
79#define RESIZE_GO (1 << 1) 94#define RESIZE_GO (1 << 1)
80 95
96/*
97 * When cropping, the camera automatically centers the cropped region, there
98 * doesn't seem to be a way to specify an explicit location of the rectangle.
99 */
81#define RJ54N1_COLUMN_SKIP 0 100#define RJ54N1_COLUMN_SKIP 0
82#define RJ54N1_ROW_SKIP 0 101#define RJ54N1_ROW_SKIP 0
83#define RJ54N1_MAX_WIDTH 1600 102#define RJ54N1_MAX_WIDTH 1600
84#define RJ54N1_MAX_HEIGHT 1200 103#define RJ54N1_MAX_HEIGHT 1200
85 104
105#define PLL_L 2
106#define PLL_N 0x31
107
86/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */ 108/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */
87 109
88static const struct soc_camera_data_format rj54n1_colour_formats[] = { 110/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
89 { 111struct rj54n1_datafmt {
90 .name = "YUYV", 112 enum v4l2_mbus_pixelcode code;
91 .depth = 16, 113 enum v4l2_colorspace colorspace;
92 .fourcc = V4L2_PIX_FMT_YUYV, 114};
93 .colorspace = V4L2_COLORSPACE_JPEG, 115
94 }, { 116/* Find a data format by a pixel code in an array */
95 .name = "RGB565", 117static const struct rj54n1_datafmt *rj54n1_find_datafmt(
96 .depth = 16, 118 enum v4l2_mbus_pixelcode code, const struct rj54n1_datafmt *fmt,
97 .fourcc = V4L2_PIX_FMT_RGB565, 119 int n)
98 .colorspace = V4L2_COLORSPACE_SRGB, 120{
99 } 121 int i;
122 for (i = 0; i < n; i++)
123 if (fmt[i].code == code)
124 return fmt + i;
125
126 return NULL;
127}
128
129static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
130 {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG},
131 {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG},
132 {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
133 {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
134 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
135 {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
136 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
137 {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
138 {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
100}; 139};
101 140
102struct rj54n1_clock_div { 141struct rj54n1_clock_div {
103 u8 ratio_tg; 142 u8 ratio_tg; /* can be 0 or an odd number */
104 u8 ratio_t; 143 u8 ratio_t;
105 u8 ratio_r; 144 u8 ratio_r;
106 u8 ratio_op; 145 u8 ratio_op;
@@ -109,12 +148,14 @@ struct rj54n1_clock_div {
109 148
110struct rj54n1 { 149struct rj54n1 {
111 struct v4l2_subdev subdev; 150 struct v4l2_subdev subdev;
151 struct rj54n1_clock_div clk_div;
152 const struct rj54n1_datafmt *fmt;
112 struct v4l2_rect rect; /* Sensor window */ 153 struct v4l2_rect rect; /* Sensor window */
154 unsigned int tgclk_mhz;
155 bool auto_wb;
113 unsigned short width; /* Output window */ 156 unsigned short width; /* Output window */
114 unsigned short height; 157 unsigned short height;
115 unsigned short resize; /* Sensor * 1024 / resize = Output */ 158 unsigned short resize; /* Sensor * 1024 / resize = Output */
116 struct rj54n1_clock_div clk_div;
117 u32 fourcc;
118 unsigned short scale; 159 unsigned short scale;
119 u8 bank; 160 u8 bank;
120}; 161};
@@ -171,7 +212,7 @@ const static struct rj54n1_reg_val bank_7[] = {
171 {0x714, 0xff}, 212 {0x714, 0xff},
172 {0x715, 0xff}, 213 {0x715, 0xff},
173 {0x716, 0x1f}, 214 {0x716, 0x1f},
174 {0x7FE, 0x02}, 215 {0x7FE, 2},
175}; 216};
176 217
177const static struct rj54n1_reg_val bank_8[] = { 218const static struct rj54n1_reg_val bank_8[] = {
@@ -359,7 +400,7 @@ const static struct rj54n1_reg_val bank_8[] = {
359 {0x8BB, 0x00}, 400 {0x8BB, 0x00},
360 {0x8BC, 0xFF}, 401 {0x8BC, 0xFF},
361 {0x8BD, 0x00}, 402 {0x8BD, 0x00},
362 {0x8FE, 0x02}, 403 {0x8FE, 2},
363}; 404};
364 405
365const static struct rj54n1_reg_val bank_10[] = { 406const static struct rj54n1_reg_val bank_10[] = {
@@ -440,12 +481,24 @@ static int reg_write_multiple(struct i2c_client *client,
440 return 0; 481 return 0;
441} 482}
442 483
443static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable) 484static int rj54n1_enum_fmt(struct v4l2_subdev *sd, int index,
485 enum v4l2_mbus_pixelcode *code)
444{ 486{
445 /* TODO: start / stop streaming */ 487 if ((unsigned int)index >= ARRAY_SIZE(rj54n1_colour_fmts))
488 return -EINVAL;
489
490 *code = rj54n1_colour_fmts[index].code;
446 return 0; 491 return 0;
447} 492}
448 493
494static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
495{
496 struct i2c_client *client = sd->priv;
497
498 /* Switch between preview and still shot modes */
499 return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
500}
501
449static int rj54n1_set_bus_param(struct soc_camera_device *icd, 502static int rj54n1_set_bus_param(struct soc_camera_device *icd,
450 unsigned long flags) 503 unsigned long flags)
451{ 504{
@@ -502,6 +555,44 @@ static int rj54n1_commit(struct i2c_client *client)
502 return ret; 555 return ret;
503} 556}
504 557
558static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
559 u32 *out_w, u32 *out_h);
560
561static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
562{
563 struct i2c_client *client = sd->priv;
564 struct rj54n1 *rj54n1 = to_rj54n1(client);
565 struct v4l2_rect *rect = &a->c;
566 unsigned int dummy, output_w, output_h,
567 input_w = rect->width, input_h = rect->height;
568 int ret;
569
570 /* arbitrary minimum width and height, edges unimportant */
571 soc_camera_limit_side(&dummy, &input_w,
572 RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH);
573
574 soc_camera_limit_side(&dummy, &input_h,
575 RJ54N1_ROW_SKIP, 8, RJ54N1_MAX_HEIGHT);
576
577 output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
578 output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
579
580 dev_dbg(&client->dev, "Scaling for %ux%u : %u = %ux%u\n",
581 input_w, input_h, rj54n1->resize, output_w, output_h);
582
583 ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
584 if (ret < 0)
585 return ret;
586
587 rj54n1->width = output_w;
588 rj54n1->height = output_h;
589 rj54n1->resize = ret;
590 rj54n1->rect.width = input_w;
591 rj54n1->rect.height = input_h;
592
593 return 0;
594}
595
505static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) 596static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
506{ 597{
507 struct i2c_client *client = sd->priv; 598 struct i2c_client *client = sd->priv;
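The output size computed in rj54n1_s_crop() above rounds to the nearest integer rather than truncating, by adding half the divisor before dividing. A minimal stand-alone sketch of that arithmetic, with made-up numbers:

    #include <stdio.h>

    /* output = input * 1024 / resize, rounded to nearest, as in rj54n1_s_crop() */
    static unsigned int scale_round(unsigned int input, unsigned int resize)
    {
            return (input * 1024 + resize / 2) / resize;
    }

    int main(void)
    {
            /* resize = 2048 is a 1:2 downscale; 641 * 1024 / 2048 = 320.5 */
            printf("%u\n", scale_round(640, 2048));  /* 320 */
            printf("%u\n", scale_round(641, 2048));  /* 321, plain truncation would give 320 */
            return 0;
    }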
@@ -527,16 +618,17 @@ static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
527 return 0; 618 return 0;
528} 619}
529 620
530static int rj54n1_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 621static int rj54n1_g_fmt(struct v4l2_subdev *sd,
622 struct v4l2_mbus_framefmt *mf)
531{ 623{
532 struct i2c_client *client = sd->priv; 624 struct i2c_client *client = sd->priv;
533 struct rj54n1 *rj54n1 = to_rj54n1(client); 625 struct rj54n1 *rj54n1 = to_rj54n1(client);
534 struct v4l2_pix_format *pix = &f->fmt.pix;
535 626
536 pix->pixelformat = rj54n1->fourcc; 627 mf->code = rj54n1->fmt->code;
537 pix->field = V4L2_FIELD_NONE; 628 mf->colorspace = rj54n1->fmt->colorspace;
538 pix->width = rj54n1->width; 629 mf->field = V4L2_FIELD_NONE;
539 pix->height = rj54n1->height; 630 mf->width = rj54n1->width;
631 mf->height = rj54n1->height;
540 632
541 return 0; 633 return 0;
542} 634}
@@ -550,11 +642,44 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
550 u32 *out_w, u32 *out_h) 642 u32 *out_w, u32 *out_h)
551{ 643{
552 struct i2c_client *client = sd->priv; 644 struct i2c_client *client = sd->priv;
645 struct rj54n1 *rj54n1 = to_rj54n1(client);
553 unsigned int skip, resize, input_w = *in_w, input_h = *in_h, 646 unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
554 output_w = *out_w, output_h = *out_h; 647 output_w = *out_w, output_h = *out_h;
555 u16 inc_sel; 648 u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom;
649 unsigned int peak, peak_50, peak_60;
556 int ret; 650 int ret;
557 651
652 /*
653 * We have a problem with crops where the input window is larger than
654 * 512x384 and the output window is larger than half of the input one.
655 * In this case we have to either reduce the input window to at most
656 * 512x384 or reduce the output window to at most half of the input.
657 */
658 if (output_w > max(512U, input_w / 2)) {
659 if (2 * output_w > RJ54N1_MAX_WIDTH) {
660 input_w = RJ54N1_MAX_WIDTH;
661 output_w = RJ54N1_MAX_WIDTH / 2;
662 } else {
663 input_w = output_w * 2;
664 }
665
666 dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n",
667 input_w, output_w);
668 }
669
670 if (output_h > max(384U, input_h / 2)) {
671 if (2 * output_h > RJ54N1_MAX_HEIGHT) {
672 input_h = RJ54N1_MAX_HEIGHT;
673 output_h = RJ54N1_MAX_HEIGHT / 2;
674 } else {
675 input_h = output_h * 2;
676 }
677
678 dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n",
679 input_h, output_h);
680 }
681
682 /* Idea: use the read mode for snapshots, handle separate geometries */
558 ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L, 683 ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L,
559 RJ54N1_Y_OUTPUT_SIZE_S_L, 684 RJ54N1_Y_OUTPUT_SIZE_S_L,
560 RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h); 685 RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h);
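The width and height adjustment added above only triggers when the requested output exceeds both 512 (384 for the height) and half of the crop, and it prefers growing the crop over shrinking the output. A stand-alone sketch of the width case; the sensor limit of 1600 pixels is an assumption made purely for illustration:

    #include <stdio.h>

    #define MAX_WIDTH 1600  /* assumed sensor limit, illustration only */

    static void adjust_width(unsigned int *input_w, unsigned int *output_w)
    {
            unsigned int limit = *input_w / 2 > 512 ? *input_w / 2 : 512;

            if (*output_w <= limit)
                    return;  /* output fits 512 or half of the crop: nothing to do */

            if (2 * *output_w > MAX_WIDTH) {
                    /* cannot widen the crop enough, clamp both */
                    *input_w = MAX_WIDTH;
                    *output_w = MAX_WIDTH / 2;
            } else {
                    /* widen the crop so the output is exactly half of it */
                    *input_w = *output_w * 2;
            }
    }

    int main(void)
    {
            unsigned int in = 640, out = 600;

            adjust_width(&in, &out);  /* 600 > max(512, 320), so the crop grows to 1200 */
            printf("in=%u out=%u\n", in, out);
            return 0;
    }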
@@ -566,17 +691,27 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
566 if (ret < 0) 691 if (ret < 0)
567 return ret; 692 return ret;
568 693
569 if (output_w > input_w || output_h > input_h) { 694 if (output_w > input_w && output_h > input_h) {
570 input_w = output_w; 695 input_w = output_w;
571 input_h = output_h; 696 input_h = output_h;
572 697
573 resize = 1024; 698 resize = 1024;
574 } else { 699 } else {
575 unsigned int resize_x, resize_y; 700 unsigned int resize_x, resize_y;
576 resize_x = input_w * 1024 / output_w; 701 resize_x = (input_w * 1024 + output_w / 2) / output_w;
577 resize_y = input_h * 1024 / output_h; 702 resize_y = (input_h * 1024 + output_h / 2) / output_h;
578 703
579 resize = min(resize_x, resize_y); 704 /* We want max(resize_x, resize_y), check if it still fits */
705 if (resize_x > resize_y &&
706 (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT)
707 resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) /
708 output_h;
709 else if (resize_y > resize_x &&
710 (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH)
711 resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) /
712 output_w;
713 else
714 resize = max(resize_x, resize_y);
580 715
581 /* Prohibited value ranges */ 716 /* Prohibited value ranges */
582 switch (resize) { 717 switch (resize) {
@@ -589,12 +724,9 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
589 case 8160 ... 8191: 724 case 8160 ... 8191:
590 resize = 8159; 725 resize = 8159;
591 break; 726 break;
592 case 16320 ... 16383: 727 case 16320 ... 16384:
593 resize = 16319; 728 resize = 16319;
594 } 729 }
595
596 input_w = output_w * resize / 1024;
597 input_h = output_h * resize / 1024;
598 } 730 }
599 731
600 /* Set scaling */ 732 /* Set scaling */
@@ -607,9 +739,18 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
607 739
608 /* 740 /*
609 * Configure a skipping bitmask. The sensor will select a skipping value 741 * Configure a skipping bitmask. The sensor will select a skipping value
610 * among set bits automatically. 742 * among set bits automatically. This is very unclear in the datasheet
 743 * too. I was told that in this register one enables all the skipping
 744 * values required for a specific resize, and the camera automatically
 745 * selects which ones to use. But it is unclear how to identify
 746 * which cropping values are needed. Secondly, why don't we just set all
747 * bits and let the camera choose? Would it increase processing time and
748 * reduce the framerate? Using 0xfffc for INC_USE_SEL doesn't seem to
749 * improve the image quality or stability for larger frames (see comment
750 * above), but I didn't check the framerate.
611 */ 751 */
612 skip = min(resize / 1024, (unsigned)15); 752 skip = min(resize / 1024, (unsigned)15);
753
613 inc_sel = 1 << skip; 754 inc_sel = 1 << skip;
614 755
615 if (inc_sel <= 2) 756 if (inc_sel <= 2)
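The skipping selection itself is just two lines of integer arithmetic; a tiny sketch with concrete numbers, covering only the visible computation and not the register writes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int resize = 4096;  /* a 1:4 downscale, for illustration */
            unsigned int skip = resize / 1024 < 15 ? resize / 1024 : 15;
            uint16_t inc_sel = 1 << skip;  /* enable one candidate skipping value */

            printf("skip=%u inc_sel=0x%04x\n", skip, inc_sel);  /* skip=4 inc_sel=0x0010 */
            return 0;
    }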
@@ -621,6 +762,43 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
621 if (!ret) 762 if (!ret)
622 ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8); 763 ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8);
623 764
765 if (!rj54n1->auto_wb) {
766 /* Auto white balance window */
767 wb_left = output_w / 16;
768 wb_right = (3 * output_w / 4 - 3) / 4;
769 wb_top = output_h / 16;
770 wb_bottom = (3 * output_h / 4 - 3) / 4;
771 wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) |
772 ((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1);
773
774 if (!ret)
775 ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8);
776 if (!ret)
777 ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left);
778 if (!ret)
779 ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top);
780 if (!ret)
781 ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right);
782 if (!ret)
783 ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom);
784 }
785
786 /* Antiflicker */
787 peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz /
788 10000;
789 peak_50 = peak / 6;
790 peak_60 = peak / 5;
791
792 if (!ret)
793 ret = reg_write(client, RJ54N1_PEAK_H,
794 ((peak_50 >> 4) & 0xf0) | (peak_60 >> 8));
795 if (!ret)
796 ret = reg_write(client, RJ54N1_PEAK_50, peak_50);
797 if (!ret)
798 ret = reg_write(client, RJ54N1_PEAK_60, peak_60);
799 if (!ret)
800 ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150);
801
624 /* Start resizing */ 802 /* Start resizing */
625 if (!ret) 803 if (!ret)
626 ret = reg_write(client, RJ54N1_RESIZE_CONTROL, 804 ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
@@ -629,8 +807,6 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
629 if (ret < 0) 807 if (ret < 0)
630 return ret; 808 return ret;
631 809
632 dev_dbg(&client->dev, "resize %u, skip %u\n", resize, skip);
633
634 /* Constant taken from manufacturer's example */ 810 /* Constant taken from manufacturer's example */
635 msleep(230); 811 msleep(230);
636 812
@@ -638,11 +814,14 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
638 if (ret < 0) 814 if (ret < 0)
639 return ret; 815 return ret;
640 816
641 *in_w = input_w; 817 *in_w = (output_w * resize + 512) / 1024;
642 *in_h = input_h; 818 *in_h = (output_h * resize + 512) / 1024;
643 *out_w = output_w; 819 *out_w = output_w;
644 *out_h = output_h; 820 *out_h = output_h;
645 821
822 dev_dbg(&client->dev, "Scaled for %ux%u : %u = %ux%u, skip %u\n",
823 *in_w, *in_h, resize, output_w, output_h, skip);
824
646 return resize; 825 return resize;
647} 826}
648 827
@@ -653,14 +832,14 @@ static int rj54n1_set_clock(struct i2c_client *client)
653 832
654 /* Enable external clock */ 833 /* Enable external clock */
655 ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY); 834 ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY);
656 /* Leave stand-by */ 835 /* Leave stand-by. Note: use this when implementing suspend / resume */
657 if (!ret) 836 if (!ret)
658 ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK); 837 ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK);
659 838
660 if (!ret) 839 if (!ret)
661 ret = reg_write(client, RJ54N1_PLL_L, 2); 840 ret = reg_write(client, RJ54N1_PLL_L, PLL_L);
662 if (!ret) 841 if (!ret)
663 ret = reg_write(client, RJ54N1_PLL_N, 0x31); 842 ret = reg_write(client, RJ54N1_PLL_N, PLL_N);
664 843
665 /* TGCLK dividers */ 844 /* TGCLK dividers */
666 if (!ret) 845 if (!ret)
@@ -719,6 +898,7 @@ static int rj54n1_set_clock(struct i2c_client *client)
719 "Resetting RJ54N1CB0C clock failed: %d!\n", ret); 898 "Resetting RJ54N1CB0C clock failed: %d!\n", ret);
720 return -EIO; 899 return -EIO;
721 } 900 }
901
722 /* Start the PLL */ 902 /* Start the PLL */
723 ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1); 903 ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1);
724 904
@@ -731,6 +911,7 @@ static int rj54n1_set_clock(struct i2c_client *client)
731 911
732static int rj54n1_reg_init(struct i2c_client *client) 912static int rj54n1_reg_init(struct i2c_client *client)
733{ 913{
914 struct rj54n1 *rj54n1 = to_rj54n1(client);
734 int ret = rj54n1_set_clock(client); 915 int ret = rj54n1_set_clock(client);
735 916
736 if (!ret) 917 if (!ret)
@@ -753,14 +934,26 @@ static int rj54n1_reg_init(struct i2c_client *client)
753 if (!ret) 934 if (!ret)
754 ret = reg_write(client, RJ54N1_Y_GAIN, 0x84); 935 ret = reg_write(client, RJ54N1_Y_GAIN, 0x84);
755 936
756 /* Mirror the image back: default is upside down and left-to-right... */ 937 /*
938 * Mirror the image back: default is upside down and left-to-right...
939 * Set manual preview / still shot switching
940 */
757 if (!ret) 941 if (!ret)
758 ret = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 3, 3); 942 ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27);
759 943
760 if (!ret) 944 if (!ret)
761 ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4)); 945 ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4));
946
947 /* Auto exposure area */
762 if (!ret) 948 if (!ret)
949 ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80);
950 /* Check current auto WB config */
951 if (!ret)
952 ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I);
953 if (ret >= 0) {
954 rj54n1->auto_wb = ret & 0x80;
763 ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5)); 955 ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5));
956 }
764 if (!ret) 957 if (!ret)
765 ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8)); 958 ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8));
766 959
@@ -777,8 +970,9 @@ static int rj54n1_reg_init(struct i2c_client *client)
777 ret = reg_write(client, RJ54N1_RESET_STANDBY, 970 ret = reg_write(client, RJ54N1_RESET_STANDBY,
778 E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX); 971 E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX);
779 972
973 /* Start register update? Same register as 0x?FE in many bank_* sets */
780 if (!ret) 974 if (!ret)
781 ret = reg_write(client, 0x7fe, 2); 975 ret = reg_write(client, RJ54N1_FWFLG, 2);
782 976
783 /* Constant taken from manufacturer's example */ 977 /* Constant taken from manufacturer's example */
784 msleep(700); 978 msleep(700);
@@ -786,27 +980,44 @@ static int rj54n1_reg_init(struct i2c_client *client)
786 return ret; 980 return ret;
787} 981}
788 982
789/* FIXME: streaming output only up to 800x600 is functional */ 983static int rj54n1_try_fmt(struct v4l2_subdev *sd,
790static int rj54n1_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 984 struct v4l2_mbus_framefmt *mf)
791{ 985{
792 struct v4l2_pix_format *pix = &f->fmt.pix; 986 struct i2c_client *client = sd->priv;
987 struct rj54n1 *rj54n1 = to_rj54n1(client);
988 const struct rj54n1_datafmt *fmt;
989 int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 ||
990 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE ||
991 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE ||
992 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE ||
993 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE;
994
995 dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
996 __func__, mf->code, mf->width, mf->height);
997
998 fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
999 ARRAY_SIZE(rj54n1_colour_fmts));
1000 if (!fmt) {
1001 fmt = rj54n1->fmt;
1002 mf->code = fmt->code;
1003 }
793 1004
794 pix->field = V4L2_FIELD_NONE; 1005 mf->field = V4L2_FIELD_NONE;
1006 mf->colorspace = fmt->colorspace;
795 1007
796 if (pix->width > 800) 1008 v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
797 pix->width = 800; 1009 &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
798 if (pix->height > 600)
799 pix->height = 600;
800 1010
801 return 0; 1011 return 0;
802} 1012}
803 1013
804static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 1014static int rj54n1_s_fmt(struct v4l2_subdev *sd,
1015 struct v4l2_mbus_framefmt *mf)
805{ 1016{
806 struct i2c_client *client = sd->priv; 1017 struct i2c_client *client = sd->priv;
807 struct rj54n1 *rj54n1 = to_rj54n1(client); 1018 struct rj54n1 *rj54n1 = to_rj54n1(client);
808 struct v4l2_pix_format *pix = &f->fmt.pix; 1019 const struct rj54n1_datafmt *fmt;
809 unsigned int output_w, output_h, 1020 unsigned int output_w, output_h, max_w, max_h,
810 input_w = rj54n1->rect.width, input_h = rj54n1->rect.height; 1021 input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
811 int ret; 1022 int ret;
812 1023
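The bounding in rj54n1_try_fmt() above clamps the requested size into the supported range and, for the Bayer formats, keeps width and height even so the BGGR phase is preserved (the align arguments of v4l_bound_align_image() are power-of-two exponents, so 1 means align to 2). A rough stand-alone approximation of the effect on a single dimension; the real helper is more elaborate and also coordinates the two axes:

    #include <stdio.h>

    /* Clamp v into [min, max], then round down to a multiple of 2^align_log2. */
    static unsigned int bound_align(unsigned int v, unsigned int min,
                                    unsigned int max, unsigned int align_log2)
    {
            if (v < min)
                    v = min;
            if (v > max)
                    v = max;
            return v & ~((1U << align_log2) - 1);
    }

    int main(void)
    {
            /* 641 requested in a Bayer mode: clamped to [112, 1600], aligned to 2 */
            printf("%u\n", bound_align(641, 112, 1600, 1));  /* 640 */
            return 0;
    }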
@@ -814,14 +1025,13 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
 814 * The host driver can call us without .try_fmt(), so we have to take 1025
 815 * care of it ourselves 1026
816 */ 1027 */
817 ret = rj54n1_try_fmt(sd, f); 1028 rj54n1_try_fmt(sd, mf);
818 1029
819 /* 1030 /*
820 * Verify if the sensor has just been powered on. TODO: replace this 1031 * Verify if the sensor has just been powered on. TODO: replace this
821 * with proper PM, when a suitable API is available. 1032 * with proper PM, when a suitable API is available.
822 */ 1033 */
823 if (!ret) 1034 ret = reg_read(client, RJ54N1_RESET_STANDBY);
824 ret = reg_read(client, RJ54N1_RESET_STANDBY);
825 if (ret < 0) 1035 if (ret < 0)
826 return ret; 1036 return ret;
827 1037
@@ -831,50 +1041,105 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
831 return ret; 1041 return ret;
832 } 1042 }
833 1043
1044 dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
1045 __func__, mf->code, mf->width, mf->height);
1046
834 /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */ 1047 /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
835 switch (pix->pixelformat) { 1048 switch (mf->code) {
836 case V4L2_PIX_FMT_YUYV: 1049 case V4L2_MBUS_FMT_YUYV8_2X8_LE:
837 ret = reg_write(client, RJ54N1_OUT_SEL, 0); 1050 ret = reg_write(client, RJ54N1_OUT_SEL, 0);
838 if (!ret) 1051 if (!ret)
839 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); 1052 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
840 break; 1053 break;
841 case V4L2_PIX_FMT_RGB565: 1054 case V4L2_MBUS_FMT_YVYU8_2X8_LE:
1055 ret = reg_write(client, RJ54N1_OUT_SEL, 0);
1056 if (!ret)
1057 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1058 break;
1059 case V4L2_MBUS_FMT_RGB565_2X8_LE:
1060 ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
1061 if (!ret)
1062 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
1063 break;
1064 case V4L2_MBUS_FMT_RGB565_2X8_BE:
842 ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); 1065 ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
843 if (!ret) 1066 if (!ret)
1067 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1068 break;
1069 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE:
1070 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1071 if (!ret)
844 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); 1072 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
1073 if (!ret)
1074 ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
1075 break;
1076 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
1077 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1078 if (!ret)
1079 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
1080 if (!ret)
1081 ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
1082 break;
1083 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE:
1084 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1085 if (!ret)
1086 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1087 if (!ret)
1088 ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
1089 break;
1090 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE:
1091 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1092 if (!ret)
1093 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1094 if (!ret)
1095 ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
1096 break;
1097 case V4L2_MBUS_FMT_SBGGR10_1X10:
1098 ret = reg_write(client, RJ54N1_OUT_SEL, 5);
845 break; 1099 break;
846 default: 1100 default:
847 ret = -EINVAL; 1101 ret = -EINVAL;
848 } 1102 }
849 1103
1104 /* Special case: a raw mode with 10 bits of data per clock tick */
1105 if (!ret)
1106 ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
1107 (mf->code == V4L2_MBUS_FMT_SBGGR10_1X10) << 1, 2);
1108
850 if (ret < 0) 1109 if (ret < 0)
851 return ret; 1110 return ret;
852 1111
853 /* Supported scales 1:1 - 1:16 */ 1112 /* Supported scales 1:1 >= scale > 1:16 */
854 if (pix->width < input_w / 16) 1113 max_w = mf->width * (16 * 1024 - 1) / 1024;
855 pix->width = input_w / 16; 1114 if (input_w > max_w)
856 if (pix->height < input_h / 16) 1115 input_w = max_w;
857 pix->height = input_h / 16; 1116 max_h = mf->height * (16 * 1024 - 1) / 1024;
1117 if (input_h > max_h)
1118 input_h = max_h;
858 1119
859 output_w = pix->width; 1120 output_w = mf->width;
860 output_h = pix->height; 1121 output_h = mf->height;
861 1122
862 ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h); 1123 ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
863 if (ret < 0) 1124 if (ret < 0)
864 return ret; 1125 return ret;
865 1126
866 rj54n1->fourcc = pix->pixelformat; 1127 fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
1128 ARRAY_SIZE(rj54n1_colour_fmts));
1129
1130 rj54n1->fmt = fmt;
867 rj54n1->resize = ret; 1131 rj54n1->resize = ret;
868 rj54n1->rect.width = input_w; 1132 rj54n1->rect.width = input_w;
869 rj54n1->rect.height = input_h; 1133 rj54n1->rect.height = input_h;
870 rj54n1->width = output_w; 1134 rj54n1->width = output_w;
871 rj54n1->height = output_h; 1135 rj54n1->height = output_h;
872 1136
873 pix->width = output_w; 1137 mf->width = output_w;
874 pix->height = output_h; 1138 mf->height = output_h;
875 pix->field = V4L2_FIELD_NONE; 1139 mf->field = V4L2_FIELD_NONE;
1140 mf->colorspace = fmt->colorspace;
876 1141
877 return ret; 1142 return 0;
878} 1143}
879 1144
880static int rj54n1_g_chip_ident(struct v4l2_subdev *sd, 1145static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
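The new bound in rj54n1_s_fmt() above keeps the scale factor strictly below 16384, i.e. below 1:16: for a requested output width the crop may be at most width * 16383 / 1024 pixels wide. A quick numeric check of that limit:

    #include <stdio.h>

    int main(void)
    {
            unsigned int out_w = 100;
            unsigned int max_w = out_w * (16 * 1024 - 1) / 1024;       /* 1599 */
            unsigned int resize = (max_w * 1024 + out_w / 2) / out_w;  /* 16374, still < 16384 */

            printf("max_w=%u resize=%u\n", max_w, resize);
            return 0;
    }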
@@ -963,6 +1228,14 @@ static const struct v4l2_queryctrl rj54n1_controls[] = {
963 .step = 1, 1228 .step = 1,
964 .default_value = 66, 1229 .default_value = 66,
965 .flags = V4L2_CTRL_FLAG_SLIDER, 1230 .flags = V4L2_CTRL_FLAG_SLIDER,
1231 }, {
1232 .id = V4L2_CID_AUTO_WHITE_BALANCE,
1233 .type = V4L2_CTRL_TYPE_BOOLEAN,
1234 .name = "Auto white balance",
1235 .minimum = 0,
1236 .maximum = 1,
1237 .step = 1,
1238 .default_value = 1,
966 }, 1239 },
967}; 1240};
968 1241
@@ -976,6 +1249,7 @@ static struct soc_camera_ops rj54n1_ops = {
976static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) 1249static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
977{ 1250{
978 struct i2c_client *client = sd->priv; 1251 struct i2c_client *client = sd->priv;
1252 struct rj54n1 *rj54n1 = to_rj54n1(client);
979 int data; 1253 int data;
980 1254
981 switch (ctrl->id) { 1255 switch (ctrl->id) {
@@ -998,6 +1272,9 @@ static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
998 1272
999 ctrl->value = data / 2; 1273 ctrl->value = data / 2;
1000 break; 1274 break;
1275 case V4L2_CID_AUTO_WHITE_BALANCE:
1276 ctrl->value = rj54n1->auto_wb;
1277 break;
1001 } 1278 }
1002 1279
1003 return 0; 1280 return 0;
@@ -1007,6 +1284,7 @@ static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
1007{ 1284{
1008 int data; 1285 int data;
1009 struct i2c_client *client = sd->priv; 1286 struct i2c_client *client = sd->priv;
1287 struct rj54n1 *rj54n1 = to_rj54n1(client);
1010 const struct v4l2_queryctrl *qctrl; 1288 const struct v4l2_queryctrl *qctrl;
1011 1289
1012 qctrl = soc_camera_find_qctrl(&rj54n1_ops, ctrl->id); 1290 qctrl = soc_camera_find_qctrl(&rj54n1_ops, ctrl->id);
@@ -1037,6 +1315,13 @@ static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
1037 else if (reg_write(client, RJ54N1_Y_GAIN, ctrl->value * 2) < 0) 1315 else if (reg_write(client, RJ54N1_Y_GAIN, ctrl->value * 2) < 0)
1038 return -EIO; 1316 return -EIO;
1039 break; 1317 break;
1318 case V4L2_CID_AUTO_WHITE_BALANCE:
1319 /* Auto WB area - whole image */
1320 if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->value << 7,
1321 0x80) < 0)
1322 return -EIO;
1323 rj54n1->auto_wb = ctrl->value;
1324 break;
1040 } 1325 }
1041 1326
1042 return 0; 1327 return 0;
@@ -1054,10 +1339,12 @@ static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
1054 1339
1055static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = { 1340static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
1056 .s_stream = rj54n1_s_stream, 1341 .s_stream = rj54n1_s_stream,
1057 .s_fmt = rj54n1_s_fmt, 1342 .s_mbus_fmt = rj54n1_s_fmt,
1058 .g_fmt = rj54n1_g_fmt, 1343 .g_mbus_fmt = rj54n1_g_fmt,
1059 .try_fmt = rj54n1_try_fmt, 1344 .try_mbus_fmt = rj54n1_try_fmt,
1345 .enum_mbus_fmt = rj54n1_enum_fmt,
1060 .g_crop = rj54n1_g_crop, 1346 .g_crop = rj54n1_g_crop,
1347 .s_crop = rj54n1_s_crop,
1061 .cropcap = rj54n1_cropcap, 1348 .cropcap = rj54n1_cropcap,
1062}; 1349};
1063 1350
@@ -1066,21 +1353,13 @@ static struct v4l2_subdev_ops rj54n1_subdev_ops = {
1066 .video = &rj54n1_subdev_video_ops, 1353 .video = &rj54n1_subdev_video_ops,
1067}; 1354};
1068 1355
1069static int rj54n1_pin_config(struct i2c_client *client)
1070{
1071 /*
1072 * Experimentally found out IOCTRL wired to 0. TODO: add to platform
1073 * data: 0 or 1 << 7.
1074 */
1075 return reg_write(client, RJ54N1_IOC, 0);
1076}
1077
1078/* 1356/*
1079 * Interface active, can use i2c. If it fails, it can indeed mean that 1357
1080 * this wasn't our capture interface, so we wait for the right one 1358
1081 */ 1359 */
1082static int rj54n1_video_probe(struct soc_camera_device *icd, 1360static int rj54n1_video_probe(struct soc_camera_device *icd,
1083 struct i2c_client *client) 1361 struct i2c_client *client,
1362 struct rj54n1_pdata *priv)
1084{ 1363{
1085 int data1, data2; 1364 int data1, data2;
1086 int ret; 1365 int ret;
@@ -1101,7 +1380,8 @@ static int rj54n1_video_probe(struct soc_camera_device *icd,
1101 goto ei2c; 1380 goto ei2c;
1102 } 1381 }
1103 1382
1104 ret = rj54n1_pin_config(client); 1383 /* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */
1384 ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7);
1105 if (ret < 0) 1385 if (ret < 0)
1106 goto ei2c; 1386 goto ei2c;
1107 1387
@@ -1119,6 +1399,7 @@ static int rj54n1_probe(struct i2c_client *client,
1119 struct soc_camera_device *icd = client->dev.platform_data; 1399 struct soc_camera_device *icd = client->dev.platform_data;
1120 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 1400 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
1121 struct soc_camera_link *icl; 1401 struct soc_camera_link *icl;
1402 struct rj54n1_pdata *rj54n1_priv;
1122 int ret; 1403 int ret;
1123 1404
1124 if (!icd) { 1405 if (!icd) {
@@ -1127,11 +1408,13 @@ static int rj54n1_probe(struct i2c_client *client,
1127 } 1408 }
1128 1409
1129 icl = to_soc_camera_link(icd); 1410 icl = to_soc_camera_link(icd);
1130 if (!icl) { 1411 if (!icl || !icl->priv) {
1131 dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n"); 1412 dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
1132 return -EINVAL; 1413 return -EINVAL;
1133 } 1414 }
1134 1415
1416 rj54n1_priv = icl->priv;
1417
1135 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1418 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1136 dev_warn(&adapter->dev, 1419 dev_warn(&adapter->dev,
1137 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n"); 1420 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n");
@@ -1153,10 +1436,12 @@ static int rj54n1_probe(struct i2c_client *client,
1153 rj54n1->rect.height = RJ54N1_MAX_HEIGHT; 1436 rj54n1->rect.height = RJ54N1_MAX_HEIGHT;
1154 rj54n1->width = RJ54N1_MAX_WIDTH; 1437 rj54n1->width = RJ54N1_MAX_WIDTH;
1155 rj54n1->height = RJ54N1_MAX_HEIGHT; 1438 rj54n1->height = RJ54N1_MAX_HEIGHT;
1156 rj54n1->fourcc = V4L2_PIX_FMT_YUYV; 1439 rj54n1->fmt = &rj54n1_colour_fmts[0];
1157 rj54n1->resize = 1024; 1440 rj54n1->resize = 1024;
1441 rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
1442 (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
1158 1443
1159 ret = rj54n1_video_probe(icd, client); 1444 ret = rj54n1_video_probe(icd, client, rj54n1_priv);
1160 if (ret < 0) { 1445 if (ret < 0) {
1161 icd->ops = NULL; 1446 icd->ops = NULL;
1162 i2c_set_clientdata(client, NULL); 1447 i2c_set_clientdata(client, NULL);
@@ -1164,9 +1449,6 @@ static int rj54n1_probe(struct i2c_client *client,
1164 return ret; 1449 return ret;
1165 } 1450 }
1166 1451
1167 icd->formats = rj54n1_colour_formats;
1168 icd->num_formats = ARRAY_SIZE(rj54n1_colour_formats);
1169
1170 return ret; 1452 return ret;
1171} 1453}
1172 1454
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 41765f3c7c28..fb742f1ae711 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -233,7 +233,6 @@ struct s2255_dev {
233 233
234 struct s2255_dmaqueue vidq[MAX_CHANNELS]; 234 struct s2255_dmaqueue vidq[MAX_CHANNELS];
235 struct video_device *vdev[MAX_CHANNELS]; 235 struct video_device *vdev[MAX_CHANNELS];
236 struct list_head s2255_devlist;
237 struct timer_list timer; 236 struct timer_list timer;
238 struct s2255_fw *fw_data; 237 struct s2255_fw *fw_data;
239 struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS]; 238 struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS];
@@ -313,8 +312,6 @@ struct s2255_fh {
313/* Channels on box are in reverse order */ 312/* Channels on box are in reverse order */
314static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0}; 313static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0};
315 314
316static LIST_HEAD(s2255_devlist);
317
318static int debug; 315static int debug;
319static int *s2255_debug = &debug; 316static int *s2255_debug = &debug;
320 317
@@ -1533,32 +1530,24 @@ static int vidioc_s_parm(struct file *file, void *priv,
1533} 1530}
1534static int s2255_open(struct file *file) 1531static int s2255_open(struct file *file)
1535{ 1532{
1536 int minor = video_devdata(file)->minor; 1533 struct video_device *vdev = video_devdata(file);
1537 struct s2255_dev *h, *dev = NULL; 1534 struct s2255_dev *dev = video_drvdata(file);
1538 struct s2255_fh *fh; 1535 struct s2255_fh *fh;
1539 struct list_head *list; 1536 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1540 enum v4l2_buf_type type = 0;
1541 int i = 0; 1537 int i = 0;
1542 int cur_channel = -1; 1538 int cur_channel = -1;
1543 int state; 1539 int state;
1544 dprintk(1, "s2255: open called (minor=%d)\n", minor); 1540
1541 dprintk(1, "s2255: open called (dev=%s)\n",
1542 video_device_node_name(vdev));
1545 1543
1546 lock_kernel(); 1544 lock_kernel();
1547 list_for_each(list, &s2255_devlist) {
1548 h = list_entry(list, struct s2255_dev, s2255_devlist);
1549 for (i = 0; i < MAX_CHANNELS; i++) {
1550 if (h->vdev[i]->minor == minor) {
1551 cur_channel = i;
1552 dev = h;
1553 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1554 }
1555 }
1556 }
1557 1545
1558 if ((NULL == dev) || (cur_channel == -1)) { 1546 for (i = 0; i < MAX_CHANNELS; i++) {
1559 unlock_kernel(); 1547 if (dev->vdev[i] == vdev) {
1560 printk(KERN_INFO "s2255: openv4l no dev\n"); 1548 cur_channel = i;
1561 return -ENODEV; 1549 break;
1550 }
1562 } 1551 }
1563 1552
1564 if (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING) { 1553 if (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING) {
@@ -1662,8 +1651,9 @@ static int s2255_open(struct file *file)
1662 for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++) 1651 for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
1663 qctl_regs[i] = s2255_qctrl[i].default_value; 1652 qctl_regs[i] = s2255_qctrl[i].default_value;
1664 1653
1665 dprintk(1, "s2255drv: open minor=%d type=%s users=%d\n", 1654 dprintk(1, "s2255drv: open dev=%s type=%s users=%d\n",
1666 minor, v4l2_type_names[type], dev->users[cur_channel]); 1655 video_device_node_name(vdev), v4l2_type_names[type],
1656 dev->users[cur_channel]);
1667 dprintk(2, "s2255drv: open: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n", 1657 dprintk(2, "s2255drv: open: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n",
1668 (unsigned long)fh, (unsigned long)dev, 1658 (unsigned long)fh, (unsigned long)dev,
1669 (unsigned long)&dev->vidq[cur_channel]); 1659 (unsigned long)&dev->vidq[cur_channel]);
@@ -1699,7 +1689,6 @@ static unsigned int s2255_poll(struct file *file,
1699static void s2255_destroy(struct kref *kref) 1689static void s2255_destroy(struct kref *kref)
1700{ 1690{
1701 struct s2255_dev *dev = to_s2255_dev(kref); 1691 struct s2255_dev *dev = to_s2255_dev(kref);
1702 struct list_head *list;
1703 int i; 1692 int i;
1704 if (!dev) { 1693 if (!dev) {
1705 printk(KERN_ERR "s2255drv: kref problem\n"); 1694 printk(KERN_ERR "s2255drv: kref problem\n");
@@ -1733,10 +1722,6 @@ static void s2255_destroy(struct kref *kref)
1733 usb_put_dev(dev->udev); 1722 usb_put_dev(dev->udev);
1734 dprintk(1, "%s", __func__); 1723 dprintk(1, "%s", __func__);
1735 1724
1736 while (!list_empty(&s2255_devlist)) {
1737 list = s2255_devlist.next;
1738 list_del(list);
1739 }
1740 mutex_unlock(&dev->open_lock); 1725 mutex_unlock(&dev->open_lock);
1741 kfree(dev); 1726 kfree(dev);
1742} 1727}
@@ -1745,7 +1730,8 @@ static int s2255_close(struct file *file)
1745{ 1730{
1746 struct s2255_fh *fh = file->private_data; 1731 struct s2255_fh *fh = file->private_data;
1747 struct s2255_dev *dev = fh->dev; 1732 struct s2255_dev *dev = fh->dev;
1748 int minor = video_devdata(file)->minor; 1733 struct video_device *vdev = video_devdata(file);
1734
1749 if (!dev) 1735 if (!dev)
1750 return -ENODEV; 1736 return -ENODEV;
1751 1737
@@ -1765,8 +1751,8 @@ static int s2255_close(struct file *file)
1765 mutex_unlock(&dev->open_lock); 1751 mutex_unlock(&dev->open_lock);
1766 1752
1767 kref_put(&dev->kref, s2255_destroy); 1753 kref_put(&dev->kref, s2255_destroy);
1768 dprintk(1, "s2255: close called (minor=%d, users=%d)\n", 1754 dprintk(1, "s2255: close called (dev=%s, users=%d)\n",
1769 minor, dev->users[fh->channel]); 1755 video_device_node_name(vdev), dev->users[fh->channel]);
1770 kfree(fh); 1756 kfree(fh);
1771 return 0; 1757 return 0;
1772} 1758}
@@ -1830,7 +1816,6 @@ static struct video_device template = {
1830 .name = "s2255v", 1816 .name = "s2255v",
1831 .fops = &s2255_fops_v4l, 1817 .fops = &s2255_fops_v4l,
1832 .ioctl_ops = &s2255_ioctl_ops, 1818 .ioctl_ops = &s2255_ioctl_ops,
1833 .minor = -1,
1834 .release = video_device_release, 1819 .release = video_device_release,
1835 .tvnorms = S2255_NORMS, 1820 .tvnorms = S2255_NORMS,
1836 .current_norm = V4L2_STD_NTSC_M, 1821 .current_norm = V4L2_STD_NTSC_M,
@@ -1843,7 +1828,6 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
1843 int cur_nr = video_nr; 1828 int cur_nr = video_nr;
1844 1829
1845 /* initialize all video 4 linux */ 1830 /* initialize all video 4 linux */
1846 list_add_tail(&dev->s2255_devlist, &s2255_devlist);
1847 /* register 4 video devices */ 1831 /* register 4 video devices */
1848 for (i = 0; i < MAX_CHANNELS; i++) { 1832 for (i = 0; i < MAX_CHANNELS; i++) {
1849 INIT_LIST_HEAD(&dev->vidq[i].active); 1833 INIT_LIST_HEAD(&dev->vidq[i].active);
@@ -1853,6 +1837,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
1853 dev->vdev[i] = video_device_alloc(); 1837 dev->vdev[i] = video_device_alloc();
1854 memcpy(dev->vdev[i], &template, sizeof(struct video_device)); 1838 memcpy(dev->vdev[i], &template, sizeof(struct video_device));
1855 dev->vdev[i]->parent = &dev->interface->dev; 1839 dev->vdev[i]->parent = &dev->interface->dev;
1840 video_set_drvdata(dev->vdev[i], dev);
1856 if (video_nr == -1) 1841 if (video_nr == -1)
1857 ret = video_register_device(dev->vdev[i], 1842 ret = video_register_device(dev->vdev[i],
1858 VFL_TYPE_GRABBER, 1843 VFL_TYPE_GRABBER,
@@ -1880,7 +1865,7 @@ static void s2255_exit_v4l(struct s2255_dev *dev)
1880 1865
1881 int i; 1866 int i;
1882 for (i = 0; i < MAX_CHANNELS; i++) { 1867 for (i = 0; i < MAX_CHANNELS; i++) {
1883 if (-1 != dev->vdev[i]->minor) { 1868 if (video_is_registered(dev->vdev[i])) {
1884 video_unregister_device(dev->vdev[i]); 1869 video_unregister_device(dev->vdev[i]);
1885 printk(KERN_INFO "s2255 unregistered\n"); 1870 printk(KERN_INFO "s2255 unregistered\n");
1886 } else { 1871 } else {
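The s2255 changes above replace the global device-list walk keyed on the minor number with video_set_drvdata() at registration time and video_drvdata() in the file operations. A hypothetical miniature of that pattern, with made-up driver names:

    #include <linux/fs.h>
    #include <media/v4l2-dev.h>

    struct foo_dev {
            struct video_device *vdev;
            /* ... per-device state ... */
    };

    /* Call before video_register_device() so open() can find the device. */
    static void foo_hook_up(struct foo_dev *dev)
    {
            video_set_drvdata(dev->vdev, dev);
    }

    static int foo_open(struct file *file)
    {
            /* No list walk, no minor comparison: the core hands back our pointer. */
            struct foo_dev *dev = video_drvdata(file);

            return dev ? 0 : -ENODEV;
    }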
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
index b624a4c01fdc..5ab6a0f901c0 100644
--- a/drivers/media/video/saa5246a.c
+++ b/drivers/media/video/saa5246a.c
@@ -1036,7 +1036,6 @@ static struct video_device saa_template =
1036 .name = "saa5246a", 1036 .name = "saa5246a",
1037 .fops = &saa_fops, 1037 .fops = &saa_fops,
1038 .release = video_device_release, 1038 .release = video_device_release,
1039 .minor = -1,
1040}; 1039};
1041 1040
1042static int saa5246a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) 1041static int saa5246a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 7e40d6d99dd0..03f572708b85 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -7211,9 +7211,31 @@ int saa7134_board_init2(struct saa7134_dev *dev)
7211 } 7211 }
7212 case SAA7134_BOARD_FLYDVB_TRIO: 7212 case SAA7134_BOARD_FLYDVB_TRIO:
7213 { 7213 {
7214 u8 temp = 0;
7215 int rc;
7214 u8 data[] = { 0x3c, 0x33, 0x62}; 7216 u8 data[] = { 0x3c, 0x33, 0x62};
7215 struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)}; 7217 struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)};
7216 i2c_transfer(&dev->i2c_adap, &msg, 1); 7218 i2c_transfer(&dev->i2c_adap, &msg, 1);
7219
7220 /*
7221 * send wake-up message to pic16C505 chip
7222 * @ LifeView FlyDVB Trio
7223 */
7224 msg.buf = &temp;
7225 msg.addr = 0x0b;
7226 msg.len = 1;
7227 if (1 != i2c_transfer(&dev->i2c_adap, &msg, 1)) {
7228 printk(KERN_WARNING "%s: send wake up byte to pic16C505"
7229 "(IR chip) failed\n", dev->name);
7230 } else {
7231 msg.flags = I2C_M_RD;
7232 rc = i2c_transfer(&dev->i2c_adap, &msg, 1);
7233 printk(KERN_INFO "%s: probe IR chip @ i2c 0x%02x: %s\n",
7234 dev->name, msg.addr,
7235 (1 == rc) ? "yes" : "no");
7236 if (rc == 1)
7237 dev->has_remote = SAA7134_REMOTE_I2C;
7238 }
7217 break; 7239 break;
7218 } 7240 }
7219 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 7241 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 0ba7f5af0fc3..9f85e917f9f3 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -797,27 +797,28 @@ static struct video_device *vdev_init(struct saa7134_dev *dev,
797 vfd->debug = video_debug; 797 vfd->debug = video_debug;
798 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", 798 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
799 dev->name, type, saa7134_boards[dev->board].name); 799 dev->name, type, saa7134_boards[dev->board].name);
800 video_set_drvdata(vfd, dev);
800 return vfd; 801 return vfd;
801} 802}
802 803
803static void saa7134_unregister_video(struct saa7134_dev *dev) 804static void saa7134_unregister_video(struct saa7134_dev *dev)
804{ 805{
805 if (dev->video_dev) { 806 if (dev->video_dev) {
806 if (-1 != dev->video_dev->minor) 807 if (video_is_registered(dev->video_dev))
807 video_unregister_device(dev->video_dev); 808 video_unregister_device(dev->video_dev);
808 else 809 else
809 video_device_release(dev->video_dev); 810 video_device_release(dev->video_dev);
810 dev->video_dev = NULL; 811 dev->video_dev = NULL;
811 } 812 }
812 if (dev->vbi_dev) { 813 if (dev->vbi_dev) {
813 if (-1 != dev->vbi_dev->minor) 814 if (video_is_registered(dev->vbi_dev))
814 video_unregister_device(dev->vbi_dev); 815 video_unregister_device(dev->vbi_dev);
815 else 816 else
816 video_device_release(dev->vbi_dev); 817 video_device_release(dev->vbi_dev);
817 dev->vbi_dev = NULL; 818 dev->vbi_dev = NULL;
818 } 819 }
819 if (dev->radio_dev) { 820 if (dev->radio_dev) {
820 if (-1 != dev->radio_dev->minor) 821 if (video_is_registered(dev->radio_dev))
821 video_unregister_device(dev->radio_dev); 822 video_unregister_device(dev->radio_dev);
822 else 823 else
823 video_device_release(dev->radio_dev); 824 video_device_release(dev->radio_dev);
@@ -1046,8 +1047,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1046 dev->name); 1047 dev->name);
1047 goto fail4; 1048 goto fail4;
1048 } 1049 }
1049 printk(KERN_INFO "%s: registered device video%d [v4l2]\n", 1050 printk(KERN_INFO "%s: registered device %s [v4l2]\n",
1050 dev->name, dev->video_dev->num); 1051 dev->name, video_device_node_name(dev->video_dev));
1051 1052
1052 dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi"); 1053 dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
1053 1054
@@ -1055,8 +1056,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1055 vbi_nr[dev->nr]); 1056 vbi_nr[dev->nr]);
1056 if (err < 0) 1057 if (err < 0)
1057 goto fail4; 1058 goto fail4;
1058 printk(KERN_INFO "%s: registered device vbi%d\n", 1059 printk(KERN_INFO "%s: registered device %s\n",
1059 dev->name, dev->vbi_dev->num); 1060 dev->name, video_device_node_name(dev->vbi_dev));
1060 1061
1061 if (card_has_radio(dev)) { 1062 if (card_has_radio(dev)) {
1062 dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio"); 1063 dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
@@ -1064,8 +1065,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1064 radio_nr[dev->nr]); 1065 radio_nr[dev->nr]);
1065 if (err < 0) 1066 if (err < 0)
1066 goto fail4; 1067 goto fail4;
1067 printk(KERN_INFO "%s: registered device radio%d\n", 1068 printk(KERN_INFO "%s: registered device %s\n",
1068 dev->name, dev->radio_dev->num); 1069 dev->name, video_device_node_name(dev->radio_dev));
1069 } 1070 }
1070 1071
1071 /* everything worked */ 1072 /* everything worked */
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 296788c3bf0e..7dfecfc6017c 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -86,19 +86,11 @@ static int ts_init_encoder(struct saa7134_dev* dev)
86 86
87static int ts_open(struct file *file) 87static int ts_open(struct file *file)
88{ 88{
89 int minor = video_devdata(file)->minor; 89 struct video_device *vdev = video_devdata(file);
90 struct saa7134_dev *dev; 90 struct saa7134_dev *dev = video_drvdata(file);
91 int err; 91 int err;
92 92
93 lock_kernel(); 93 dprintk("open dev=%s\n", video_device_node_name(vdev));
94 list_for_each_entry(dev, &saa7134_devlist, devlist)
95 if (dev->empress_dev && dev->empress_dev->minor == minor)
96 goto found;
97 unlock_kernel();
98 return -ENODEV;
99 found:
100
101 dprintk("open minor=%d\n",minor);
102 err = -EBUSY; 94 err = -EBUSY;
103 if (!mutex_trylock(&dev->empress_tsq.vb_lock)) 95 if (!mutex_trylock(&dev->empress_tsq.vb_lock))
104 goto done; 96 goto done;
@@ -489,7 +481,6 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = {
489static struct video_device saa7134_empress_template = { 481static struct video_device saa7134_empress_template = {
490 .name = "saa7134-empress", 482 .name = "saa7134-empress",
491 .fops = &ts_fops, 483 .fops = &ts_fops,
492 .minor = -1,
493 .ioctl_ops = &ts_ioctl_ops, 484 .ioctl_ops = &ts_ioctl_ops,
494 485
495 .tvnorms = SAA7134_NORMS, 486 .tvnorms = SAA7134_NORMS,
@@ -531,6 +522,7 @@ static int empress_init(struct saa7134_dev *dev)
531 522
532 INIT_WORK(&dev->empress_workqueue, empress_signal_update); 523 INIT_WORK(&dev->empress_workqueue, empress_signal_update);
533 524
525 video_set_drvdata(dev->empress_dev, dev);
534 err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, 526 err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
535 empress_nr[dev->nr]); 527 empress_nr[dev->nr]);
536 if (err < 0) { 528 if (err < 0) {
@@ -540,8 +532,8 @@ static int empress_init(struct saa7134_dev *dev)
540 dev->empress_dev = NULL; 532 dev->empress_dev = NULL;
541 return err; 533 return err;
542 } 534 }
543 printk(KERN_INFO "%s: registered device video%d [mpeg]\n", 535 printk(KERN_INFO "%s: registered device %s [mpeg]\n",
544 dev->name, dev->empress_dev->num); 536 dev->name, video_device_node_name(dev->empress_dev));
545 537
546 videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops, 538 videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops,
547 &dev->pci->dev, &dev->slock, 539 &dev->pci->dev, &dev->slock,
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 744918b1cd47..f8e985989ca0 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -127,6 +127,61 @@ static int build_key(struct saa7134_dev *dev)
127 127
128/* --------------------- Chip specific I2C key builders ----------------- */ 128/* --------------------- Chip specific I2C key builders ----------------- */
129 129
130static int get_key_flydvb_trio(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
131{
132 int gpio;
133 int attempt = 0;
134 unsigned char b;
135
136 /* We need this to access the GPIO. Used by the saa_readl macro. */
137 struct saa7134_dev *dev = ir->c->adapter->algo_data;
138
139 if (dev == NULL) {
140 dprintk("get_key_flydvb_trio: "
141 "gir->c->adapter->algo_data is NULL!\n");
142 return -EIO;
143 }
144
145 /* rising SAA7134_GPIO_GPRESCAN reads the status */
146 saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
147 saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
148
149 gpio = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2);
150
151 if (0x40000 & ~gpio)
152 return 0; /* No button press */
153
154 /* No button press - only before first key pressed */
155 if (b == 0xFF)
156 return 0;
157
158 /* poll IR chip */
159 /* wake up the IR chip */
160 b = 0;
161
162 while (1 != i2c_master_send(ir->c, &b, 1)) {
163 if ((attempt++) < 10) {
164 /*
165 * wait a bit for next attempt -
166 * I don't know how to make it better
167 */
168 msleep(10);
169 continue;
170 }
171 i2cdprintk("send wake up byte to pic16C505 (IR chip)"
172 "failed %dx\n", attempt);
173 return -EIO;
174 }
175 if (1 != i2c_master_recv(ir->c, &b, 1)) {
176 i2cdprintk("read error\n");
177 return -EIO;
178 }
179
180 *ir_key = b;
181 *ir_raw = b;
182 return 1;
183}
184
130static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, u32 *ir_key, 185static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, u32 *ir_key,
131 u32 *ir_raw) 186 u32 *ir_raw)
132{ 187{
@@ -622,6 +677,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
622 mask_keyup = 0x020000; 677 mask_keyup = 0x020000;
623 polling = 50; /* ms */ 678 polling = 50; /* ms */
624 break; 679 break;
680 break;
625 } 681 }
626 if (NULL == ir_codes) { 682 if (NULL == ir_codes) {
627 printk("%s: Oops: IR config error [card=%d]\n", 683 printk("%s: Oops: IR config error [card=%d]\n",
@@ -652,7 +708,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
652 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", 708 snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
653 pci_name(dev->pci)); 709 pci_name(dev->pci));
654 710
655 err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); 711 err = ir_input_init(input_dev, &ir->ir, ir_type);
656 if (err < 0) 712 if (err < 0)
657 goto err_out_free; 713 goto err_out_free;
658 714
@@ -672,7 +728,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
672 dev->remote = ir; 728 dev->remote = ir;
673 saa7134_ir_start(dev, ir); 729 saa7134_ir_start(dev, ir);
674 730
675 err = input_register_device(ir->dev); 731 err = ir_input_register(ir->dev, ir_codes);
676 if (err) 732 if (err)
677 goto err_out_stop; 733 goto err_out_stop;
678 734
@@ -686,8 +742,6 @@ int saa7134_input_init1(struct saa7134_dev *dev)
686 saa7134_ir_stop(dev); 742 saa7134_ir_stop(dev);
687 dev->remote = NULL; 743 dev->remote = NULL;
688 err_out_free: 744 err_out_free:
689 ir_input_free(input_dev);
690 input_free_device(input_dev);
691 kfree(ir); 745 kfree(ir);
692 return err; 746 return err;
693} 747}
@@ -698,8 +752,7 @@ void saa7134_input_fini(struct saa7134_dev *dev)
698 return; 752 return;
699 753
700 saa7134_ir_stop(dev); 754 saa7134_ir_stop(dev);
701 ir_input_free(dev->remote->dev); 755 ir_input_unregister(dev->remote->dev);
702 input_unregister_device(dev->remote->dev);
703 kfree(dev->remote); 756 kfree(dev->remote);
704 dev->remote = NULL; 757 dev->remote = NULL;
705} 758}
@@ -788,6 +841,12 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
788 case SAA7134_BOARD_AVERMEDIA_CARDBUS_506: 841 case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
789 info.addr = 0x40; 842 info.addr = 0x40;
790 break; 843 break;
844 case SAA7134_BOARD_FLYDVB_TRIO:
845 dev->init_data.name = "FlyDVB Trio";
846 dev->init_data.get_key = get_key_flydvb_trio;
847 dev->init_data.ir_codes = &ir_codes_flydvb_table;
848 info.addr = 0x0b;
849 break;
791 default: 850 default:
792 dprintk("No I2C IR support for board %x\n", dev->board); 851 dprintk("No I2C IR support for board %x\n", dev->board);
793 return; 852 return;
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 35f8daa3a359..cb732640ac4a 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1326,33 +1326,26 @@ static int saa7134_resource(struct saa7134_fh *fh)
1326 1326
1327static int video_open(struct file *file) 1327static int video_open(struct file *file)
1328{ 1328{
1329 int minor = video_devdata(file)->minor; 1329 struct video_device *vdev = video_devdata(file);
1330 struct saa7134_dev *dev; 1330 struct saa7134_dev *dev = video_drvdata(file);
1331 struct saa7134_fh *fh; 1331 struct saa7134_fh *fh;
1332 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1332 enum v4l2_buf_type type = 0;
1333 int radio = 0; 1333 int radio = 0;
1334 1334
1335 mutex_lock(&saa7134_devlist_lock); 1335 switch (vdev->vfl_type) {
1336 list_for_each_entry(dev, &saa7134_devlist, devlist) { 1336 case VFL_TYPE_GRABBER:
1337 if (dev->video_dev && (dev->video_dev->minor == minor)) 1337 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1338 goto found; 1338 break;
1339 if (dev->radio_dev && (dev->radio_dev->minor == minor)) { 1339 case VFL_TYPE_VBI:
1340 radio = 1; 1340 type = V4L2_BUF_TYPE_VBI_CAPTURE;
1341 goto found; 1341 break;
1342 } 1342 case VFL_TYPE_RADIO:
1343 if (dev->vbi_dev && (dev->vbi_dev->minor == minor)) { 1343 radio = 1;
1344 type = V4L2_BUF_TYPE_VBI_CAPTURE; 1344 break;
1345 goto found;
1346 }
1347 } 1345 }
1348 mutex_unlock(&saa7134_devlist_lock);
1349 return -ENODEV;
1350
1351found:
1352 mutex_unlock(&saa7134_devlist_lock);
1353 1346
1354 dprintk("open minor=%d radio=%d type=%s\n",minor,radio, 1347 dprintk("open dev=%s radio=%d type=%s\n", video_device_node_name(vdev),
1355 v4l2_type_names[type]); 1348 radio, v4l2_type_names[type]);
1356 1349
1357 /* allocate + initialize per filehandle data */ 1350 /* allocate + initialize per filehandle data */
1358 fh = kzalloc(sizeof(*fh),GFP_KERNEL); 1351 fh = kzalloc(sizeof(*fh),GFP_KERNEL);
@@ -2502,7 +2495,6 @@ struct video_device saa7134_video_template = {
2502 .name = "saa7134-video", 2495 .name = "saa7134-video",
2503 .fops = &video_fops, 2496 .fops = &video_fops,
2504 .ioctl_ops = &video_ioctl_ops, 2497 .ioctl_ops = &video_ioctl_ops,
2505 .minor = -1,
2506 .tvnorms = SAA7134_NORMS, 2498 .tvnorms = SAA7134_NORMS,
2507 .current_norm = V4L2_STD_PAL, 2499 .current_norm = V4L2_STD_PAL,
2508}; 2500};
@@ -2511,7 +2503,6 @@ struct video_device saa7134_radio_template = {
2511 .name = "saa7134-radio", 2503 .name = "saa7134-radio",
2512 .fops = &radio_fops, 2504 .fops = &radio_fops,
2513 .ioctl_ops = &radio_ioctl_ops, 2505 .ioctl_ops = &radio_ioctl_ops,
2514 .minor = -1,
2515}; 2506};
2516 2507
2517int saa7134_video_init1(struct saa7134_dev *dev) 2508int saa7134_video_init1(struct saa7134_dev *dev)
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 85ffc2cba039..41d0166c0f95 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1428,8 +1428,8 @@ static int se401_probe(struct usb_interface *intf,
1428 err("video_register_device failed"); 1428 err("video_register_device failed");
1429 return -EIO; 1429 return -EIO;
1430 } 1430 }
1431 dev_info(&intf->dev, "registered new video device: video%d\n", 1431 dev_info(&intf->dev, "registered new video device: %s\n",
1432 se401->vdev.num); 1432 video_device_node_name(&se401->vdev));
1433 1433
1434 usb_set_intfdata(intf, se401); 1434 usb_set_intfdata(intf, se401);
1435 return 0; 1435 return 0;
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index a4f3472d4db8..d69363f0d8c9 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -38,6 +38,8 @@
38#include <media/soc_camera.h> 38#include <media/soc_camera.h>
39#include <media/sh_mobile_ceu.h> 39#include <media/sh_mobile_ceu.h>
40#include <media/videobuf-dma-contig.h> 40#include <media/videobuf-dma-contig.h>
41#include <media/v4l2-mediabus.h>
42#include <media/soc_mediabus.h>
41 43
42/* register offsets for sh7722 / sh7723 */ 44/* register offsets for sh7722 / sh7723 */
43 45
@@ -85,7 +87,7 @@
85/* per video frame buffer */ 87/* per video frame buffer */
86struct sh_mobile_ceu_buffer { 88struct sh_mobile_ceu_buffer {
87 struct videobuf_buffer vb; /* v4l buffer must be first */ 89 struct videobuf_buffer vb; /* v4l buffer must be first */
88 const struct soc_camera_data_format *fmt; 90 enum v4l2_mbus_pixelcode code;
89}; 91};
90 92
91struct sh_mobile_ceu_dev { 93struct sh_mobile_ceu_dev {
@@ -105,7 +107,8 @@ struct sh_mobile_ceu_dev {
105 107
106 u32 cflcr; 108 u32 cflcr;
107 109
108 unsigned int is_interlaced:1; 110 enum v4l2_field field;
111
109 unsigned int image_mode:1; 112 unsigned int image_mode:1;
110 unsigned int is_16bit:1; 113 unsigned int is_16bit:1;
111}; 114};
@@ -114,8 +117,8 @@ struct sh_mobile_ceu_cam {
114 struct v4l2_rect ceu_rect; 117 struct v4l2_rect ceu_rect;
115 unsigned int cam_width; 118 unsigned int cam_width;
116 unsigned int cam_height; 119 unsigned int cam_height;
117 const struct soc_camera_data_format *extra_fmt; 120 const struct soc_mbus_pixelfmt *extra_fmt;
118 const struct soc_camera_data_format *camera_fmt; 121 enum v4l2_mbus_pixelcode code;
119}; 122};
120 123
121static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev) 124static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev)
@@ -197,16 +200,19 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq,
197 struct soc_camera_device *icd = vq->priv_data; 200 struct soc_camera_device *icd = vq->priv_data;
198 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 201 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
199 struct sh_mobile_ceu_dev *pcdev = ici->priv; 202 struct sh_mobile_ceu_dev *pcdev = ici->priv;
200 int bytes_per_pixel = (icd->current_fmt->depth + 7) >> 3; 203 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
204 icd->current_fmt->host_fmt);
205
206 if (bytes_per_line < 0)
207 return bytes_per_line;
201 208
202 *size = PAGE_ALIGN(icd->user_width * icd->user_height * 209 *size = bytes_per_line * icd->user_height;
203 bytes_per_pixel);
204 210
205 if (0 == *count) 211 if (0 == *count)
206 *count = 2; 212 *count = 2;
207 213
208 if (pcdev->video_limit) { 214 if (pcdev->video_limit) {
209 while (*size * *count > pcdev->video_limit) 215 while (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
210 (*count)--; 216 (*count)--;
211 } 217 }
212 218
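The videobuf setup above now sizes a frame as bytes-per-line times height, with bytes-per-line derived from the media-bus format, and only page-aligns when checking how many buffers fit the contiguous area. A stand-alone sketch of that sizing with assumed numbers (640x480 at 2 bytes per pixel and a 4 MiB limit):

    #include <stdio.h>

    #define PAGE_ALIGN(x) (((x) + 4095UL) & ~4095UL)  /* 4 KiB pages assumed */

    int main(void)
    {
            unsigned long bytes_per_line = 640 * 2;     /* e.g. YUYV */
            unsigned long size = bytes_per_line * 480;  /* 614400 bytes per frame */
            unsigned long video_limit = 4UL << 20;      /* assumed 4 MiB contiguous area */
            unsigned int count = 32;

            while (PAGE_ALIGN(size) * count > video_limit)
                    count--;

            printf("%u buffers of %lu bytes fit\n", count, size);  /* 6 buffers */
            return 0;
    }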
@@ -249,10 +255,13 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
249{ 255{
250 struct soc_camera_device *icd = pcdev->icd; 256 struct soc_camera_device *icd = pcdev->icd;
251 dma_addr_t phys_addr_top, phys_addr_bottom; 257 dma_addr_t phys_addr_top, phys_addr_bottom;
258 unsigned long top1, top2;
259 unsigned long bottom1, bottom2;
252 u32 status; 260 u32 status;
253 int ret = 0; 261 int ret = 0;
254 262
255 /* The hardware is _very_ picky about this sequence. Especially 263 /*
264 * The hardware is _very_ picky about this sequence. Especially
256 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge 265 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
257 * several not-so-well documented interrupt sources in CETCR. 266 * several not-so-well documented interrupt sources in CETCR.
258 */ 267 */
@@ -276,25 +285,36 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
276 if (!pcdev->active) 285 if (!pcdev->active)
277 return ret; 286 return ret;
278 287
288 if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
289 top1 = CDBYR;
290 top2 = CDBCR;
291 bottom1 = CDAYR;
292 bottom2 = CDACR;
293 } else {
294 top1 = CDAYR;
295 top2 = CDACR;
296 bottom1 = CDBYR;
297 bottom2 = CDBCR;
298 }
299
279 phys_addr_top = videobuf_to_dma_contig(pcdev->active); 300 phys_addr_top = videobuf_to_dma_contig(pcdev->active);
280 ceu_write(pcdev, CDAYR, phys_addr_top); 301 ceu_write(pcdev, top1, phys_addr_top);
281 if (pcdev->is_interlaced) { 302 if (V4L2_FIELD_NONE != pcdev->field) {
282 phys_addr_bottom = phys_addr_top + icd->user_width; 303 phys_addr_bottom = phys_addr_top + icd->user_width;
283 ceu_write(pcdev, CDBYR, phys_addr_bottom); 304 ceu_write(pcdev, bottom1, phys_addr_bottom);
284 } 305 }
285 306
286 switch (icd->current_fmt->fourcc) { 307 switch (icd->current_fmt->host_fmt->fourcc) {
287 case V4L2_PIX_FMT_NV12: 308 case V4L2_PIX_FMT_NV12:
288 case V4L2_PIX_FMT_NV21: 309 case V4L2_PIX_FMT_NV21:
289 case V4L2_PIX_FMT_NV16: 310 case V4L2_PIX_FMT_NV16:
290 case V4L2_PIX_FMT_NV61: 311 case V4L2_PIX_FMT_NV61:
291 phys_addr_top += icd->user_width * 312 phys_addr_top += icd->user_width *
292 icd->user_height; 313 icd->user_height;
293 ceu_write(pcdev, CDACR, phys_addr_top); 314 ceu_write(pcdev, top2, phys_addr_top);
294 if (pcdev->is_interlaced) { 315 if (V4L2_FIELD_NONE != pcdev->field) {
295 phys_addr_bottom = phys_addr_top + 316 phys_addr_bottom = phys_addr_top + icd->user_width;
296 icd->user_width; 317 ceu_write(pcdev, bottom2, phys_addr_bottom);
297 ceu_write(pcdev, CDBCR, phys_addr_bottom);
298 } 318 }
299 } 319 }
300 320
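
For the planar NV formats the code above points the second register pair at the chroma plane, and with interlaced input the bottom-field registers get the same planes shifted by one line; V4L2_FIELD_INTERLACED_BT only swaps which register pair receives the top field. A hypothetical sketch (not driver code) of the resulting addresses for a 640x480 NV12 buffer:

    /* Hypothetical helper showing the address layout programmed above */
    static void example_nv12_layout(dma_addr_t base)
    {
            dma_addr_t luma_top   = base;                  /* CDAYR (CDBYR for _BT) */
            dma_addr_t luma_bot   = base + 640;            /* other field, next line */
            dma_addr_t chroma_top = base + 640 * 480;      /* CDACR: CbCr plane base */
            dma_addr_t chroma_bot = chroma_top + 640;      /* CDBCR, other field */

            pr_debug("Y 0x%lx/0x%lx CbCr 0x%lx/0x%lx\n",
                     (unsigned long)luma_top, (unsigned long)luma_bot,
                     (unsigned long)chroma_top, (unsigned long)chroma_bot);
    }
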
@@ -310,8 +330,13 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq,
310{ 330{
311 struct soc_camera_device *icd = vq->priv_data; 331 struct soc_camera_device *icd = vq->priv_data;
312 struct sh_mobile_ceu_buffer *buf; 332 struct sh_mobile_ceu_buffer *buf;
333 int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
334 icd->current_fmt->host_fmt);
313 int ret; 335 int ret;
314 336
337 if (bytes_per_line < 0)
338 return bytes_per_line;
339
315 buf = container_of(vb, struct sh_mobile_ceu_buffer, vb); 340 buf = container_of(vb, struct sh_mobile_ceu_buffer, vb);
316 341
317 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %zd\n", __func__, 342 dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %zd\n", __func__,
@@ -321,25 +346,27 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq,
321 WARN_ON(!list_empty(&vb->queue)); 346 WARN_ON(!list_empty(&vb->queue));
322 347
323#ifdef DEBUG 348#ifdef DEBUG
324 /* This can be useful if you want to see if we actually fill 349 /*
325 * the buffer with something */ 350 * This can be useful if you want to see if we actually fill
351 * the buffer with something
352 */
326 memset((void *)vb->baddr, 0xaa, vb->bsize); 353 memset((void *)vb->baddr, 0xaa, vb->bsize);
327#endif 354#endif
328 355
329 BUG_ON(NULL == icd->current_fmt); 356 BUG_ON(NULL == icd->current_fmt);
330 357
331 if (buf->fmt != icd->current_fmt || 358 if (buf->code != icd->current_fmt->code ||
332 vb->width != icd->user_width || 359 vb->width != icd->user_width ||
333 vb->height != icd->user_height || 360 vb->height != icd->user_height ||
334 vb->field != field) { 361 vb->field != field) {
335 buf->fmt = icd->current_fmt; 362 buf->code = icd->current_fmt->code;
336 vb->width = icd->user_width; 363 vb->width = icd->user_width;
337 vb->height = icd->user_height; 364 vb->height = icd->user_height;
338 vb->field = field; 365 vb->field = field;
339 vb->state = VIDEOBUF_NEEDS_INIT; 366 vb->state = VIDEOBUF_NEEDS_INIT;
340 } 367 }
341 368
342 vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3); 369 vb->size = vb->height * bytes_per_line;
343 if (0 != vb->baddr && vb->bsize < vb->size) { 370 if (0 != vb->baddr && vb->bsize < vb->size) {
344 ret = -EINVAL; 371 ret = -EINVAL;
345 goto out; 372 goto out;
@@ -456,6 +483,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
456{ 483{
457 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 484 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
458 struct sh_mobile_ceu_dev *pcdev = ici->priv; 485 struct sh_mobile_ceu_dev *pcdev = ici->priv;
486 int ret;
459 487
460 if (pcdev->icd) 488 if (pcdev->icd)
461 return -EBUSY; 489 return -EBUSY;
@@ -466,9 +494,11 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
466 494
467 pm_runtime_get_sync(ici->v4l2_dev.dev); 495 pm_runtime_get_sync(ici->v4l2_dev.dev);
468 496
469 pcdev->icd = icd; 497 ret = sh_mobile_ceu_soft_reset(pcdev);
498 if (!ret)
499 pcdev->icd = icd;
470 500
471 return sh_mobile_ceu_soft_reset(pcdev); 501 return ret;
472} 502}
473 503
474/* Called with .video_lock held */ 504/* Called with .video_lock held */
@@ -558,24 +588,35 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
558 in_width *= 2; 588 in_width *= 2;
559 left_offset *= 2; 589 left_offset *= 2;
560 } 590 }
561 width = cdwdr_width = out_width; 591 width = out_width;
592 cdwdr_width = out_width;
562 } else { 593 } else {
563 unsigned int w_factor = (icd->current_fmt->depth + 7) >> 3; 594 int bytes_per_line = soc_mbus_bytes_per_line(out_width,
595 icd->current_fmt->host_fmt);
596 unsigned int w_factor;
564 597
565 width = out_width * w_factor / 2; 598 width = out_width;
566 599
567 if (!pcdev->is_16bit) 600 switch (icd->current_fmt->host_fmt->packing) {
568 w_factor *= 2; 601 case SOC_MBUS_PACKING_2X8_PADHI:
602 w_factor = 2;
603 break;
604 default:
605 w_factor = 1;
606 }
569 607
570 in_width = rect->width * w_factor / 2; 608 in_width = rect->width * w_factor;
571 left_offset = left_offset * w_factor / 2; 609 left_offset = left_offset * w_factor;
572 610
573 cdwdr_width = width * 2; 611 if (bytes_per_line < 0)
612 cdwdr_width = out_width;
613 else
614 cdwdr_width = bytes_per_line;
574 } 615 }
575 616
576 height = out_height; 617 height = out_height;
577 in_height = rect->height; 618 in_height = rect->height;
578 if (pcdev->is_interlaced) { 619 if (V4L2_FIELD_NONE != pcdev->field) {
579 height /= 2; 620 height /= 2;
580 in_height /= 2; 621 in_height /= 2;
581 top_offset /= 2; 622 top_offset /= 2;
@@ -646,6 +687,23 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
646 if (!common_flags) 687 if (!common_flags)
647 return -EINVAL; 688 return -EINVAL;
648 689
 690	/* Make choices based on platform preferences */
691 if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
692 (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
693 if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
694 common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
695 else
696 common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
697 }
698
699 if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
700 (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
701 if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
702 common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
703 else
704 common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
705 }
706
649 ret = icd->ops->set_bus_param(icd, common_flags); 707 ret = icd->ops->set_bus_param(icd, common_flags);
650 if (ret < 0) 708 if (ret < 0)
651 return ret; 709 return ret;
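
The polarity resolution above only takes effect when both the camera and the CEU accept either polarity; the board then decides through platform data. A hypothetical board-file fragment follows (the two sync flags are the ones this patch tests; the struct name and SH_CEU_FLAG_USE_8BIT_BUS are assumed from include/media/sh_mobile_ceu.h):

    /* Hypothetical board fragment requesting active-low syncs */
    static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
            .flags = SH_CEU_FLAG_USE_8BIT_BUS |
                     SH_CEU_FLAG_HSYNC_LOW |
                     SH_CEU_FLAG_VSYNC_LOW,
    };
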
@@ -667,24 +725,24 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
667 value = 0x00000010; /* data fetch by default */ 725 value = 0x00000010; /* data fetch by default */
668 yuv_lineskip = 0; 726 yuv_lineskip = 0;
669 727
670 switch (icd->current_fmt->fourcc) { 728 switch (icd->current_fmt->host_fmt->fourcc) {
671 case V4L2_PIX_FMT_NV12: 729 case V4L2_PIX_FMT_NV12:
672 case V4L2_PIX_FMT_NV21: 730 case V4L2_PIX_FMT_NV21:
673 yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */ 731 yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */
674 /* fall-through */ 732 /* fall-through */
675 case V4L2_PIX_FMT_NV16: 733 case V4L2_PIX_FMT_NV16:
676 case V4L2_PIX_FMT_NV61: 734 case V4L2_PIX_FMT_NV61:
677 switch (cam->camera_fmt->fourcc) { 735 switch (cam->code) {
678 case V4L2_PIX_FMT_UYVY: 736 case V4L2_MBUS_FMT_YUYV8_2X8_BE:
679 value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ 737 value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
680 break; 738 break;
681 case V4L2_PIX_FMT_VYUY: 739 case V4L2_MBUS_FMT_YVYU8_2X8_BE:
682 value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ 740 value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
683 break; 741 break;
684 case V4L2_PIX_FMT_YUYV: 742 case V4L2_MBUS_FMT_YUYV8_2X8_LE:
685 value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ 743 value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
686 break; 744 break;
687 case V4L2_PIX_FMT_YVYU: 745 case V4L2_MBUS_FMT_YVYU8_2X8_LE:
688 value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ 746 value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
689 break; 747 break;
690 default: 748 default:
@@ -692,8 +750,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
692 } 750 }
693 } 751 }
694 752
695 if (icd->current_fmt->fourcc == V4L2_PIX_FMT_NV21 || 753 if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
696 icd->current_fmt->fourcc == V4L2_PIX_FMT_NV61) 754 icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
697 value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */ 755 value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
698 756
699 value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0; 757 value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
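
Worked through for one case the two hunks above cover: a sensor delivering Cb-first V4L2_MBUS_FMT_YUYV8_2X8_BE while the user asked for NV21 starts from 0x00000000, and the XOR flips it to 0x00000100, so the CEU fetches Cr first and the chroma plane comes out V-before-U as NV21 expects:

    /* Fragment restating the arithmetic above for NV21 from a Cb-first sensor */
    u32 value = 0x00000000;    /* YUYV8_2X8_BE: Cb0, Y0, Cr0, Y1 */
    value ^= 0x00000100;       /* NV21 wants V first: Cr0, Y0, Cb0, Y1 */
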
@@ -702,14 +760,27 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
702 ceu_write(pcdev, CAMCR, value); 760 ceu_write(pcdev, CAMCR, value);
703 761
704 ceu_write(pcdev, CAPCR, 0x00300000); 762 ceu_write(pcdev, CAPCR, 0x00300000);
705 ceu_write(pcdev, CAIFR, pcdev->is_interlaced ? 0x101 : 0); 763
764 switch (pcdev->field) {
765 case V4L2_FIELD_INTERLACED_TB:
766 value = 0x101;
767 break;
768 case V4L2_FIELD_INTERLACED_BT:
769 value = 0x102;
770 break;
771 default:
772 value = 0;
773 break;
774 }
775 ceu_write(pcdev, CAIFR, value);
706 776
707 sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height); 777 sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height);
708 mdelay(1); 778 mdelay(1);
709 779
710 ceu_write(pcdev, CFLCR, pcdev->cflcr); 780 ceu_write(pcdev, CFLCR, pcdev->cflcr);
711 781
712 /* A few words about byte order (observed in Big Endian mode) 782 /*
783 * A few words about byte order (observed in Big Endian mode)
713 * 784 *
714 * In data fetch mode bytes are received in chunks of 8 bytes. 785 * In data fetch mode bytes are received in chunks of 8 bytes.
715 * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first) 786 * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
@@ -739,7 +810,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
739 return 0; 810 return 0;
740} 811}
741 812
742static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd) 813static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
814 unsigned char buswidth)
743{ 815{
744 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 816 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
745 struct sh_mobile_ceu_dev *pcdev = ici->priv; 817 struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -748,48 +820,75 @@ static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd)
748 camera_flags = icd->ops->query_bus_param(icd); 820 camera_flags = icd->ops->query_bus_param(icd);
749 common_flags = soc_camera_bus_param_compatible(camera_flags, 821 common_flags = soc_camera_bus_param_compatible(camera_flags,
750 make_bus_param(pcdev)); 822 make_bus_param(pcdev));
751 if (!common_flags) 823 if (!common_flags || buswidth > 16 ||
824 (buswidth > 8 && !(common_flags & SOCAM_DATAWIDTH_16)))
752 return -EINVAL; 825 return -EINVAL;
753 826
754 return 0; 827 return 0;
755} 828}
756 829
757static const struct soc_camera_data_format sh_mobile_ceu_formats[] = { 830static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
758 {
759 .name = "NV12",
760 .depth = 12,
761 .fourcc = V4L2_PIX_FMT_NV12,
762 .colorspace = V4L2_COLORSPACE_JPEG,
763 },
764 {
765 .name = "NV21",
766 .depth = 12,
767 .fourcc = V4L2_PIX_FMT_NV21,
768 .colorspace = V4L2_COLORSPACE_JPEG,
769 },
770 {
771 .name = "NV16",
772 .depth = 16,
773 .fourcc = V4L2_PIX_FMT_NV16,
774 .colorspace = V4L2_COLORSPACE_JPEG,
775 },
776 { 831 {
777 .name = "NV61", 832 .fourcc = V4L2_PIX_FMT_NV12,
778 .depth = 16, 833 .name = "NV12",
779 .fourcc = V4L2_PIX_FMT_NV61, 834 .bits_per_sample = 12,
780 .colorspace = V4L2_COLORSPACE_JPEG, 835 .packing = SOC_MBUS_PACKING_NONE,
836 .order = SOC_MBUS_ORDER_LE,
837 }, {
838 .fourcc = V4L2_PIX_FMT_NV21,
839 .name = "NV21",
840 .bits_per_sample = 12,
841 .packing = SOC_MBUS_PACKING_NONE,
842 .order = SOC_MBUS_ORDER_LE,
843 }, {
844 .fourcc = V4L2_PIX_FMT_NV16,
845 .name = "NV16",
846 .bits_per_sample = 16,
847 .packing = SOC_MBUS_PACKING_NONE,
848 .order = SOC_MBUS_ORDER_LE,
849 }, {
850 .fourcc = V4L2_PIX_FMT_NV61,
851 .name = "NV61",
852 .bits_per_sample = 16,
853 .packing = SOC_MBUS_PACKING_NONE,
854 .order = SOC_MBUS_ORDER_LE,
781 }, 855 },
782}; 856};
783 857
858/* This will be corrected as we get more formats */
859static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
860{
861 return fmt->packing == SOC_MBUS_PACKING_NONE ||
862 (fmt->bits_per_sample == 8 &&
863 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
864 (fmt->bits_per_sample > 8 &&
865 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
866}
867
784static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, 868static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
785 struct soc_camera_format_xlate *xlate) 869 struct soc_camera_format_xlate *xlate)
786{ 870{
871 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
787 struct device *dev = icd->dev.parent; 872 struct device *dev = icd->dev.parent;
788 int ret, k, n; 873 int ret, k, n;
789 int formats = 0; 874 int formats = 0;
790 struct sh_mobile_ceu_cam *cam; 875 struct sh_mobile_ceu_cam *cam;
876 enum v4l2_mbus_pixelcode code;
877 const struct soc_mbus_pixelfmt *fmt;
791 878
792 ret = sh_mobile_ceu_try_bus_param(icd); 879 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
880 if (ret < 0)
881 /* No more formats */
882 return 0;
883
884 fmt = soc_mbus_get_fmtdesc(code);
885 if (!fmt) {
886 dev_err(icd->dev.parent,
887 "Invalid format code #%d: %d\n", idx, code);
888 return -EINVAL;
889 }
890
891 ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
793 if (ret < 0) 892 if (ret < 0)
794 return 0; 893 return 0;
795 894
@@ -807,13 +906,13 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
807 if (!idx) 906 if (!idx)
808 cam->extra_fmt = NULL; 907 cam->extra_fmt = NULL;
809 908
810 switch (icd->formats[idx].fourcc) { 909 switch (code) {
811 case V4L2_PIX_FMT_UYVY: 910 case V4L2_MBUS_FMT_YUYV8_2X8_BE:
812 case V4L2_PIX_FMT_VYUY: 911 case V4L2_MBUS_FMT_YVYU8_2X8_BE:
813 case V4L2_PIX_FMT_YUYV: 912 case V4L2_MBUS_FMT_YUYV8_2X8_LE:
814 case V4L2_PIX_FMT_YVYU: 913 case V4L2_MBUS_FMT_YVYU8_2X8_LE:
815 if (cam->extra_fmt) 914 if (cam->extra_fmt)
816 goto add_single_format; 915 break;
817 916
818 /* 917 /*
819 * Our case is simple so far: for any of the above four camera 918 * Our case is simple so far: for any of the above four camera
@@ -824,32 +923,31 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
824 * the host_priv pointer and check whether the format you're 923 * the host_priv pointer and check whether the format you're
825 * going to add now is already there. 924 * going to add now is already there.
826 */ 925 */
827 cam->extra_fmt = (void *)sh_mobile_ceu_formats; 926 cam->extra_fmt = sh_mobile_ceu_formats;
828 927
829 n = ARRAY_SIZE(sh_mobile_ceu_formats); 928 n = ARRAY_SIZE(sh_mobile_ceu_formats);
830 formats += n; 929 formats += n;
831 for (k = 0; xlate && k < n; k++) { 930 for (k = 0; xlate && k < n; k++) {
832 xlate->host_fmt = &sh_mobile_ceu_formats[k]; 931 xlate->host_fmt = &sh_mobile_ceu_formats[k];
833 xlate->cam_fmt = icd->formats + idx; 932 xlate->code = code;
834 xlate->buswidth = icd->formats[idx].depth;
835 xlate++; 933 xlate++;
836 dev_dbg(dev, "Providing format %s using %s\n", 934 dev_dbg(dev, "Providing format %s using code %d\n",
837 sh_mobile_ceu_formats[k].name, 935 sh_mobile_ceu_formats[k].name, code);
838 icd->formats[idx].name);
839 } 936 }
937 break;
840 default: 938 default:
841add_single_format: 939 if (!sh_mobile_ceu_packing_supported(fmt))
842 /* Generic pass-through */ 940 return 0;
843 formats++; 941 }
844 if (xlate) { 942
845 xlate->host_fmt = icd->formats + idx; 943 /* Generic pass-through */
846 xlate->cam_fmt = icd->formats + idx; 944 formats++;
847 xlate->buswidth = icd->formats[idx].depth; 945 if (xlate) {
848 xlate++; 946 xlate->host_fmt = fmt;
849 dev_dbg(dev, 947 xlate->code = code;
850 "Providing format %s in pass-through mode\n", 948 xlate++;
851 icd->formats[idx].name); 949 dev_dbg(dev, "Providing format %s in pass-through mode\n",
852 } 950 xlate->host_fmt->name);
853 } 951 }
854 952
855 return formats; 953 return formats;
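
With get_formats() converted as above, a sensor reporting one of the four 8-bit YUYV codes ends up advertising the four planar CEU formats plus one pass-through entry per code. Purely as an illustration of the outcome (the real entries are filled by the loop above and soc_mbus_get_fmtdesc(), not by a static table like this):

    /* Illustration only: roughly what a YUYV8_2X8_LE sensor would yield */
    static const struct soc_camera_format_xlate example_xlate[] = {
            { .host_fmt = &sh_mobile_ceu_formats[0],        /* NV12 */
              .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, },
            { .host_fmt = &sh_mobile_ceu_formats[1],        /* NV21 */
              .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, },
            { .host_fmt = &sh_mobile_ceu_formats[2],        /* NV16 */
              .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, },
            { .host_fmt = &sh_mobile_ceu_formats[3],        /* NV61 */
              .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, },
            /* plus one pass-through entry, host_fmt from soc_mbus_get_fmtdesc(code) */
    };
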
@@ -1029,17 +1127,15 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
1029static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect, 1127static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect,
1030 unsigned int *scale_h, unsigned int *scale_v) 1128 unsigned int *scale_h, unsigned int *scale_v)
1031{ 1129{
1032 struct v4l2_format f; 1130 struct v4l2_mbus_framefmt mf;
1033 int ret; 1131 int ret;
1034 1132
1035 f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1133 ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
1036
1037 ret = v4l2_subdev_call(sd, video, g_fmt, &f);
1038 if (ret < 0) 1134 if (ret < 0)
1039 return ret; 1135 return ret;
1040 1136
1041 *scale_h = calc_generic_scale(rect->width, f.fmt.pix.width); 1137 *scale_h = calc_generic_scale(rect->width, mf.width);
1042 *scale_v = calc_generic_scale(rect->height, f.fmt.pix.height); 1138 *scale_v = calc_generic_scale(rect->height, mf.height);
1043 1139
1044 return 0; 1140 return 0;
1045} 1141}
@@ -1054,32 +1150,29 @@ static int get_camera_subwin(struct soc_camera_device *icd,
1054 if (!ceu_rect->width) { 1150 if (!ceu_rect->width) {
1055 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1151 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1056 struct device *dev = icd->dev.parent; 1152 struct device *dev = icd->dev.parent;
1057 struct v4l2_format f; 1153 struct v4l2_mbus_framefmt mf;
1058 struct v4l2_pix_format *pix = &f.fmt.pix;
1059 int ret; 1154 int ret;
1060 /* First time */ 1155 /* First time */
1061 1156
1062 f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1157 ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
1063
1064 ret = v4l2_subdev_call(sd, video, g_fmt, &f);
1065 if (ret < 0) 1158 if (ret < 0)
1066 return ret; 1159 return ret;
1067 1160
1068 dev_geo(dev, "camera fmt %ux%u\n", pix->width, pix->height); 1161 dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
1069 1162
1070 if (pix->width > 2560) { 1163 if (mf.width > 2560) {
1071 ceu_rect->width = 2560; 1164 ceu_rect->width = 2560;
1072 ceu_rect->left = (pix->width - 2560) / 2; 1165 ceu_rect->left = (mf.width - 2560) / 2;
1073 } else { 1166 } else {
1074 ceu_rect->width = pix->width; 1167 ceu_rect->width = mf.width;
1075 ceu_rect->left = 0; 1168 ceu_rect->left = 0;
1076 } 1169 }
1077 1170
1078 if (pix->height > 1920) { 1171 if (mf.height > 1920) {
1079 ceu_rect->height = 1920; 1172 ceu_rect->height = 1920;
1080 ceu_rect->top = (pix->height - 1920) / 2; 1173 ceu_rect->top = (mf.height - 1920) / 2;
1081 } else { 1174 } else {
1082 ceu_rect->height = pix->height; 1175 ceu_rect->height = mf.height;
1083 ceu_rect->top = 0; 1176 ceu_rect->top = 0;
1084 } 1177 }
1085 1178
@@ -1096,13 +1189,12 @@ static int get_camera_subwin(struct soc_camera_device *icd,
1096 return 0; 1189 return 0;
1097} 1190}
1098 1191
1099static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f, 1192static int client_s_fmt(struct soc_camera_device *icd,
1100 bool ceu_can_scale) 1193 struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
1101{ 1194{
1102 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1195 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1103 struct device *dev = icd->dev.parent; 1196 struct device *dev = icd->dev.parent;
1104 struct v4l2_pix_format *pix = &f->fmt.pix; 1197 unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
1105 unsigned int width = pix->width, height = pix->height, tmp_w, tmp_h;
1106 unsigned int max_width, max_height; 1198 unsigned int max_width, max_height;
1107 struct v4l2_cropcap cap; 1199 struct v4l2_cropcap cap;
1108 int ret; 1200 int ret;
@@ -1116,29 +1208,29 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f,
1116 max_width = min(cap.bounds.width, 2560); 1208 max_width = min(cap.bounds.width, 2560);
1117 max_height = min(cap.bounds.height, 1920); 1209 max_height = min(cap.bounds.height, 1920);
1118 1210
1119 ret = v4l2_subdev_call(sd, video, s_fmt, f); 1211 ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
1120 if (ret < 0) 1212 if (ret < 0)
1121 return ret; 1213 return ret;
1122 1214
1123 dev_geo(dev, "camera scaled to %ux%u\n", pix->width, pix->height); 1215 dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
1124 1216
1125 if ((width == pix->width && height == pix->height) || !ceu_can_scale) 1217 if ((width == mf->width && height == mf->height) || !ceu_can_scale)
1126 return 0; 1218 return 0;
1127 1219
1128 /* Camera set a format, but geometry is not precise, try to improve */ 1220 /* Camera set a format, but geometry is not precise, try to improve */
1129 tmp_w = pix->width; 1221 tmp_w = mf->width;
1130 tmp_h = pix->height; 1222 tmp_h = mf->height;
1131 1223
1132 /* width <= max_width && height <= max_height - guaranteed by try_fmt */ 1224 /* width <= max_width && height <= max_height - guaranteed by try_fmt */
1133 while ((width > tmp_w || height > tmp_h) && 1225 while ((width > tmp_w || height > tmp_h) &&
1134 tmp_w < max_width && tmp_h < max_height) { 1226 tmp_w < max_width && tmp_h < max_height) {
1135 tmp_w = min(2 * tmp_w, max_width); 1227 tmp_w = min(2 * tmp_w, max_width);
1136 tmp_h = min(2 * tmp_h, max_height); 1228 tmp_h = min(2 * tmp_h, max_height);
1137 pix->width = tmp_w; 1229 mf->width = tmp_w;
1138 pix->height = tmp_h; 1230 mf->height = tmp_h;
1139 ret = v4l2_subdev_call(sd, video, s_fmt, f); 1231 ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
1140 dev_geo(dev, "Camera scaled to %ux%u\n", 1232 dev_geo(dev, "Camera scaled to %ux%u\n",
1141 pix->width, pix->height); 1233 mf->width, mf->height);
1142 if (ret < 0) { 1234 if (ret < 0) {
1143 /* This shouldn't happen */ 1235 /* This shouldn't happen */
1144 dev_err(dev, "Client failed to set format: %d\n", ret); 1236 dev_err(dev, "Client failed to set format: %d\n", ret);
@@ -1156,27 +1248,26 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f,
1156 */ 1248 */
1157static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect, 1249static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
1158 struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect, 1250 struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect,
1159 struct v4l2_format *f, bool ceu_can_scale) 1251 struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
1160{ 1252{
1161 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1253 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1162 struct sh_mobile_ceu_cam *cam = icd->host_priv; 1254 struct sh_mobile_ceu_cam *cam = icd->host_priv;
1163 struct device *dev = icd->dev.parent; 1255 struct device *dev = icd->dev.parent;
1164 struct v4l2_format f_tmp = *f; 1256 struct v4l2_mbus_framefmt mf_tmp = *mf;
1165 struct v4l2_pix_format *pix_tmp = &f_tmp.fmt.pix;
1166 unsigned int scale_h, scale_v; 1257 unsigned int scale_h, scale_v;
1167 int ret; 1258 int ret;
1168 1259
1169 /* 5. Apply iterative camera S_FMT for camera user window. */ 1260 /* 5. Apply iterative camera S_FMT for camera user window. */
1170 ret = client_s_fmt(icd, &f_tmp, ceu_can_scale); 1261 ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
1171 if (ret < 0) 1262 if (ret < 0)
1172 return ret; 1263 return ret;
1173 1264
1174 dev_geo(dev, "5: camera scaled to %ux%u\n", 1265 dev_geo(dev, "5: camera scaled to %ux%u\n",
1175 pix_tmp->width, pix_tmp->height); 1266 mf_tmp.width, mf_tmp.height);
1176 1267
1177 /* 6. Retrieve camera output window (g_fmt) */ 1268 /* 6. Retrieve camera output window (g_fmt) */
1178 1269
1179 /* unneeded - it is already in "f_tmp" */ 1270 /* unneeded - it is already in "mf_tmp" */
1180 1271
1181 /* 7. Calculate new camera scales. */ 1272 /* 7. Calculate new camera scales. */
1182 ret = get_camera_scales(sd, rect, &scale_h, &scale_v); 1273 ret = get_camera_scales(sd, rect, &scale_h, &scale_v);
@@ -1185,10 +1276,11 @@ static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
1185 1276
1186 dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v); 1277 dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v);
1187 1278
1188 cam->cam_width = pix_tmp->width; 1279 cam->cam_width = mf_tmp.width;
1189 cam->cam_height = pix_tmp->height; 1280 cam->cam_height = mf_tmp.height;
1190 f->fmt.pix.width = pix_tmp->width; 1281 mf->width = mf_tmp.width;
1191 f->fmt.pix.height = pix_tmp->height; 1282 mf->height = mf_tmp.height;
1283 mf->colorspace = mf_tmp.colorspace;
1192 1284
1193 /* 1285 /*
1194 * 8. Calculate new CEU crop - apply camera scales to previously 1286 * 8. Calculate new CEU crop - apply camera scales to previously
@@ -1252,8 +1344,7 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
1252 struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect; 1344 struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
1253 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1345 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1254 struct device *dev = icd->dev.parent; 1346 struct device *dev = icd->dev.parent;
1255 struct v4l2_format f; 1347 struct v4l2_mbus_framefmt mf;
1256 struct v4l2_pix_format *pix = &f.fmt.pix;
1257 unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v, 1348 unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
1258 out_width, out_height; 1349 out_width, out_height;
1259 u32 capsr, cflcr; 1350 u32 capsr, cflcr;
@@ -1302,26 +1393,25 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
1302 * 5. Using actual input window and calculated combined scales calculate 1393 * 5. Using actual input window and calculated combined scales calculate
1303 * camera target output window. 1394 * camera target output window.
1304 */ 1395 */
1305 pix->width = scale_down(cam_rect->width, scale_comb_h); 1396 mf.width = scale_down(cam_rect->width, scale_comb_h);
1306 pix->height = scale_down(cam_rect->height, scale_comb_v); 1397 mf.height = scale_down(cam_rect->height, scale_comb_v);
1307 1398
1308 dev_geo(dev, "5: camera target %ux%u\n", pix->width, pix->height); 1399 dev_geo(dev, "5: camera target %ux%u\n", mf.width, mf.height);
1309 1400
1310 /* 6. - 9. */ 1401 /* 6. - 9. */
1311 pix->pixelformat = cam->camera_fmt->fourcc; 1402 mf.code = cam->code;
1312 pix->colorspace = cam->camera_fmt->colorspace; 1403 mf.field = pcdev->field;
1313 1404
1314 capsr = capture_save_reset(pcdev); 1405 capsr = capture_save_reset(pcdev);
1315 dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); 1406 dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
1316 1407
1317 /* Make relative to camera rectangle */ 1408 /* Make relative to camera rectangle */
1318 rect->left -= cam_rect->left; 1409 rect->left -= cam_rect->left;
1319 rect->top -= cam_rect->top; 1410 rect->top -= cam_rect->top;
1320 1411
1321 f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1412 ret = client_scale(icd, cam_rect, rect, ceu_rect, &mf,
1322 1413 pcdev->image_mode &&
1323 ret = client_scale(icd, cam_rect, rect, ceu_rect, &f, 1414 V4L2_FIELD_NONE == pcdev->field);
1324 pcdev->image_mode && !pcdev->is_interlaced);
1325 1415
1326 dev_geo(dev, "6-9: %d\n", ret); 1416 dev_geo(dev, "6-9: %d\n", ret);
1327 1417
@@ -1368,8 +1458,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
1368 struct sh_mobile_ceu_dev *pcdev = ici->priv; 1458 struct sh_mobile_ceu_dev *pcdev = ici->priv;
1369 struct sh_mobile_ceu_cam *cam = icd->host_priv; 1459 struct sh_mobile_ceu_cam *cam = icd->host_priv;
1370 struct v4l2_pix_format *pix = &f->fmt.pix; 1460 struct v4l2_pix_format *pix = &f->fmt.pix;
1371 struct v4l2_format cam_f = *f; 1461 struct v4l2_mbus_framefmt mf;
1372 struct v4l2_pix_format *cam_pix = &cam_f.fmt.pix;
1373 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1462 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1374 struct device *dev = icd->dev.parent; 1463 struct device *dev = icd->dev.parent;
1375 __u32 pixfmt = pix->pixelformat; 1464 __u32 pixfmt = pix->pixelformat;
@@ -1379,18 +1468,20 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
1379 unsigned int scale_cam_h, scale_cam_v; 1468 unsigned int scale_cam_h, scale_cam_v;
1380 u16 scale_v, scale_h; 1469 u16 scale_v, scale_h;
1381 int ret; 1470 int ret;
1382 bool is_interlaced, image_mode; 1471 bool image_mode;
1472 enum v4l2_field field;
1383 1473
1384 switch (pix->field) { 1474 switch (pix->field) {
1385 case V4L2_FIELD_INTERLACED:
1386 is_interlaced = true;
1387 break;
1388 case V4L2_FIELD_ANY:
1389 default: 1475 default:
1390 pix->field = V4L2_FIELD_NONE; 1476 pix->field = V4L2_FIELD_NONE;
1391 /* fall-through */ 1477 /* fall-through */
1478 case V4L2_FIELD_INTERLACED_TB:
1479 case V4L2_FIELD_INTERLACED_BT:
1392 case V4L2_FIELD_NONE: 1480 case V4L2_FIELD_NONE:
1393 is_interlaced = false; 1481 field = pix->field;
1482 break;
1483 case V4L2_FIELD_INTERLACED:
1484 field = V4L2_FIELD_INTERLACED_TB;
1394 break; 1485 break;
1395 } 1486 }
1396 1487
@@ -1438,9 +1529,11 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
1438 * 4. Calculate camera output window by applying combined scales to real 1529 * 4. Calculate camera output window by applying combined scales to real
1439 * input window. 1530 * input window.
1440 */ 1531 */
1441 cam_pix->width = scale_down(cam_rect->width, scale_h); 1532 mf.width = scale_down(cam_rect->width, scale_h);
1442 cam_pix->height = scale_down(cam_rect->height, scale_v); 1533 mf.height = scale_down(cam_rect->height, scale_v);
1443 cam_pix->pixelformat = xlate->cam_fmt->fourcc; 1534 mf.field = pix->field;
1535 mf.colorspace = pix->colorspace;
1536 mf.code = xlate->code;
1444 1537
1445 switch (pixfmt) { 1538 switch (pixfmt) {
1446 case V4L2_PIX_FMT_NV12: 1539 case V4L2_PIX_FMT_NV12:
@@ -1453,51 +1546,61 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
1453 image_mode = false; 1546 image_mode = false;
1454 } 1547 }
1455 1548
1456 dev_geo(dev, "4: camera output %ux%u\n", 1549 dev_geo(dev, "4: camera output %ux%u\n", mf.width, mf.height);
1457 cam_pix->width, cam_pix->height);
1458 1550
1459 /* 5. - 9. */ 1551 /* 5. - 9. */
1460 ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &cam_f, 1552 ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &mf,
1461 image_mode && !is_interlaced); 1553 image_mode && V4L2_FIELD_NONE == field);
1462 1554
1463 dev_geo(dev, "5-9: client scale %d\n", ret); 1555 dev_geo(dev, "5-9: client scale %d\n", ret);
1464 1556
1465 /* Done with the camera. Now see if we can improve the result */ 1557 /* Done with the camera. Now see if we can improve the result */
1466 1558
1467 dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n", 1559 dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
1468 ret, cam_pix->width, cam_pix->height, pix->width, pix->height); 1560 ret, mf.width, mf.height, pix->width, pix->height);
1469 if (ret < 0) 1561 if (ret < 0)
1470 return ret; 1562 return ret;
1471 1563
1564 if (mf.code != xlate->code)
1565 return -EINVAL;
1566
1472 /* 10. Use CEU scaling to scale to the requested user window. */ 1567 /* 10. Use CEU scaling to scale to the requested user window. */
1473 1568
1474 /* We cannot scale up */ 1569 /* We cannot scale up */
1475 if (pix->width > cam_pix->width) 1570 if (pix->width > mf.width)
1476 pix->width = cam_pix->width; 1571 pix->width = mf.width;
1477 if (pix->width > ceu_rect.width) 1572 if (pix->width > ceu_rect.width)
1478 pix->width = ceu_rect.width; 1573 pix->width = ceu_rect.width;
1479 1574
1480 if (pix->height > cam_pix->height) 1575 if (pix->height > mf.height)
1481 pix->height = cam_pix->height; 1576 pix->height = mf.height;
1482 if (pix->height > ceu_rect.height) 1577 if (pix->height > ceu_rect.height)
1483 pix->height = ceu_rect.height; 1578 pix->height = ceu_rect.height;
1484 1579
1485 /* Let's rock: scale pix->{width x height} down to width x height */ 1580 pix->colorspace = mf.colorspace;
1486 scale_h = calc_scale(ceu_rect.width, &pix->width); 1581
1487 scale_v = calc_scale(ceu_rect.height, &pix->height); 1582 if (image_mode) {
1583 /* Scale pix->{width x height} down to width x height */
1584 scale_h = calc_scale(ceu_rect.width, &pix->width);
1585 scale_v = calc_scale(ceu_rect.height, &pix->height);
1586
1587 pcdev->cflcr = scale_h | (scale_v << 16);
1588 } else {
1589 pix->width = ceu_rect.width;
1590 pix->height = ceu_rect.height;
1591 scale_h = scale_v = 0;
1592 pcdev->cflcr = 0;
1593 }
1488 1594
1489 dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", 1595 dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
1490 ceu_rect.width, scale_h, pix->width, 1596 ceu_rect.width, scale_h, pix->width,
1491 ceu_rect.height, scale_v, pix->height); 1597 ceu_rect.height, scale_v, pix->height);
1492 1598
1493 pcdev->cflcr = scale_h | (scale_v << 16); 1599 cam->code = xlate->code;
1600 cam->ceu_rect = ceu_rect;
1601 icd->current_fmt = xlate;
1494 1602
1495 icd->buswidth = xlate->buswidth; 1603 pcdev->field = field;
1496 icd->current_fmt = xlate->host_fmt;
1497 cam->camera_fmt = xlate->cam_fmt;
1498 cam->ceu_rect = ceu_rect;
1499
1500 pcdev->is_interlaced = is_interlaced;
1501 pcdev->image_mode = image_mode; 1604 pcdev->image_mode = image_mode;
1502 1605
1503 return 0; 1606 return 0;
@@ -1509,6 +1612,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1509 const struct soc_camera_format_xlate *xlate; 1612 const struct soc_camera_format_xlate *xlate;
1510 struct v4l2_pix_format *pix = &f->fmt.pix; 1613 struct v4l2_pix_format *pix = &f->fmt.pix;
1511 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1614 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1615 struct v4l2_mbus_framefmt mf;
1512 __u32 pixfmt = pix->pixelformat; 1616 __u32 pixfmt = pix->pixelformat;
1513 int width, height; 1617 int width, height;
1514 int ret; 1618 int ret;
@@ -1527,18 +1631,27 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1527 width = pix->width; 1631 width = pix->width;
1528 height = pix->height; 1632 height = pix->height;
1529 1633
1530 pix->bytesperline = pix->width * 1634 pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
1531 DIV_ROUND_UP(xlate->host_fmt->depth, 8); 1635 if (pix->bytesperline < 0)
1532 pix->sizeimage = pix->height * pix->bytesperline; 1636 return pix->bytesperline;
1533 1637 pix->sizeimage = height * pix->bytesperline;
1534 pix->pixelformat = xlate->cam_fmt->fourcc;
1535 1638
1536 /* limit to sensor capabilities */ 1639 /* limit to sensor capabilities */
1537 ret = v4l2_subdev_call(sd, video, try_fmt, f); 1640 mf.width = pix->width;
1538 pix->pixelformat = pixfmt; 1641 mf.height = pix->height;
1642 mf.field = pix->field;
1643 mf.code = xlate->code;
1644 mf.colorspace = pix->colorspace;
1645
1646 ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
1539 if (ret < 0) 1647 if (ret < 0)
1540 return ret; 1648 return ret;
1541 1649
1650 pix->width = mf.width;
1651 pix->height = mf.height;
1652 pix->field = mf.field;
1653 pix->colorspace = mf.colorspace;
1654
1542 switch (pixfmt) { 1655 switch (pixfmt) {
1543 case V4L2_PIX_FMT_NV12: 1656 case V4L2_PIX_FMT_NV12:
1544 case V4L2_PIX_FMT_NV21: 1657 case V4L2_PIX_FMT_NV21:
@@ -1547,21 +1660,25 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1547 /* FIXME: check against rect_max after converting soc-camera */ 1660 /* FIXME: check against rect_max after converting soc-camera */
1548 /* We can scale precisely, need a bigger image from camera */ 1661 /* We can scale precisely, need a bigger image from camera */
1549 if (pix->width < width || pix->height < height) { 1662 if (pix->width < width || pix->height < height) {
1550 int tmp_w = pix->width, tmp_h = pix->height; 1663 /*
1551 pix->width = 2560; 1664 * We presume the sensor behaves sanely, i.e., if
1552 pix->height = 1920; 1665 * requested a bigger rectangle, it will not return a
1553 ret = v4l2_subdev_call(sd, video, try_fmt, f); 1666 * smaller one.
1667 */
1668 mf.width = 2560;
1669 mf.height = 1920;
1670 ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
1554 if (ret < 0) { 1671 if (ret < 0) {
1555 /* Shouldn't actually happen... */ 1672 /* Shouldn't actually happen... */
1556 dev_err(icd->dev.parent, 1673 dev_err(icd->dev.parent,
1557 "FIXME: try_fmt() returned %d\n", ret); 1674 "FIXME: client try_fmt() = %d\n", ret);
1558 pix->width = tmp_w; 1675 return ret;
1559 pix->height = tmp_h;
1560 } 1676 }
1561 } 1677 }
1562 if (pix->width > width) 1678 /* We will scale exactly */
1679 if (mf.width > width)
1563 pix->width = width; 1680 pix->width = width;
1564 if (pix->height > height) 1681 if (mf.height > height)
1565 pix->height = height; 1682 pix->height = height;
1566 } 1683 }
1567 1684
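
From userspace this negotiation is driven through VIDIOC_TRY_FMT; a minimal (hypothetical) caller asking for NV12 at a given size would look like the sketch below, with the driver adjusting width, height, field and colorspace as the hunks above show:

    /* Minimal userspace sketch exercising the try_fmt path above */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int try_nv12(int fd, unsigned int width, unsigned int height)
    {
            struct v4l2_format f;

            memset(&f, 0, sizeof(f));
            f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            f.fmt.pix.width = width;
            f.fmt.pix.height = height;
            f.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
            f.fmt.pix.field = V4L2_FIELD_NONE;

            return ioctl(fd, VIDIOC_TRY_FMT, &f);  /* driver may adjust the fields */
    }
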
@@ -1573,10 +1690,12 @@ static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
1573{ 1690{
1574 int i; 1691 int i;
1575 1692
1576 /* This is for locking debugging only. I removed spinlocks and now I 1693 /*
1694 * This is for locking debugging only. I removed spinlocks and now I
1577 * check whether .prepare is ever called on a linked buffer, or whether 1695 * check whether .prepare is ever called on a linked buffer, or whether
1578 * a dma IRQ can occur for an in-work or unlinked buffer. Until now 1696 * a dma IRQ can occur for an in-work or unlinked buffer. Until now
1579 * it hadn't triggered */ 1697 * it hadn't triggered
1698 */
1580 for (i = 0; i < p->count; i++) { 1699 for (i = 0; i < p->count; i++) {
1581 struct sh_mobile_ceu_buffer *buf; 1700 struct sh_mobile_ceu_buffer *buf;
1582 1701
@@ -1624,8 +1743,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
1624 &sh_mobile_ceu_videobuf_ops, 1743 &sh_mobile_ceu_videobuf_ops,
1625 icd->dev.parent, &pcdev->lock, 1744 icd->dev.parent, &pcdev->lock,
1626 V4L2_BUF_TYPE_VIDEO_CAPTURE, 1745 V4L2_BUF_TYPE_VIDEO_CAPTURE,
1627 pcdev->is_interlaced ? 1746 pcdev->field,
1628 V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE,
1629 sizeof(struct sh_mobile_ceu_buffer), 1747 sizeof(struct sh_mobile_ceu_buffer),
1630 icd); 1748 icd);
1631} 1749}
@@ -1654,7 +1772,7 @@ static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
1654 1772
1655 switch (ctrl->id) { 1773 switch (ctrl->id) {
1656 case V4L2_CID_SHARPNESS: 1774 case V4L2_CID_SHARPNESS:
1657 switch (icd->current_fmt->fourcc) { 1775 switch (icd->current_fmt->host_fmt->fourcc) {
1658 case V4L2_PIX_FMT_NV12: 1776 case V4L2_PIX_FMT_NV12:
1659 case V4L2_PIX_FMT_NV21: 1777 case V4L2_PIX_FMT_NV21:
1660 case V4L2_PIX_FMT_NV16: 1778 case V4L2_PIX_FMT_NV16:
@@ -1825,7 +1943,7 @@ static int sh_mobile_ceu_runtime_nop(struct device *dev)
1825 return 0; 1943 return 0;
1826} 1944}
1827 1945
1828static struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { 1946static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
1829 .runtime_suspend = sh_mobile_ceu_runtime_nop, 1947 .runtime_suspend = sh_mobile_ceu_runtime_nop,
1830 .runtime_resume = sh_mobile_ceu_runtime_nop, 1948 .runtime_resume = sh_mobile_ceu_runtime_nop,
1831}; 1949};
@@ -1836,7 +1954,7 @@ static struct platform_driver sh_mobile_ceu_driver = {
1836 .pm = &sh_mobile_ceu_dev_pm_ops, 1954 .pm = &sh_mobile_ceu_dev_pm_ops,
1837 }, 1955 },
1838 .probe = sh_mobile_ceu_probe, 1956 .probe = sh_mobile_ceu_probe,
1839 .remove = __exit_p(sh_mobile_ceu_remove), 1957 .remove = __devexit_p(sh_mobile_ceu_remove),
1840}; 1958};
1841 1959
1842static int __init sh_mobile_ceu_init(void) 1960static int __init sh_mobile_ceu_init(void)
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 4a7711c3e745..cbf8087b286f 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -1007,8 +1007,8 @@ static int sn9c102_stream_interrupt(struct sn9c102_device* cam)
1007 else if (cam->stream != STREAM_OFF) { 1007 else if (cam->stream != STREAM_OFF) {
1008 cam->state |= DEV_MISCONFIGURED; 1008 cam->state |= DEV_MISCONFIGURED;
1009 DBG(1, "URB timeout reached. The camera is misconfigured. " 1009 DBG(1, "URB timeout reached. The camera is misconfigured. "
1010 "To use it, close and open /dev/video%d again.", 1010 "To use it, close and open %s again.",
1011 cam->v4ldev->num); 1011 video_device_node_name(cam->v4ldev));
1012 return -EIO; 1012 return -EIO;
1013 } 1013 }
1014 1014
@@ -1734,7 +1734,8 @@ static void sn9c102_release_resources(struct kref *kref)
1734 1734
1735 cam = container_of(kref, struct sn9c102_device, kref); 1735 cam = container_of(kref, struct sn9c102_device, kref);
1736 1736
1737 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num); 1737 DBG(2, "V4L2 device %s deregistered",
1738 video_device_node_name(cam->v4ldev));
1738 video_set_drvdata(cam->v4ldev, NULL); 1739 video_set_drvdata(cam->v4ldev, NULL);
1739 video_unregister_device(cam->v4ldev); 1740 video_unregister_device(cam->v4ldev);
1740 usb_put_dev(cam->usbdev); 1741 usb_put_dev(cam->usbdev);
@@ -1791,8 +1792,8 @@ static int sn9c102_open(struct file *filp)
1791 } 1792 }
1792 1793
1793 if (cam->users) { 1794 if (cam->users) {
1794 DBG(2, "Device /dev/video%d is already in use", 1795 DBG(2, "Device %s is already in use",
1795 cam->v4ldev->num); 1796 video_device_node_name(cam->v4ldev));
1796 DBG(3, "Simultaneous opens are not supported"); 1797 DBG(3, "Simultaneous opens are not supported");
1797 /* 1798 /*
1798 open() must follow the open flags and should block 1799 open() must follow the open flags and should block
@@ -1845,7 +1846,7 @@ static int sn9c102_open(struct file *filp)
1845 cam->frame_count = 0; 1846 cam->frame_count = 0;
1846 sn9c102_empty_framequeues(cam); 1847 sn9c102_empty_framequeues(cam);
1847 1848
1848 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num); 1849 DBG(3, "Video device %s is open", video_device_node_name(cam->v4ldev));
1849 1850
1850out: 1851out:
1851 mutex_unlock(&cam->open_mutex); 1852 mutex_unlock(&cam->open_mutex);
@@ -1870,7 +1871,7 @@ static int sn9c102_release(struct file *filp)
1870 cam->users--; 1871 cam->users--;
1871 wake_up_interruptible_nr(&cam->wait_open, 1); 1872 wake_up_interruptible_nr(&cam->wait_open, 1);
1872 1873
1873 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num); 1874 DBG(3, "Video device %s closed", video_device_node_name(cam->v4ldev));
1874 1875
1875 kref_put(&cam->kref, sn9c102_release_resources); 1876 kref_put(&cam->kref, sn9c102_release_resources);
1876 1877
@@ -2433,8 +2434,8 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
2433 if (err) { /* atomic, no rollback in ioctl() */ 2434 if (err) { /* atomic, no rollback in ioctl() */
2434 cam->state |= DEV_MISCONFIGURED; 2435 cam->state |= DEV_MISCONFIGURED;
2435 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 2436 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
2436 "use the camera, close and open /dev/video%d again.", 2437 "use the camera, close and open %s again.",
2437 cam->v4ldev->num); 2438 video_device_node_name(cam->v4ldev));
2438 return -EIO; 2439 return -EIO;
2439 } 2440 }
2440 2441
@@ -2446,8 +2447,8 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
2446 nbuffers != sn9c102_request_buffers(cam, nbuffers, cam->io)) { 2447 nbuffers != sn9c102_request_buffers(cam, nbuffers, cam->io)) {
2447 cam->state |= DEV_MISCONFIGURED; 2448 cam->state |= DEV_MISCONFIGURED;
2448 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 2449 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
2449 "use the camera, close and open /dev/video%d again.", 2450 "use the camera, close and open %s again.",
2450 cam->v4ldev->num); 2451 video_device_node_name(cam->v4ldev));
2451 return -ENOMEM; 2452 return -ENOMEM;
2452 } 2453 }
2453 2454
@@ -2690,8 +2691,8 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2690 if (err) { /* atomic, no rollback in ioctl() */ 2691 if (err) { /* atomic, no rollback in ioctl() */
2691 cam->state |= DEV_MISCONFIGURED; 2692 cam->state |= DEV_MISCONFIGURED;
2692 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 2693 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
2693 "use the camera, close and open /dev/video%d again.", 2694 "use the camera, close and open %s again.",
2694 cam->v4ldev->num); 2695 video_device_node_name(cam->v4ldev));
2695 return -EIO; 2696 return -EIO;
2696 } 2697 }
2697 2698
@@ -2702,8 +2703,8 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2702 nbuffers != sn9c102_request_buffers(cam, nbuffers, cam->io)) { 2703 nbuffers != sn9c102_request_buffers(cam, nbuffers, cam->io)) {
2703 cam->state |= DEV_MISCONFIGURED; 2704 cam->state |= DEV_MISCONFIGURED;
2704 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 2705 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
2705 "use the camera, close and open /dev/video%d again.", 2706 "use the camera, close and open %s again.",
2706 cam->v4ldev->num); 2707 video_device_node_name(cam->v4ldev));
2707 return -ENOMEM; 2708 return -ENOMEM;
2708 } 2709 }
2709 2710
@@ -2748,9 +2749,9 @@ sn9c102_vidioc_s_jpegcomp(struct sn9c102_device* cam, void __user * arg)
2748 err += sn9c102_set_compression(cam, &jc); 2749 err += sn9c102_set_compression(cam, &jc);
2749 if (err) { /* atomic, no rollback in ioctl() */ 2750 if (err) { /* atomic, no rollback in ioctl() */
2750 cam->state |= DEV_MISCONFIGURED; 2751 cam->state |= DEV_MISCONFIGURED;
2751 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 2752 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware problems. "
2752 "problems. To use the camera, close and open " 2753 "To use the camera, close and open %s again.",
2753 "/dev/video%d again.", cam->v4ldev->num); 2754 video_device_node_name(cam->v4ldev));
2754 return -EIO; 2755 return -EIO;
2755 } 2756 }
2756 2757
@@ -3328,7 +3329,6 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3328 3329
3329 strcpy(cam->v4ldev->name, "SN9C1xx PC Camera"); 3330 strcpy(cam->v4ldev->name, "SN9C1xx PC Camera");
3330 cam->v4ldev->fops = &sn9c102_fops; 3331 cam->v4ldev->fops = &sn9c102_fops;
3331 cam->v4ldev->minor = video_nr[dev_nr];
3332 cam->v4ldev->release = video_device_release; 3332 cam->v4ldev->release = video_device_release;
3333 cam->v4ldev->parent = &udev->dev; 3333 cam->v4ldev->parent = &udev->dev;
3334 3334
@@ -3346,7 +3346,8 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3346 goto fail; 3346 goto fail;
3347 } 3347 }
3348 3348
3349 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num); 3349 DBG(2, "V4L2 device registered as %s",
3350 video_device_node_name(cam->v4ldev));
3350 3351
3351 video_set_drvdata(cam->v4ldev, cam); 3352 video_set_drvdata(cam->v4ldev, cam);
3352 cam->module_param.force_munmap = force_munmap[dev_nr]; 3353 cam->module_param.force_munmap = force_munmap[dev_nr];
@@ -3398,9 +3399,9 @@ static void sn9c102_usb_disconnect(struct usb_interface* intf)
3398 DBG(2, "Disconnecting %s...", cam->v4ldev->name); 3399 DBG(2, "Disconnecting %s...", cam->v4ldev->name);
3399 3400
3400 if (cam->users) { 3401 if (cam->users) {
3401 DBG(2, "Device /dev/video%d is open! Deregistration and " 3402 DBG(2, "Device %s is open! Deregistration and memory "
3402 "memory deallocation are deferred.", 3403 "deallocation are deferred.",
3403 cam->v4ldev->num); 3404 video_device_node_name(cam->v4ldev));
3404 cam->state |= DEV_MISCONFIGURED; 3405 cam->state |= DEV_MISCONFIGURED;
3405 sn9c102_stop_transfer(cam); 3406 sn9c102_stop_transfer(cam);
3406 cam->state |= DEV_DISCONNECTED; 3407 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 95fdeb23c2c1..6b3fbcca7747 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -31,6 +31,7 @@
31#include <media/v4l2-ioctl.h> 31#include <media/v4l2-ioctl.h>
32#include <media/v4l2-dev.h> 32#include <media/v4l2-dev.h>
33#include <media/videobuf-core.h> 33#include <media/videobuf-core.h>
34#include <media/soc_mediabus.h>
34 35
35/* Default to VGA resolution */ 36/* Default to VGA resolution */
36#define DEFAULT_WIDTH 640 37#define DEFAULT_WIDTH 640
@@ -40,18 +41,6 @@ static LIST_HEAD(hosts);
40static LIST_HEAD(devices); 41static LIST_HEAD(devices);
41static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */ 42static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
42 43
43const struct soc_camera_data_format *soc_camera_format_by_fourcc(
44 struct soc_camera_device *icd, unsigned int fourcc)
45{
46 unsigned int i;
47
48 for (i = 0; i < icd->num_formats; i++)
49 if (icd->formats[i].fourcc == fourcc)
50 return icd->formats + i;
51 return NULL;
52}
53EXPORT_SYMBOL(soc_camera_format_by_fourcc);
54
55const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( 44const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
56 struct soc_camera_device *icd, unsigned int fourcc) 45 struct soc_camera_device *icd, unsigned int fourcc)
57{ 46{
@@ -207,21 +196,26 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
207/* Always entered with .video_lock held */ 196/* Always entered with .video_lock held */
208static int soc_camera_init_user_formats(struct soc_camera_device *icd) 197static int soc_camera_init_user_formats(struct soc_camera_device *icd)
209{ 198{
199 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
210 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 200 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
211 int i, fmts = 0, ret; 201 int i, fmts = 0, raw_fmts = 0, ret;
202 enum v4l2_mbus_pixelcode code;
203
204 while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code))
205 raw_fmts++;
212 206
213 if (!ici->ops->get_formats) 207 if (!ici->ops->get_formats)
214 /* 208 /*
215 * Fallback mode - the host will have to serve all 209 * Fallback mode - the host will have to serve all
216 * sensor-provided formats one-to-one to the user 210 * sensor-provided formats one-to-one to the user
217 */ 211 */
218 fmts = icd->num_formats; 212 fmts = raw_fmts;
219 else 213 else
220 /* 214 /*
221 * First pass - only count formats this host-sensor 215 * First pass - only count formats this host-sensor
222 * configuration can provide 216 * configuration can provide
223 */ 217 */
224 for (i = 0; i < icd->num_formats; i++) { 218 for (i = 0; i < raw_fmts; i++) {
225 ret = ici->ops->get_formats(icd, i, NULL); 219 ret = ici->ops->get_formats(icd, i, NULL);
226 if (ret < 0) 220 if (ret < 0)
227 return ret; 221 return ret;
@@ -242,11 +236,12 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
242 236
243 /* Second pass - actually fill data formats */ 237 /* Second pass - actually fill data formats */
244 fmts = 0; 238 fmts = 0;
245 for (i = 0; i < icd->num_formats; i++) 239 for (i = 0; i < raw_fmts; i++)
246 if (!ici->ops->get_formats) { 240 if (!ici->ops->get_formats) {
247 icd->user_formats[i].host_fmt = icd->formats + i; 241 v4l2_subdev_call(sd, video, enum_mbus_fmt, i, &code);
248 icd->user_formats[i].cam_fmt = icd->formats + i; 242 icd->user_formats[i].host_fmt =
249 icd->user_formats[i].buswidth = icd->formats[i].depth; 243 soc_mbus_get_fmtdesc(code);
244 icd->user_formats[i].code = code;
250 } else { 245 } else {
251 ret = ici->ops->get_formats(icd, i, 246 ret = ici->ops->get_formats(icd, i,
252 &icd->user_formats[fmts]); 247 &icd->user_formats[fmts]);
@@ -255,7 +250,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
255 fmts += ret; 250 fmts += ret;
256 } 251 }
257 252
258 icd->current_fmt = icd->user_formats[0].host_fmt; 253 icd->current_fmt = &icd->user_formats[0];
259 254
260 return 0; 255 return 0;
261 256
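
soc_camera_init_user_formats() now sizes the translation table by calling the subdev's enum_mbus_fmt operation until it fails. The prototype implied by the call above is (sd, index, &code); a hedged sketch of the sensor-side counterpart (names hypothetical):

    /* Hypothetical sensor driver op backing the counting loop above */
    static const enum v4l2_mbus_pixelcode example_codes[] = {
            V4L2_MBUS_FMT_YUYV8_2X8_LE,
            V4L2_MBUS_FMT_YUYV8_2X8_BE,
    };

    static int example_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
                                     enum v4l2_mbus_pixelcode *code)
    {
            if (index >= ARRAY_SIZE(example_codes))
                    return -EINVAL;         /* host stops counting here */

            *code = example_codes[index];
            return 0;
    }
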
@@ -281,7 +276,7 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
281#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ 276#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
282 ((x) >> 24) & 0xff 277 ((x) >> 24) & 0xff
283 278
284/* Called with .vb_lock held */ 279/* Called with .vb_lock held, or from the first open(2), see comment there */
285static int soc_camera_set_fmt(struct soc_camera_file *icf, 280static int soc_camera_set_fmt(struct soc_camera_file *icf,
286 struct v4l2_format *f) 281 struct v4l2_format *f)
287{ 282{
@@ -302,7 +297,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
302 if (ret < 0) { 297 if (ret < 0) {
303 return ret; 298 return ret;
304 } else if (!icd->current_fmt || 299 } else if (!icd->current_fmt ||
305 icd->current_fmt->fourcc != pix->pixelformat) { 300 icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
306 dev_err(&icd->dev, 301 dev_err(&icd->dev,
307 "Host driver hasn't set up current format correctly!\n"); 302 "Host driver hasn't set up current format correctly!\n");
308 return -EINVAL; 303 return -EINVAL;
@@ -310,6 +305,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
310 305
311 icd->user_width = pix->width; 306 icd->user_width = pix->width;
312 icd->user_height = pix->height; 307 icd->user_height = pix->height;
308 icd->colorspace = pix->colorspace;
313 icf->vb_vidq.field = 309 icf->vb_vidq.field =
314 icd->field = pix->field; 310 icd->field = pix->field;
315 311
@@ -369,8 +365,9 @@ static int soc_camera_open(struct file *file)
369 .width = icd->user_width, 365 .width = icd->user_width,
370 .height = icd->user_height, 366 .height = icd->user_height,
371 .field = icd->field, 367 .field = icd->field,
372 .pixelformat = icd->current_fmt->fourcc, 368 .colorspace = icd->colorspace,
373 .colorspace = icd->current_fmt->colorspace, 369 .pixelformat =
370 icd->current_fmt->host_fmt->fourcc,
374 }, 371 },
375 }; 372 };
376 373
@@ -390,7 +387,12 @@ static int soc_camera_open(struct file *file)
390 goto eiciadd; 387 goto eiciadd;
391 } 388 }
392 389
393 /* Try to configure with default parameters */ 390 /*
391 * Try to configure with default parameters. Notice: this is the
 392 * very first open, so we cannot race against other calls,
393 * apart from someone else calling open() simultaneously, but
394 * .video_lock is protecting us against it.
395 */
394 ret = soc_camera_set_fmt(icf, &f); 396 ret = soc_camera_set_fmt(icf, &f);
395 if (ret < 0) 397 if (ret < 0)
396 goto esfmt; 398 goto esfmt;
@@ -534,7 +536,7 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
534{ 536{
535 struct soc_camera_file *icf = file->private_data; 537 struct soc_camera_file *icf = file->private_data;
536 struct soc_camera_device *icd = icf->icd; 538 struct soc_camera_device *icd = icf->icd;
537 const struct soc_camera_data_format *format; 539 const struct soc_mbus_pixelfmt *format;
538 540
539 WARN_ON(priv != file->private_data); 541 WARN_ON(priv != file->private_data);
540 542
@@ -543,7 +545,8 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
543 545
544 format = icd->user_formats[f->index].host_fmt; 546 format = icd->user_formats[f->index].host_fmt;
545 547
546 strlcpy(f->description, format->name, sizeof(f->description)); 548 if (format->name)
549 strlcpy(f->description, format->name, sizeof(f->description));
547 f->pixelformat = format->fourcc; 550 f->pixelformat = format->fourcc;
548 return 0; 551 return 0;
549} 552}
@@ -560,12 +563,15 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
560 pix->width = icd->user_width; 563 pix->width = icd->user_width;
561 pix->height = icd->user_height; 564 pix->height = icd->user_height;
562 pix->field = icf->vb_vidq.field; 565 pix->field = icf->vb_vidq.field;
563 pix->pixelformat = icd->current_fmt->fourcc; 566 pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
564 pix->bytesperline = pix->width * 567 pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
565 DIV_ROUND_UP(icd->current_fmt->depth, 8); 568 icd->current_fmt->host_fmt);
569 pix->colorspace = icd->colorspace;
570 if (pix->bytesperline < 0)
571 return pix->bytesperline;
566 pix->sizeimage = pix->height * pix->bytesperline; 572 pix->sizeimage = pix->height * pix->bytesperline;
567 dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n", 573 dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n",
568 icd->current_fmt->fourcc); 574 icd->current_fmt->host_fmt->fourcc);
569 return 0; 575 return 0;
570} 576}
571 577
@@ -621,8 +627,10 @@ static int soc_camera_streamoff(struct file *file, void *priv,
621 627
622 mutex_lock(&icd->video_lock); 628 mutex_lock(&icd->video_lock);
623 629
624 /* This calls buf_release from host driver's videobuf_queue_ops for all 630 /*
625 * remaining buffers. When the last buffer is freed, stop capture */ 631 * This calls buf_release from host driver's videobuf_queue_ops for all
632 * remaining buffers. When the last buffer is freed, stop capture
633 */
626 videobuf_streamoff(&icf->vb_vidq); 634 videobuf_streamoff(&icf->vb_vidq);
627 635
628 v4l2_subdev_call(sd, video, s_stream, 0); 636 v4l2_subdev_call(sd, video, s_stream, 0);
@@ -892,7 +900,7 @@ static int soc_camera_probe(struct device *dev)
892 struct soc_camera_link *icl = to_soc_camera_link(icd); 900 struct soc_camera_link *icl = to_soc_camera_link(icd);
893 struct device *control = NULL; 901 struct device *control = NULL;
894 struct v4l2_subdev *sd; 902 struct v4l2_subdev *sd;
895 struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE}; 903 struct v4l2_mbus_framefmt mf;
896 int ret; 904 int ret;
897 905
898 dev_info(dev, "Probing %s\n", dev_name(dev)); 906 dev_info(dev, "Probing %s\n", dev_name(dev));
@@ -963,9 +971,11 @@ static int soc_camera_probe(struct device *dev)
963 971
964 /* Try to improve our guess of a reasonable window format */ 972 /* Try to improve our guess of a reasonable window format */
965 sd = soc_camera_to_subdev(icd); 973 sd = soc_camera_to_subdev(icd);
966 if (!v4l2_subdev_call(sd, video, g_fmt, &f)) { 974 if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) {
967 icd->user_width = f.fmt.pix.width; 975 icd->user_width = mf.width;
968 icd->user_height = f.fmt.pix.height; 976 icd->user_height = mf.height;
977 icd->colorspace = mf.colorspace;
978 icd->field = mf.field;
969 } 979 }
970 980
971 /* Do we have to sysfs_remove_link() before device_unregister()? */ 981 /* Do we have to sysfs_remove_link() before device_unregister()? */
@@ -1004,8 +1014,10 @@ epower:
1004 return ret; 1014 return ret;
1005} 1015}
1006 1016
1007/* This is called on device_unregister, which only means we have to disconnect 1017/*
1008 * from the host, but not remove ourselves from the device list */ 1018 * This is called on device_unregister, which only means we have to disconnect
1019 * from the host, but not remove ourselves from the device list
1020 */
1009static int soc_camera_remove(struct device *dev) 1021static int soc_camera_remove(struct device *dev)
1010{ 1022{
1011 struct soc_camera_device *icd = to_soc_camera_dev(dev); 1023 struct soc_camera_device *icd = to_soc_camera_dev(dev);
@@ -1205,8 +1217,10 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
1205 } 1217 }
1206 1218
1207 if (num < 0) 1219 if (num < 0)
1208 /* ok, we have 256 cameras on this host... 1220 /*
1209 * man, stay reasonable... */ 1221 * ok, we have 256 cameras on this host...
1222 * man, stay reasonable...
1223 */
1210 return -ENOMEM; 1224 return -ENOMEM;
1211 1225
1212 icd->devnum = num; 1226 icd->devnum = num;
@@ -1268,7 +1282,6 @@ static int video_dev_create(struct soc_camera_device *icd)
1268 vdev->fops = &soc_camera_fops; 1282 vdev->fops = &soc_camera_fops;
1269 vdev->ioctl_ops = &soc_camera_ioctl_ops; 1283 vdev->ioctl_ops = &soc_camera_ioctl_ops;
1270 vdev->release = video_device_release; 1284 vdev->release = video_device_release;
1271 vdev->minor = -1;
1272 vdev->tvnorms = V4L2_STD_UNKNOWN; 1285 vdev->tvnorms = V4L2_STD_UNKNOWN;
1273 1286
1274 icd->vdev = vdev; 1287 icd->vdev = vdev;
@@ -1291,8 +1304,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
1291 !icd->ops->set_bus_param) 1304 !icd->ops->set_bus_param)
1292 return -EINVAL; 1305 return -EINVAL;
1293 1306
1294 ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, 1307 ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
1295 icd->vdev->minor);
1296 if (ret < 0) { 1308 if (ret < 0) {
1297 dev_err(&icd->dev, "video_register_device failed: %d\n", ret); 1309 dev_err(&icd->dev, "video_register_device failed: %d\n", ret);
1298 return ret; 1310 return ret;
@@ -1335,9 +1347,11 @@ escdevreg:
1335 return ret; 1347 return ret;
1336} 1348}
1337 1349
1338/* Only called on rmmod for each platform device, since they are not 1350/*
1351 * Only called on rmmod for each platform device, since they are not
1339 * hot-pluggable. Now we know, that all our users - hosts and devices have 1352 * hot-pluggable. Now we know, that all our users - hosts and devices have
1340 * been unloaded already */ 1353 * been unloaded already
1354 */
1341static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev) 1355static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
1342{ 1356{
1343 struct soc_camera_device *icd = platform_get_drvdata(pdev); 1357 struct soc_camera_device *icd = platform_get_drvdata(pdev);
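The soc_camera.c changes above move the core from per-client struct soc_camera_data_format tables to media-bus codes that are translated through soc_mediabus. As a rough sketch of the resulting flow - not code from this patch, with the helper name and error handling chosen purely for illustration - a host could walk a client's codes and look up the matching fourcc descriptions like this:

#include <linux/device.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>

/* Illustrative helper: list the fourccs a client can deliver. */
static int sketch_list_client_formats(struct soc_camera_device *icd)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	enum v4l2_mbus_pixelcode code;
	int idx;

	for (idx = 0;
	     !v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
	     idx++) {
		const struct soc_mbus_pixelfmt *fmt = soc_mbus_get_fmtdesc(code);

		if (!fmt)
			continue;	/* code not known to soc_mediabus */

		dev_dbg(&icd->dev, "format %d: %s, fourcc 0x%08x\n",
			idx, fmt->name, fmt->fourcc);
	}

	return idx ? 0 : -ENXIO;
}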
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index b6a575ce5da2..10b003a8be83 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -22,7 +22,6 @@
22 22
23struct soc_camera_platform_priv { 23struct soc_camera_platform_priv {
24 struct v4l2_subdev subdev; 24 struct v4l2_subdev subdev;
25 struct soc_camera_data_format format;
26}; 25};
27 26
28static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev) 27static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
@@ -58,36 +57,36 @@ soc_camera_platform_query_bus_param(struct soc_camera_device *icd)
58} 57}
59 58
60static int soc_camera_platform_try_fmt(struct v4l2_subdev *sd, 59static int soc_camera_platform_try_fmt(struct v4l2_subdev *sd,
61 struct v4l2_format *f) 60 struct v4l2_mbus_framefmt *mf)
62{ 61{
63 struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); 62 struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
64 struct v4l2_pix_format *pix = &f->fmt.pix;
65 63
66 pix->width = p->format.width; 64 mf->width = p->format.width;
67 pix->height = p->format.height; 65 mf->height = p->format.height;
66 mf->code = p->format.code;
67 mf->colorspace = p->format.colorspace;
68
68 return 0; 69 return 0;
69} 70}
70 71
71static void soc_camera_platform_video_probe(struct soc_camera_device *icd, 72static struct v4l2_subdev_core_ops platform_subdev_core_ops;
72 struct platform_device *pdev) 73
74static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, int index,
75 enum v4l2_mbus_pixelcode *code)
73{ 76{
74 struct soc_camera_platform_priv *priv = get_priv(pdev); 77 struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
75 struct soc_camera_platform_info *p = pdev->dev.platform_data;
76 78
77 priv->format.name = p->format_name; 79 if (index)
78 priv->format.depth = p->format_depth; 80 return -EINVAL;
79 priv->format.fourcc = p->format.pixelformat;
80 priv->format.colorspace = p->format.colorspace;
81 81
82 icd->formats = &priv->format; 82 *code = p->format.code;
83 icd->num_formats = 1; 83 return 0;
84} 84}
85 85
86static struct v4l2_subdev_core_ops platform_subdev_core_ops;
87
88static struct v4l2_subdev_video_ops platform_subdev_video_ops = { 86static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
89 .s_stream = soc_camera_platform_s_stream, 87 .s_stream = soc_camera_platform_s_stream,
90 .try_fmt = soc_camera_platform_try_fmt, 88 .try_mbus_fmt = soc_camera_platform_try_fmt,
89 .enum_mbus_fmt = soc_camera_platform_enum_fmt,
91}; 90};
92 91
93static struct v4l2_subdev_ops platform_subdev_ops = { 92static struct v4l2_subdev_ops platform_subdev_ops = {
@@ -128,13 +127,10 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
128 /* Set the control device reference */ 127 /* Set the control device reference */
129 dev_set_drvdata(&icd->dev, &pdev->dev); 128 dev_set_drvdata(&icd->dev, &pdev->dev);
130 129
131 icd->y_skip_top = 0; 130 icd->ops = &soc_camera_platform_ops;
132 icd->ops = &soc_camera_platform_ops;
133 131
134 ici = to_soc_camera_host(icd->dev.parent); 132 ici = to_soc_camera_host(icd->dev.parent);
135 133
136 soc_camera_platform_video_probe(icd, pdev);
137
138 v4l2_subdev_init(&priv->subdev, &platform_subdev_ops); 134 v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
139 v4l2_set_subdevdata(&priv->subdev, p); 135 v4l2_set_subdevdata(&priv->subdev, p);
140 strncpy(priv->subdev.name, dev_name(&pdev->dev), V4L2_SUBDEV_NAME_SIZE); 136 strncpy(priv->subdev.name, dev_name(&pdev->dev), V4L2_SUBDEV_NAME_SIZE);
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
new file mode 100644
index 000000000000..f8d5c87dc2aa
--- /dev/null
+++ b/drivers/media/video/soc_mediabus.c
@@ -0,0 +1,157 @@
1/*
2 * soc-camera media bus helper routines
3 *
4 * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include <media/v4l2-device.h>
15#include <media/v4l2-mediabus.h>
16#include <media/soc_mediabus.h>
17
18#define MBUS_IDX(f) (V4L2_MBUS_FMT_ ## f - V4L2_MBUS_FMT_FIXED - 1)
19
20static const struct soc_mbus_pixelfmt mbus_fmt[] = {
21 [MBUS_IDX(YUYV8_2X8_LE)] = {
22 .fourcc = V4L2_PIX_FMT_YUYV,
23 .name = "YUYV",
24 .bits_per_sample = 8,
25 .packing = SOC_MBUS_PACKING_2X8_PADHI,
26 .order = SOC_MBUS_ORDER_LE,
27 }, [MBUS_IDX(YVYU8_2X8_LE)] = {
28 .fourcc = V4L2_PIX_FMT_YVYU,
29 .name = "YVYU",
30 .bits_per_sample = 8,
31 .packing = SOC_MBUS_PACKING_2X8_PADHI,
32 .order = SOC_MBUS_ORDER_LE,
33 }, [MBUS_IDX(YUYV8_2X8_BE)] = {
34 .fourcc = V4L2_PIX_FMT_UYVY,
35 .name = "UYVY",
36 .bits_per_sample = 8,
37 .packing = SOC_MBUS_PACKING_2X8_PADHI,
38 .order = SOC_MBUS_ORDER_LE,
39 }, [MBUS_IDX(YVYU8_2X8_BE)] = {
40 .fourcc = V4L2_PIX_FMT_VYUY,
41 .name = "VYUY",
42 .bits_per_sample = 8,
43 .packing = SOC_MBUS_PACKING_2X8_PADHI,
44 .order = SOC_MBUS_ORDER_LE,
45 }, [MBUS_IDX(RGB555_2X8_PADHI_LE)] = {
46 .fourcc = V4L2_PIX_FMT_RGB555,
47 .name = "RGB555",
48 .bits_per_sample = 8,
49 .packing = SOC_MBUS_PACKING_2X8_PADHI,
50 .order = SOC_MBUS_ORDER_LE,
51 }, [MBUS_IDX(RGB555_2X8_PADHI_BE)] = {
52 .fourcc = V4L2_PIX_FMT_RGB555X,
53 .name = "RGB555X",
54 .bits_per_sample = 8,
55 .packing = SOC_MBUS_PACKING_2X8_PADHI,
56 .order = SOC_MBUS_ORDER_LE,
57 }, [MBUS_IDX(RGB565_2X8_LE)] = {
58 .fourcc = V4L2_PIX_FMT_RGB565,
59 .name = "RGB565",
60 .bits_per_sample = 8,
61 .packing = SOC_MBUS_PACKING_2X8_PADHI,
62 .order = SOC_MBUS_ORDER_LE,
63 }, [MBUS_IDX(RGB565_2X8_BE)] = {
64 .fourcc = V4L2_PIX_FMT_RGB565X,
65 .name = "RGB565X",
66 .bits_per_sample = 8,
67 .packing = SOC_MBUS_PACKING_2X8_PADHI,
68 .order = SOC_MBUS_ORDER_LE,
69 }, [MBUS_IDX(SBGGR8_1X8)] = {
70 .fourcc = V4L2_PIX_FMT_SBGGR8,
71 .name = "Bayer 8 BGGR",
72 .bits_per_sample = 8,
73 .packing = SOC_MBUS_PACKING_NONE,
74 .order = SOC_MBUS_ORDER_LE,
75 }, [MBUS_IDX(SBGGR10_1X10)] = {
76 .fourcc = V4L2_PIX_FMT_SBGGR10,
77 .name = "Bayer 10 BGGR",
78 .bits_per_sample = 10,
79 .packing = SOC_MBUS_PACKING_EXTEND16,
80 .order = SOC_MBUS_ORDER_LE,
81 }, [MBUS_IDX(GREY8_1X8)] = {
82 .fourcc = V4L2_PIX_FMT_GREY,
83 .name = "Grey",
84 .bits_per_sample = 8,
85 .packing = SOC_MBUS_PACKING_NONE,
86 .order = SOC_MBUS_ORDER_LE,
87 }, [MBUS_IDX(Y10_1X10)] = {
88 .fourcc = V4L2_PIX_FMT_Y10,
89 .name = "Grey 10bit",
90 .bits_per_sample = 10,
91 .packing = SOC_MBUS_PACKING_EXTEND16,
92 .order = SOC_MBUS_ORDER_LE,
93 }, [MBUS_IDX(SBGGR10_2X8_PADHI_LE)] = {
94 .fourcc = V4L2_PIX_FMT_SBGGR10,
95 .name = "Bayer 10 BGGR",
96 .bits_per_sample = 8,
97 .packing = SOC_MBUS_PACKING_2X8_PADHI,
98 .order = SOC_MBUS_ORDER_LE,
99 }, [MBUS_IDX(SBGGR10_2X8_PADLO_LE)] = {
100 .fourcc = V4L2_PIX_FMT_SBGGR10,
101 .name = "Bayer 10 BGGR",
102 .bits_per_sample = 8,
103 .packing = SOC_MBUS_PACKING_2X8_PADLO,
104 .order = SOC_MBUS_ORDER_LE,
105 }, [MBUS_IDX(SBGGR10_2X8_PADHI_BE)] = {
106 .fourcc = V4L2_PIX_FMT_SBGGR10,
107 .name = "Bayer 10 BGGR",
108 .bits_per_sample = 8,
109 .packing = SOC_MBUS_PACKING_2X8_PADHI,
110 .order = SOC_MBUS_ORDER_BE,
111 }, [MBUS_IDX(SBGGR10_2X8_PADLO_BE)] = {
112 .fourcc = V4L2_PIX_FMT_SBGGR10,
113 .name = "Bayer 10 BGGR",
114 .bits_per_sample = 8,
115 .packing = SOC_MBUS_PACKING_2X8_PADLO,
116 .order = SOC_MBUS_ORDER_BE,
117 },
118};
119
120s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
121{
122 switch (mf->packing) {
123 case SOC_MBUS_PACKING_NONE:
124 return width * mf->bits_per_sample / 8;
125 case SOC_MBUS_PACKING_2X8_PADHI:
126 case SOC_MBUS_PACKING_2X8_PADLO:
127 case SOC_MBUS_PACKING_EXTEND16:
128 return width * 2;
129 }
130 return -EINVAL;
131}
132EXPORT_SYMBOL(soc_mbus_bytes_per_line);
133
134const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
135 enum v4l2_mbus_pixelcode code)
136{
137 if ((unsigned int)(code - V4L2_MBUS_FMT_FIXED) > ARRAY_SIZE(mbus_fmt))
138 return NULL;
139 return mbus_fmt + code - V4L2_MBUS_FMT_FIXED - 1;
140}
141EXPORT_SYMBOL(soc_mbus_get_fmtdesc);
142
143static int __init soc_mbus_init(void)
144{
145 return 0;
146}
147
148static void __exit soc_mbus_exit(void)
149{
150}
151
152module_init(soc_mbus_init);
153module_exit(soc_mbus_exit);
154
155MODULE_DESCRIPTION("soc-camera media bus interface");
156MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
157MODULE_LICENSE("GPL v2");
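The two exported helpers in this new file are meant for soc-camera host drivers that need to translate a negotiated media-bus code into the pixel-format fields user space sees. A minimal usage sketch - the fill_pix() wrapper is hypothetical, only the two soc_mbus_* calls come from this file:

#include <linux/videodev2.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-mediabus.h>

/* Hypothetical wrapper: derive fourcc and line/image sizes from an mbus code. */
static int fill_pix(struct v4l2_pix_format *pix, enum v4l2_mbus_pixelcode code)
{
	const struct soc_mbus_pixelfmt *fmt = soc_mbus_get_fmtdesc(code);
	s32 bpl;

	if (!fmt)
		return -EINVAL;		/* code not described in mbus_fmt[] */

	bpl = soc_mbus_bytes_per_line(pix->width, fmt);
	if (bpl < 0)
		return bpl;		/* packing not handled */

	pix->pixelformat  = fmt->fourcc;
	pix->bytesperline = bpl;
	pix->sizeimage    = bpl * pix->height;

	return 0;
}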
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 6b41865f42bd..f07a0f6b71c4 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1307,7 +1307,6 @@ static void stk_v4l_dev_release(struct video_device *vd)
1307 1307
1308static struct video_device stk_v4l_data = { 1308static struct video_device stk_v4l_data = {
1309 .name = "stkwebcam", 1309 .name = "stkwebcam",
1310 .minor = -1,
1311 .tvnorms = V4L2_STD_UNKNOWN, 1310 .tvnorms = V4L2_STD_UNKNOWN,
1312 .current_norm = V4L2_STD_UNKNOWN, 1311 .current_norm = V4L2_STD_UNKNOWN,
1313 .fops = &v4l_stk_fops, 1312 .fops = &v4l_stk_fops,
@@ -1327,8 +1326,8 @@ static int stk_register_video_device(struct stk_camera *dev)
1327 if (err) 1326 if (err)
1328 STK_ERROR("v4l registration failed\n"); 1327 STK_ERROR("v4l registration failed\n");
1329 else 1328 else
1330 STK_INFO("Syntek USB2.0 Camera is now controlling video device" 1329 STK_INFO("Syntek USB2.0 Camera is now controlling device %s\n",
1331 " /dev/video%d\n", dev->vdev.num); 1330 video_device_node_name(&dev->vdev));
1332 return err; 1331 return err;
1333} 1332}
1334 1333
@@ -1418,8 +1417,8 @@ static void stk_camera_disconnect(struct usb_interface *interface)
1418 wake_up_interruptible(&dev->wait_frame); 1417 wake_up_interruptible(&dev->wait_frame);
1419 stk_remove_sysfs_files(&dev->vdev); 1418 stk_remove_sysfs_files(&dev->vdev);
1420 1419
1421 STK_INFO("Syntek USB2.0 Camera release resources " 1420 STK_INFO("Syntek USB2.0 Camera release resources device %s\n",
1422 "video device /dev/video%d\n", dev->vdev.num); 1421 video_device_node_name(&dev->vdev));
1423 1422
1424 video_unregister_device(&dev->vdev); 1423 video_unregister_device(&dev->vdev);
1425} 1424}
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index eaada39c76fd..a057824e7ebc 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -1921,7 +1921,6 @@ static const struct v4l2_file_operations saa_fops = {
1921static struct video_device saa_template = { 1921static struct video_device saa_template = {
1922 .name = "SAA7146A", 1922 .name = "SAA7146A",
1923 .fops = &saa_fops, 1923 .fops = &saa_fops,
1924 .minor = -1,
1925 .release = video_device_release_empty, 1924 .release = video_device_release_empty,
1926}; 1925};
1927 1926
@@ -1972,7 +1971,6 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num)
1972 1971
1973 saa->id = pdev->device; 1972 saa->id = pdev->device;
1974 saa->irq = pdev->irq; 1973 saa->irq = pdev->irq;
1975 saa->video_dev.minor = -1;
1976 saa->saa7146_adr = pci_resource_start(pdev, 0); 1974 saa->saa7146_adr = pci_resource_start(pdev, 0);
1977 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &saa->revision); 1975 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &saa->revision);
1978 1976
@@ -2134,7 +2132,7 @@ static void stradis_release_saa(struct pci_dev *pdev)
2134 free_irq(saa->irq, saa); 2132 free_irq(saa->irq, saa);
2135 if (saa->saa7146_mem) 2133 if (saa->saa7146_mem)
2136 iounmap(saa->saa7146_mem); 2134 iounmap(saa->saa7146_mem);
2137 if (saa->video_dev.minor != -1) 2135 if (video_is_registered(&saa->video_dev))
2138 video_unregister_device(&saa->video_dev); 2136 video_unregister_device(&saa->video_dev);
2139} 2137}
2140 2138
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 6a91714125d2..5938ad8702ef 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1405,7 +1405,6 @@ static struct video_device stv680_template = {
1405 .name = "STV0680 USB camera", 1405 .name = "STV0680 USB camera",
1406 .fops = &stv680_fops, 1406 .fops = &stv680_fops,
1407 .release = video_device_release, 1407 .release = video_device_release,
1408 .minor = -1,
1409}; 1408};
1410 1409
1411static int stv680_probe (struct usb_interface *intf, const struct usb_device_id *id) 1410static int stv680_probe (struct usb_interface *intf, const struct usb_device_id *id)
@@ -1467,8 +1466,8 @@ static int stv680_probe (struct usb_interface *intf, const struct usb_device_id
1467 retval = -EIO; 1466 retval = -EIO;
1468 goto error_vdev; 1467 goto error_vdev;
1469 } 1468 }
1470 PDEBUG(0, "STV(i): registered new video device: video%d", 1469 PDEBUG(0, "STV(i): registered new video device: %s",
1471 stv680->vdev->num); 1470 video_device_node_name(stv680->vdev));
1472 1471
1473 usb_set_intfdata (intf, stv680); 1472 usb_set_intfdata (intf, stv680);
1474 retval = stv680_create_sysfs_files(stv680->vdev); 1473 retval = stv680_create_sysfs_files(stv680->vdev);
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 269ab044072a..5b801a6e1eea 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -29,7 +29,7 @@
29#include <media/tw9910.h> 29#include <media/tw9910.h>
30 30
31#define GET_ID(val) ((val & 0xF8) >> 3) 31#define GET_ID(val) ((val & 0xF8) >> 3)
32#define GET_ReV(val) (val & 0x07) 32#define GET_REV(val) (val & 0x07)
33 33
34/* 34/*
35 * register offset 35 * register offset
@@ -117,7 +117,7 @@
117#define LCTL24 0x68 117#define LCTL24 0x68
118#define LCTL25 0x69 118#define LCTL25 0x69
119#define LCTL26 0x6A 119#define LCTL26 0x6A
120#define HSGEGIN 0x6B 120#define HSBEGIN 0x6B
121#define HSEND 0x6C 121#define HSEND 0x6C
122#define OVSDLY 0x6D 122#define OVSDLY 0x6D
123#define OVSEND 0x6E 123#define OVSEND 0x6E
@@ -152,7 +152,10 @@
152 /* 1 : non-auto */ 152 /* 1 : non-auto */
153#define VSCTL 0x08 /* 1 : Vertical out ctrl by DVALID */ 153#define VSCTL 0x08 /* 1 : Vertical out ctrl by DVALID */
154 /* 0 : Vertical out ctrl by HACTIVE and DVALID */ 154 /* 0 : Vertical out ctrl by HACTIVE and DVALID */
155#define OEN 0x04 /* Output Enable together with TRI_SEL. */ 155#define OEN_TRI_SEL_MASK 0x07
156#define OEN_TRI_SEL_ALL_ON 0x00 /* Enable output for Rev0/Rev1 */
157#define OEN_TRI_SEL_ALL_OFF_r0 0x06 /* All tri-stated for Rev0 */
158#define OEN_TRI_SEL_ALL_OFF_r1 0x07 /* All tri-stated for Rev1 */
156 159
157/* OUTCTR1 */ 160/* OUTCTR1 */
158#define VSP_LO 0x00 /* 0 : VS pin output polarity is active low */ 161#define VSP_LO 0x00 /* 0 : VS pin output polarity is active low */
@@ -178,11 +181,18 @@
178 * but all register content remain unchanged. 181 * but all register content remain unchanged.
179 * This bit is self-resetting. 182 * This bit is self-resetting.
180 */ 183 */
184#define ACNTL1_PDN_MASK 0x0e
185#define CLK_PDN 0x08 /* system clock power down */
186#define Y_PDN 0x04 /* Luma ADC power down */
187#define C_PDN 0x02 /* Chroma ADC power down */
188
189/* ACNTL2 */
190#define ACNTL2_PDN_MASK 0x40
191#define PLL_PDN 0x40 /* PLL power down */
181 192
182/* VBICNTL */ 193/* VBICNTL */
183/* RTSEL : control the real time signal 194
184* output from the MPOUT pin 195/* RTSEL : control the real time signal output from the MPOUT pin */
185*/
186#define RTSEL_MASK 0x07 196#define RTSEL_MASK 0x07
187#define RTSEL_VLOSS 0x00 /* 0000 = Video loss */ 197#define RTSEL_VLOSS 0x00 /* 0000 = Video loss */
188#define RTSEL_HLOCK 0x01 /* 0001 = H-lock */ 198#define RTSEL_HLOCK 0x01 /* 0001 = H-lock */
@@ -226,28 +236,7 @@ struct tw9910_priv {
226 struct v4l2_subdev subdev; 236 struct v4l2_subdev subdev;
227 struct tw9910_video_info *info; 237 struct tw9910_video_info *info;
228 const struct tw9910_scale_ctrl *scale; 238 const struct tw9910_scale_ctrl *scale;
229}; 239 u32 revision;
230
231/*
232 * register settings
233 */
234
235#define ENDMARKER { 0xff, 0xff }
236
237static const struct regval_list tw9910_default_regs[] =
238{
239 { OPFORM, 0x00 },
240 { OUTCTR1, VSP_LO | VSSL_VVALID | HSP_HI | HSSL_HSYNC },
241 ENDMARKER,
242};
243
244static const struct soc_camera_data_format tw9910_color_fmt[] = {
245 {
246 .name = "VYUY",
247 .fourcc = V4L2_PIX_FMT_VYUY,
248 .depth = 16,
249 .colorspace = V4L2_COLORSPACE_SMPTE170M,
250 }
251}; 240};
252 241
253static const struct tw9910_scale_ctrl tw9910_ntsc_scales[] = { 242static const struct tw9910_scale_ctrl tw9910_ntsc_scales[] = {
@@ -340,13 +329,6 @@ static const struct tw9910_scale_ctrl tw9910_pal_scales[] = {
340 }, 329 },
341}; 330};
342 331
343static const struct tw9910_cropping_ctrl tw9910_cropping_ctrl = {
344 .vdelay = 0x0012,
345 .vactive = 0x00F0,
346 .hdelay = 0x0010,
347 .hactive = 0x02D0,
348};
349
350static const struct tw9910_hsync_ctrl tw9910_hsync_ctrl = { 332static const struct tw9910_hsync_ctrl tw9910_hsync_ctrl = {
351 .start = 0x0260, 333 .start = 0x0260,
352 .end = 0x0300, 334 .end = 0x0300,
@@ -361,6 +343,19 @@ static struct tw9910_priv *to_tw9910(const struct i2c_client *client)
361 subdev); 343 subdev);
362} 344}
363 345
346static int tw9910_mask_set(struct i2c_client *client, u8 command,
347 u8 mask, u8 set)
348{
349 s32 val = i2c_smbus_read_byte_data(client, command);
350 if (val < 0)
351 return val;
352
353 val &= ~mask;
354 val |= set & mask;
355
356 return i2c_smbus_write_byte_data(client, command, val);
357}
358
364static int tw9910_set_scale(struct i2c_client *client, 359static int tw9910_set_scale(struct i2c_client *client,
365 const struct tw9910_scale_ctrl *scale) 360 const struct tw9910_scale_ctrl *scale)
366{ 361{
@@ -383,47 +378,14 @@ static int tw9910_set_scale(struct i2c_client *client,
383 return ret; 378 return ret;
384} 379}
385 380
386static int tw9910_set_cropping(struct i2c_client *client,
387 const struct tw9910_cropping_ctrl *cropping)
388{
389 int ret;
390
391 ret = i2c_smbus_write_byte_data(client, CROP_HI,
392 (cropping->vdelay & 0x0300) >> 2 |
393 (cropping->vactive & 0x0300) >> 4 |
394 (cropping->hdelay & 0x0300) >> 6 |
395 (cropping->hactive & 0x0300) >> 8);
396 if (ret < 0)
397 return ret;
398
399 ret = i2c_smbus_write_byte_data(client, VDELAY_LO,
400 cropping->vdelay & 0x00FF);
401 if (ret < 0)
402 return ret;
403
404 ret = i2c_smbus_write_byte_data(client, VACTIVE_LO,
405 cropping->vactive & 0x00FF);
406 if (ret < 0)
407 return ret;
408
409 ret = i2c_smbus_write_byte_data(client, HDELAY_LO,
410 cropping->hdelay & 0x00FF);
411 if (ret < 0)
412 return ret;
413
414 ret = i2c_smbus_write_byte_data(client, HACTIVE_LO,
415 cropping->hactive & 0x00FF);
416
417 return ret;
418}
419
420static int tw9910_set_hsync(struct i2c_client *client, 381static int tw9910_set_hsync(struct i2c_client *client,
421 const struct tw9910_hsync_ctrl *hsync) 382 const struct tw9910_hsync_ctrl *hsync)
422{ 383{
384 struct tw9910_priv *priv = to_tw9910(client);
423 int ret; 385 int ret;
424 386
425 /* bit 10 - 3 */ 387 /* bit 10 - 3 */
426 ret = i2c_smbus_write_byte_data(client, HSGEGIN, 388 ret = i2c_smbus_write_byte_data(client, HSBEGIN,
427 (hsync->start & 0x07F8) >> 3); 389 (hsync->start & 0x07F8) >> 3);
428 if (ret < 0) 390 if (ret < 0)
429 return ret; 391 return ret;
@@ -434,50 +396,41 @@ static int tw9910_set_hsync(struct i2c_client *client,
434 if (ret < 0) 396 if (ret < 0)
435 return ret; 397 return ret;
436 398
399 /* So far only revisions 0 and 1 have been seen */
437 /* bit 2 - 0 */ 400 /* bit 2 - 0 */
438 ret = i2c_smbus_read_byte_data(client, HSLOWCTL); 401 if (1 == priv->revision)
439 if (ret < 0) 402 ret = tw9910_mask_set(client, HSLOWCTL, 0x77,
440 return ret; 403 (hsync->start & 0x0007) << 4 |
441 404 (hsync->end & 0x0007));
442 ret = i2c_smbus_write_byte_data(client, HSLOWCTL,
443 (ret & 0x88) |
444 (hsync->start & 0x0007) << 4 |
445 (hsync->end & 0x0007));
446 405
447 return ret; 406 return ret;
448} 407}
449 408
450static int tw9910_write_array(struct i2c_client *client, 409static void tw9910_reset(struct i2c_client *client)
451 const struct regval_list *vals)
452{ 410{
453 while (vals->reg_num != 0xff) { 411 tw9910_mask_set(client, ACNTL1, SRESET, SRESET);
454 int ret = i2c_smbus_write_byte_data(client, 412 msleep(1);
455 vals->reg_num,
456 vals->value);
457 if (ret < 0)
458 return ret;
459 vals++;
460 }
461 return 0;
462} 413}
463 414
464static int tw9910_mask_set(struct i2c_client *client, u8 command, 415static int tw9910_power(struct i2c_client *client, int enable)
465 u8 mask, u8 set)
466{ 416{
467 s32 val = i2c_smbus_read_byte_data(client, command); 417 int ret;
468 if (val < 0) 418 u8 acntl1;
469 return val; 419 u8 acntl2;
470 420
471 val &= ~mask; 421 if (enable) {
472 val |= set & mask; 422 acntl1 = 0;
423 acntl2 = 0;
424 } else {
425 acntl1 = CLK_PDN | Y_PDN | C_PDN;
426 acntl2 = PLL_PDN;
427 }
473 428
474 return i2c_smbus_write_byte_data(client, command, val); 429 ret = tw9910_mask_set(client, ACNTL1, ACNTL1_PDN_MASK, acntl1);
475} 430 if (ret < 0)
431 return ret;
476 432
477static void tw9910_reset(struct i2c_client *client) 433 return tw9910_mask_set(client, ACNTL2, ACNTL2_PDN_MASK, acntl2);
478{
479 i2c_smbus_write_byte_data(client, ACNTL1, SRESET);
480 msleep(1);
481} 434}
482 435
483static const struct tw9910_scale_ctrl* 436static const struct tw9910_scale_ctrl*
@@ -518,27 +471,62 @@ static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
518{ 471{
519 struct i2c_client *client = sd->priv; 472 struct i2c_client *client = sd->priv;
520 struct tw9910_priv *priv = to_tw9910(client); 473 struct tw9910_priv *priv = to_tw9910(client);
474 u8 val;
475 int ret;
521 476
522 if (!enable) 477 if (!enable) {
523 return 0; 478 switch (priv->revision) {
479 case 0:
480 val = OEN_TRI_SEL_ALL_OFF_r0;
481 break;
482 case 1:
483 val = OEN_TRI_SEL_ALL_OFF_r1;
484 break;
485 default:
486 dev_err(&client->dev, "un-supported revision\n");
487 return -EINVAL;
488 }
489 } else {
490 val = OEN_TRI_SEL_ALL_ON;
524 491
525 if (!priv->scale) { 492 if (!priv->scale) {
526 dev_err(&client->dev, "norm select error\n"); 493 dev_err(&client->dev, "norm select error\n");
527 return -EPERM; 494 return -EPERM;
495 }
496
497 dev_dbg(&client->dev, "%s %dx%d\n",
498 priv->scale->name,
499 priv->scale->width,
500 priv->scale->height);
528 } 501 }
529 502
530 dev_dbg(&client->dev, "%s %dx%d\n", 503 ret = tw9910_mask_set(client, OPFORM, OEN_TRI_SEL_MASK, val);
531 priv->scale->name, 504 if (ret < 0)
532 priv->scale->width, 505 return ret;
533 priv->scale->height);
534 506
535 return 0; 507 return tw9910_power(client, enable);
536} 508}
537 509
538static int tw9910_set_bus_param(struct soc_camera_device *icd, 510static int tw9910_set_bus_param(struct soc_camera_device *icd,
539 unsigned long flags) 511 unsigned long flags)
540{ 512{
541 return 0; 513 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
514 struct i2c_client *client = sd->priv;
515 u8 val = VSSL_VVALID | HSSL_DVALID;
516
517 /*
518 * set OUTCTR1
519 *
520 * We use VVALID and DVALID signals to control VSYNC and HSYNC
521 * outputs, in this mode their polarity is inverted.
522 */
523 if (flags & SOCAM_HSYNC_ACTIVE_LOW)
524 val |= HSP_HI;
525
526 if (flags & SOCAM_VSYNC_ACTIVE_LOW)
527 val |= VSP_HI;
528
529 return i2c_smbus_write_byte_data(client, OUTCTR1, val);
542} 530}
543 531
544static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd) 532static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd)
@@ -548,6 +536,7 @@ static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd)
548 struct soc_camera_link *icl = to_soc_camera_link(icd); 536 struct soc_camera_link *icl = to_soc_camera_link(icd);
549 unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER | 537 unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
550 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH | 538 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
539 SOCAM_VSYNC_ACTIVE_LOW | SOCAM_HSYNC_ACTIVE_LOW |
551 SOCAM_DATA_ACTIVE_HIGH | priv->info->buswidth; 540 SOCAM_DATA_ACTIVE_HIGH | priv->info->buswidth;
552 541
553 return soc_camera_apply_sensor_flags(icl, flags); 542 return soc_camera_apply_sensor_flags(icl, flags);
@@ -576,8 +565,11 @@ static int tw9910_enum_input(struct soc_camera_device *icd,
576static int tw9910_g_chip_ident(struct v4l2_subdev *sd, 565static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
577 struct v4l2_dbg_chip_ident *id) 566 struct v4l2_dbg_chip_ident *id)
578{ 567{
568 struct i2c_client *client = sd->priv;
569 struct tw9910_priv *priv = to_tw9910(client);
570
579 id->ident = V4L2_IDENT_TW9910; 571 id->ident = V4L2_IDENT_TW9910;
580 id->revision = 0; 572 id->revision = priv->revision;
581 573
582 return 0; 574 return 0;
583} 575}
@@ -596,7 +588,8 @@ static int tw9910_g_register(struct v4l2_subdev *sd,
596 if (ret < 0) 588 if (ret < 0)
597 return ret; 589 return ret;
598 590
599 /* ret = int 591 /*
592 * ret = int
600 * reg->val = __u64 593 * reg->val = __u64
601 */ 594 */
602 reg->val = (__u64)ret; 595 reg->val = (__u64)ret;
@@ -637,9 +630,6 @@ static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
637 * reset hardware 630 * reset hardware
638 */ 631 */
639 tw9910_reset(client); 632 tw9910_reset(client);
640 ret = tw9910_write_array(client, tw9910_default_regs);
641 if (ret < 0)
642 goto tw9910_set_fmt_error;
643 633
644 /* 634 /*
645 * set bus width 635 * set bus width
@@ -688,13 +678,6 @@ static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
688 goto tw9910_set_fmt_error; 678 goto tw9910_set_fmt_error;
689 679
690 /* 680 /*
691 * set cropping
692 */
693 ret = tw9910_set_cropping(client, &tw9910_cropping_ctrl);
694 if (ret < 0)
695 goto tw9910_set_fmt_error;
696
697 /*
698 * set hsync 681 * set hsync
699 */ 682 */
700 ret = tw9910_set_hsync(client, &tw9910_hsync_ctrl); 683 ret = tw9910_set_hsync(client, &tw9910_hsync_ctrl);
@@ -762,11 +745,11 @@ static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
762 return 0; 745 return 0;
763} 746}
764 747
765static int tw9910_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 748static int tw9910_g_fmt(struct v4l2_subdev *sd,
749 struct v4l2_mbus_framefmt *mf)
766{ 750{
767 struct i2c_client *client = sd->priv; 751 struct i2c_client *client = sd->priv;
768 struct tw9910_priv *priv = to_tw9910(client); 752 struct tw9910_priv *priv = to_tw9910(client);
769 struct v4l2_pix_format *pix = &f->fmt.pix;
770 753
771 if (!priv->scale) { 754 if (!priv->scale) {
772 int ret; 755 int ret;
@@ -783,74 +766,76 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
783 return ret; 766 return ret;
784 } 767 }
785 768
786 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 769 mf->width = priv->scale->width;
787 770 mf->height = priv->scale->height;
788 pix->width = priv->scale->width; 771 mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
789 pix->height = priv->scale->height; 772 mf->colorspace = V4L2_COLORSPACE_JPEG;
790 pix->pixelformat = V4L2_PIX_FMT_VYUY; 773 mf->field = V4L2_FIELD_INTERLACED_BT;
791 pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
792 pix->field = V4L2_FIELD_INTERLACED;
793 774
794 return 0; 775 return 0;
795} 776}
796 777
797static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 778static int tw9910_s_fmt(struct v4l2_subdev *sd,
779 struct v4l2_mbus_framefmt *mf)
798{ 780{
799 struct i2c_client *client = sd->priv; 781 struct i2c_client *client = sd->priv;
800 struct tw9910_priv *priv = to_tw9910(client); 782 struct tw9910_priv *priv = to_tw9910(client);
801 struct v4l2_pix_format *pix = &f->fmt.pix;
802 /* See tw9910_s_crop() - no proper cropping support */ 783 /* See tw9910_s_crop() - no proper cropping support */
803 struct v4l2_crop a = { 784 struct v4l2_crop a = {
804 .c = { 785 .c = {
805 .left = 0, 786 .left = 0,
806 .top = 0, 787 .top = 0,
807 .width = pix->width, 788 .width = mf->width,
808 .height = pix->height, 789 .height = mf->height,
809 }, 790 },
810 }; 791 };
811 int i, ret; 792 int ret;
793
794 WARN_ON(mf->field != V4L2_FIELD_ANY &&
795 mf->field != V4L2_FIELD_INTERLACED_BT);
812 796
813 /* 797 /*
814 * check color format 798 * check color format
815 */ 799 */
816 for (i = 0; i < ARRAY_SIZE(tw9910_color_fmt); i++) 800 if (mf->code != V4L2_MBUS_FMT_YUYV8_2X8_BE)
817 if (pix->pixelformat == tw9910_color_fmt[i].fourcc)
818 break;
819
820 if (i == ARRAY_SIZE(tw9910_color_fmt))
821 return -EINVAL; 801 return -EINVAL;
822 802
803 mf->colorspace = V4L2_COLORSPACE_JPEG;
804
823 ret = tw9910_s_crop(sd, &a); 805 ret = tw9910_s_crop(sd, &a);
824 if (!ret) { 806 if (!ret) {
825 pix->width = priv->scale->width; 807 mf->width = priv->scale->width;
826 pix->height = priv->scale->height; 808 mf->height = priv->scale->height;
827 } 809 }
828 return ret; 810 return ret;
829} 811}
830 812
831static int tw9910_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) 813static int tw9910_try_fmt(struct v4l2_subdev *sd,
814 struct v4l2_mbus_framefmt *mf)
832{ 815{
833 struct i2c_client *client = sd->priv; 816 struct i2c_client *client = sd->priv;
834 struct soc_camera_device *icd = client->dev.platform_data; 817 struct soc_camera_device *icd = client->dev.platform_data;
835 struct v4l2_pix_format *pix = &f->fmt.pix;
836 const struct tw9910_scale_ctrl *scale; 818 const struct tw9910_scale_ctrl *scale;
837 819
838 if (V4L2_FIELD_ANY == pix->field) { 820 if (V4L2_FIELD_ANY == mf->field) {
839 pix->field = V4L2_FIELD_INTERLACED; 821 mf->field = V4L2_FIELD_INTERLACED_BT;
840 } else if (V4L2_FIELD_INTERLACED != pix->field) { 822 } else if (V4L2_FIELD_INTERLACED_BT != mf->field) {
841 dev_err(&client->dev, "Field type invalid.\n"); 823 dev_err(&client->dev, "Field type %d invalid.\n", mf->field);
842 return -EINVAL; 824 return -EINVAL;
843 } 825 }
844 826
827 mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
828 mf->colorspace = V4L2_COLORSPACE_JPEG;
829
845 /* 830 /*
846 * select suitable norm 831 * select suitable norm
847 */ 832 */
848 scale = tw9910_select_norm(icd, pix->width, pix->height); 833 scale = tw9910_select_norm(icd, mf->width, mf->height);
849 if (!scale) 834 if (!scale)
850 return -EINVAL; 835 return -EINVAL;
851 836
852 pix->width = scale->width; 837 mf->width = scale->width;
853 pix->height = scale->height; 838 mf->height = scale->height;
854 839
855 return 0; 840 return 0;
856} 841}
@@ -859,7 +844,7 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
859 struct i2c_client *client) 844 struct i2c_client *client)
860{ 845{
861 struct tw9910_priv *priv = to_tw9910(client); 846 struct tw9910_priv *priv = to_tw9910(client);
862 s32 val; 847 s32 id;
863 848
864 /* 849 /*
865 * We must have a parent by now. And it cannot be a wrong one. 850 * We must have a parent by now. And it cannot be a wrong one.
@@ -878,23 +863,24 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
878 return -ENODEV; 863 return -ENODEV;
879 } 864 }
880 865
881 icd->formats = tw9910_color_fmt;
882 icd->num_formats = ARRAY_SIZE(tw9910_color_fmt);
883
884 /* 866 /*
885 * check and show Product ID 867 * check and show Product ID
868 * So far only revisions 0 and 1 have been seen
886 */ 869 */
887 val = i2c_smbus_read_byte_data(client, ID); 870 id = i2c_smbus_read_byte_data(client, ID);
871 priv->revision = GET_REV(id);
872 id = GET_ID(id);
888 873
889 if (0x0B != GET_ID(val) || 874 if (0x0B != id ||
890 0x00 != GET_ReV(val)) { 875 0x01 < priv->revision) {
891 dev_err(&client->dev, 876 dev_err(&client->dev,
892 "Product ID error %x:%x\n", GET_ID(val), GET_ReV(val)); 877 "Product ID error %x:%x\n",
878 id, priv->revision);
893 return -ENODEV; 879 return -ENODEV;
894 } 880 }
895 881
896 dev_info(&client->dev, 882 dev_info(&client->dev,
897 "tw9910 Product ID %0x:%0x\n", GET_ID(val), GET_ReV(val)); 883 "tw9910 Product ID %0x:%0x\n", id, priv->revision);
898 884
899 icd->vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL; 885 icd->vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL;
900 icd->vdev->current_norm = V4L2_STD_NTSC; 886 icd->vdev->current_norm = V4L2_STD_NTSC;
@@ -917,14 +903,25 @@ static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
917#endif 903#endif
918}; 904};
919 905
906static int tw9910_enum_fmt(struct v4l2_subdev *sd, int index,
907 enum v4l2_mbus_pixelcode *code)
908{
909 if (index)
910 return -EINVAL;
911
912 *code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
913 return 0;
914}
915
920static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = { 916static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
921 .s_stream = tw9910_s_stream, 917 .s_stream = tw9910_s_stream,
922 .g_fmt = tw9910_g_fmt, 918 .g_mbus_fmt = tw9910_g_fmt,
923 .s_fmt = tw9910_s_fmt, 919 .s_mbus_fmt = tw9910_s_fmt,
924 .try_fmt = tw9910_try_fmt, 920 .try_mbus_fmt = tw9910_try_fmt,
925 .cropcap = tw9910_cropcap, 921 .cropcap = tw9910_cropcap,
926 .g_crop = tw9910_g_crop, 922 .g_crop = tw9910_g_crop,
927 .s_crop = tw9910_s_crop, 923 .s_crop = tw9910_s_crop,
924 .enum_mbus_fmt = tw9910_enum_fmt,
928}; 925};
929 926
930static struct v4l2_subdev_ops tw9910_subdev_ops = { 927static struct v4l2_subdev_ops tw9910_subdev_ops = {
@@ -954,10 +951,10 @@ static int tw9910_probe(struct i2c_client *client,
954 } 951 }
955 952
956 icl = to_soc_camera_link(icd); 953 icl = to_soc_camera_link(icd);
957 if (!icl) 954 if (!icl || !icl->priv)
958 return -EINVAL; 955 return -EINVAL;
959 956
960 info = container_of(icl, struct tw9910_video_info, link); 957 info = icl->priv;
961 958
962 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 959 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
963 dev_err(&client->dev, 960 dev_err(&client->dev,
@@ -975,7 +972,7 @@ static int tw9910_probe(struct i2c_client *client,
975 v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops); 972 v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops);
976 973
977 icd->ops = &tw9910_ops; 974 icd->ops = &tw9910_ops;
978 icd->iface = info->link.bus_id; 975 icd->iface = icl->bus_id;
979 976
980 ret = tw9910_video_probe(icd, client); 977 ret = tw9910_video_probe(icd, client);
981 if (ret) { 978 if (ret) {
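With the probe change above, tw9910_video_info is no longer embedded in the link structure; board code hands it over through soc_camera_link.priv and the driver picks it up as icl->priv. An illustrative platform fragment - the bus id and bus width are example values, not taken from this patch:

#include <media/soc_camera.h>
#include <media/tw9910.h>

static struct tw9910_video_info tw9910_info = {
	.buswidth = SOCAM_DATAWIDTH_8,	/* example: 8-bit parallel bus */
};

static struct soc_camera_link tw9910_link = {
	.bus_id	= 0,			/* soc-camera host this decoder feeds */
	.priv	= &tw9910_info,		/* retrieved in tw9910_probe() via icl->priv */
};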
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index dea8b321fb4a..5ac37c6c4313 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -1053,9 +1053,9 @@ int usbvideo_RegisterVideoDevice(struct uvd *uvd)
1053 "%s: video_register_device() successful\n", __func__); 1053 "%s: video_register_device() successful\n", __func__);
1054 } 1054 }
1055 1055
1056 dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n", 1056 dev_info(&uvd->dev->dev, "%s on %s: canvas=%s videosize=%s\n",
1057 (uvd->handle != NULL) ? uvd->handle->drvName : "???", 1057 (uvd->handle != NULL) ? uvd->handle->drvName : "???",
1058 uvd->vdev.num, tmp2, tmp1); 1058 video_device_node_name(&uvd->vdev), tmp2, tmp1);
1059 1059
1060 usb_get_dev(uvd->dev); 1060 usb_get_dev(uvd->dev);
1061 return 0; 1061 return 0;
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 45fce39ec9ad..6030410c6677 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -796,7 +796,6 @@ static const struct v4l2_file_operations vicam_fops = {
796static struct video_device vicam_template = { 796static struct video_device vicam_template = {
797 .name = "ViCam-based USB Camera", 797 .name = "ViCam-based USB Camera",
798 .fops = &vicam_fops, 798 .fops = &vicam_fops,
799 .minor = -1,
800 .release = video_device_release_empty, 799 .release = video_device_release_empty,
801}; 800};
802 801
@@ -873,8 +872,8 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
873 return -EIO; 872 return -EIO;
874 } 873 }
875 874
876 printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n", 875 printk(KERN_INFO "ViCam webcam driver now controlling device %s\n",
877 cam->vdev.num); 876 video_device_node_name(&cam->vdev));
878 877
879 usb_set_intfdata (intf, cam); 878 usb_set_intfdata (intf, cam);
880 879
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index c19f51dba2ee..0613922997e0 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -215,8 +215,8 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
215 memcpy(&usbvision->i2c_adap, &i2c_adap_template, 215 memcpy(&usbvision->i2c_adap, &i2c_adap_template,
216 sizeof(struct i2c_adapter)); 216 sizeof(struct i2c_adapter));
217 217
218 sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name), 218 sprintf(usbvision->i2c_adap.name, "%s-%d-%s", i2c_adap_template.name,
219 " #%d", usbvision->vdev->num); 219 usbvision->dev->bus->busnum, usbvision->dev->devpath);
220 PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name); 220 PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name);
221 usbvision->i2c_adap.dev.parent = &usbvision->dev->dev; 221 usbvision->i2c_adap.dev.parent = &usbvision->dev->dev;
222 222
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index c07b0ac452ab..1054546db908 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1328,7 +1328,6 @@ static struct video_device usbvision_video_template = {
1328 .ioctl_ops = &usbvision_ioctl_ops, 1328 .ioctl_ops = &usbvision_ioctl_ops,
1329 .name = "usbvision-video", 1329 .name = "usbvision-video",
1330 .release = video_device_release, 1330 .release = video_device_release,
1331 .minor = -1,
1332 .tvnorms = USBVISION_NORMS, 1331 .tvnorms = USBVISION_NORMS,
1333 .current_norm = V4L2_STD_PAL 1332 .current_norm = V4L2_STD_PAL
1334}; 1333};
@@ -1362,7 +1361,6 @@ static struct video_device usbvision_radio_template = {
1362 .fops = &usbvision_radio_fops, 1361 .fops = &usbvision_radio_fops,
1363 .name = "usbvision-radio", 1362 .name = "usbvision-radio",
1364 .release = video_device_release, 1363 .release = video_device_release,
1365 .minor = -1,
1366 .ioctl_ops = &usbvision_radio_ioctl_ops, 1364 .ioctl_ops = &usbvision_radio_ioctl_ops,
1367 1365
1368 .tvnorms = USBVISION_NORMS, 1366 .tvnorms = USBVISION_NORMS,
@@ -1382,7 +1380,6 @@ static struct video_device usbvision_vbi_template=
1382 .fops = &usbvision_vbi_fops, 1380 .fops = &usbvision_vbi_fops,
1383 .release = video_device_release, 1381 .release = video_device_release,
1384 .name = "usbvision-vbi", 1382 .name = "usbvision-vbi",
1385 .minor = -1,
1386}; 1383};
1387 1384
1388 1385
@@ -1404,7 +1401,6 @@ static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
1404 return NULL; 1401 return NULL;
1405 } 1402 }
1406 *vdev = *vdev_template; 1403 *vdev = *vdev_template;
1407// vdev->minor = -1;
1408 vdev->v4l2_dev = &usbvision->v4l2_dev; 1404 vdev->v4l2_dev = &usbvision->v4l2_dev;
1409 snprintf(vdev->name, sizeof(vdev->name), "%s", name); 1405 snprintf(vdev->name, sizeof(vdev->name), "%s", name);
1410 video_set_drvdata(vdev, usbvision); 1406 video_set_drvdata(vdev, usbvision);
@@ -1416,9 +1412,9 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1416{ 1412{
1417 // vbi Device: 1413 // vbi Device:
1418 if (usbvision->vbi) { 1414 if (usbvision->vbi) {
1419 PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]", 1415 PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
1420 usbvision->vbi->num); 1416 video_device_node_name(usbvision->vbi));
1421 if (usbvision->vbi->minor != -1) { 1417 if (video_is_registered(usbvision->vbi)) {
1422 video_unregister_device(usbvision->vbi); 1418 video_unregister_device(usbvision->vbi);
1423 } else { 1419 } else {
1424 video_device_release(usbvision->vbi); 1420 video_device_release(usbvision->vbi);
@@ -1428,9 +1424,9 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1428 1424
1429 // Radio Device: 1425 // Radio Device:
1430 if (usbvision->rdev) { 1426 if (usbvision->rdev) {
1431 PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]", 1427 PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
1432 usbvision->rdev->num); 1428 video_device_node_name(usbvision->rdev));
1433 if (usbvision->rdev->minor != -1) { 1429 if (video_is_registered(usbvision->rdev)) {
1434 video_unregister_device(usbvision->rdev); 1430 video_unregister_device(usbvision->rdev);
1435 } else { 1431 } else {
1436 video_device_release(usbvision->rdev); 1432 video_device_release(usbvision->rdev);
@@ -1440,9 +1436,9 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1440 1436
1441 // Video Device: 1437 // Video Device:
1442 if (usbvision->vdev) { 1438 if (usbvision->vdev) {
1443 PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]", 1439 PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
1444 usbvision->vdev->num); 1440 video_device_node_name(usbvision->vdev));
1445 if (usbvision->vdev->minor != -1) { 1441 if (video_is_registered(usbvision->vdev)) {
1446 video_unregister_device(usbvision->vdev); 1442 video_unregister_device(usbvision->vdev);
1447 } else { 1443 } else {
1448 video_device_release(usbvision->vdev); 1444 video_device_release(usbvision->vdev);
@@ -1466,8 +1462,8 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1466 video_nr)<0) { 1462 video_nr)<0) {
1467 goto err_exit; 1463 goto err_exit;
1468 } 1464 }
1469 printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n", 1465 printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
1470 usbvision->nr, usbvision->vdev->num); 1466 usbvision->nr, video_device_node_name(usbvision->vdev));
1471 1467
1472 // Radio Device: 1468 // Radio Device:
1473 if (usbvision_device_data[usbvision->DevModel].Radio) { 1469 if (usbvision_device_data[usbvision->DevModel].Radio) {
@@ -1483,8 +1479,8 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1483 radio_nr)<0) { 1479 radio_nr)<0) {
1484 goto err_exit; 1480 goto err_exit;
1485 } 1481 }
1486 printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n", 1482 printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
1487 usbvision->nr, usbvision->rdev->num); 1483 usbvision->nr, video_device_node_name(usbvision->rdev));
1488 } 1484 }
1489 // vbi Device: 1485 // vbi Device:
1490 if (usbvision_device_data[usbvision->DevModel].vbi) { 1486 if (usbvision_device_data[usbvision->DevModel].vbi) {
@@ -1499,8 +1495,8 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1499 vbi_nr)<0) { 1495 vbi_nr)<0) {
1500 goto err_exit; 1496 goto err_exit;
1501 } 1497 }
1502 printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n", 1498 printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device %s [v4l2] (Not Working Yet!)\n",
1503 usbvision->nr, usbvision->vbi->num); 1499 usbvision->nr, video_device_node_name(usbvision->vbi));
1504 } 1500 }
1505 // all done 1501 // all done
1506 return 0; 1502 return 0;
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index c31bc50113bc..391cccca7ffc 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1651,7 +1651,6 @@ static int uvc_register_video(struct uvc_device *dev,
1651 * get another one. 1651 * get another one.
1652 */ 1652 */
1653 vdev->parent = &dev->intf->dev; 1653 vdev->parent = &dev->intf->dev;
1654 vdev->minor = -1;
1655 vdev->fops = &uvc_fops; 1654 vdev->fops = &uvc_fops;
1656 vdev->release = uvc_release; 1655 vdev->release = uvc_release;
1657 strlcpy(vdev->name, dev->name, sizeof vdev->name); 1656 strlcpy(vdev->name, dev->name, sizeof vdev->name);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 05139a4f14f6..9a9802830d41 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -145,7 +145,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
145 uvc_warn_once(stream->dev, UVC_WARN_MINMAX, "UVC non " 145 uvc_warn_once(stream->dev, UVC_WARN_MINMAX, "UVC non "
146 "compliance - GET_MIN/MAX(PROBE) incorrectly " 146 "compliance - GET_MIN/MAX(PROBE) incorrectly "
147 "supported. Enabling workaround.\n"); 147 "supported. Enabling workaround.\n");
148 memset(ctrl, 0, sizeof ctrl); 148 memset(ctrl, 0, sizeof *ctrl);
149 ctrl->wCompQuality = le16_to_cpup((__le16 *)data); 149 ctrl->wCompQuality = le16_to_cpup((__le16 *)data);
150 ret = 0; 150 ret = 0;
151 goto out; 151 goto out;
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index e8e5affbabce..36b5cb86fb57 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -1024,3 +1024,50 @@ void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
1024 } 1024 }
1025} 1025}
1026EXPORT_SYMBOL_GPL(v4l_bound_align_image); 1026EXPORT_SYMBOL_GPL(v4l_bound_align_image);
1027
1028/**
1029 * v4l_fill_dv_preset_info - fill description of a digital video preset
1030 * @preset - preset value
1031 * @info - pointer to struct v4l2_dv_enum_preset
1032 *
1033 * drivers can use this helper function to fill description of dv preset
1034 * in info.
1035 */
1036int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info)
1037{
1038 static const struct v4l2_dv_preset_info {
1039 u16 width;
1040 u16 height;
1041 const char *name;
1042 } dv_presets[] = {
1043 { 0, 0, "Invalid" }, /* V4L2_DV_INVALID */
1044 { 720, 480, "480p@59.94" }, /* V4L2_DV_480P59_94 */
1045 { 720, 576, "576p@50" }, /* V4L2_DV_576P50 */
1046 { 1280, 720, "720p@24" }, /* V4L2_DV_720P24 */
1047 { 1280, 720, "720p@25" }, /* V4L2_DV_720P25 */
1048 { 1280, 720, "720p@30" }, /* V4L2_DV_720P30 */
1049 { 1280, 720, "720p@50" }, /* V4L2_DV_720P50 */
1050 { 1280, 720, "720p@59.94" }, /* V4L2_DV_720P59_94 */
1051 { 1280, 720, "720p@60" }, /* V4L2_DV_720P60 */
1052 { 1920, 1080, "1080i@29.97" }, /* V4L2_DV_1080I29_97 */
1053 { 1920, 1080, "1080i@30" }, /* V4L2_DV_1080I30 */
1054 { 1920, 1080, "1080i@25" }, /* V4L2_DV_1080I25 */
1055 { 1920, 1080, "1080i@50" }, /* V4L2_DV_1080I50 */
1056 { 1920, 1080, "1080i@60" }, /* V4L2_DV_1080I60 */
1057 { 1920, 1080, "1080p@24" }, /* V4L2_DV_1080P24 */
1058 { 1920, 1080, "1080p@25" }, /* V4L2_DV_1080P25 */
1059 { 1920, 1080, "1080p@30" }, /* V4L2_DV_1080P30 */
1060 { 1920, 1080, "1080p@50" }, /* V4L2_DV_1080P50 */
1061 { 1920, 1080, "1080p@60" }, /* V4L2_DV_1080P60 */
1062 };
1063
1064 if (info == NULL || preset >= ARRAY_SIZE(dv_presets))
1065 return -EINVAL;
1066
1067 info->preset = preset;
1068 info->width = dv_presets[preset].width;
1069 info->height = dv_presets[preset].height;
1070 strlcpy(info->name, dv_presets[preset].name, sizeof(info->name));
1071 return 0;
1072}
1073EXPORT_SYMBOL_GPL(v4l_fill_dv_preset_info);
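As the kernel-doc above says, v4l_fill_dv_preset_info() is meant to back a driver's VIDIOC_ENUM_DV_PRESETS handler. A hedged sketch of such a handler, assuming a device that only supports two presets (the preset list and the function name are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static int sketch_enum_dv_presets(struct file *file, void *fh,
				  struct v4l2_dv_enum_preset *preset)
{
	/* Presets this (hypothetical) hardware can actually produce */
	static const u32 supported[] = {
		V4L2_DV_720P60,
		V4L2_DV_1080I60,
	};

	if (preset->index >= ARRAY_SIZE(supported))
		return -EINVAL;

	/* width, height and name are filled from the core preset table */
	return v4l_fill_dv_preset_info(supported[preset->index], preset);
}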
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 997975d5e024..c4150bd26337 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -1077,6 +1077,12 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
1077 case VIDIOC_DBG_G_REGISTER: 1077 case VIDIOC_DBG_G_REGISTER:
1078 case VIDIOC_DBG_G_CHIP_IDENT: 1078 case VIDIOC_DBG_G_CHIP_IDENT:
1079 case VIDIOC_S_HW_FREQ_SEEK: 1079 case VIDIOC_S_HW_FREQ_SEEK:
1080 case VIDIOC_ENUM_DV_PRESETS:
1081 case VIDIOC_S_DV_PRESET:
1082 case VIDIOC_G_DV_PRESET:
1083 case VIDIOC_QUERY_DV_PRESET:
1084 case VIDIOC_S_DV_TIMINGS:
1085 case VIDIOC_G_DV_TIMINGS:
1080 ret = do_video_ioctl(file, cmd, arg); 1086 ret = do_video_ioctl(file, cmd, arg);
1081 break; 1087 break;
1082 1088
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 500cbe9891ac..709069916068 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -189,7 +189,7 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
189 189
190 if (!vdev->fops->read) 190 if (!vdev->fops->read)
191 return -EINVAL; 191 return -EINVAL;
192 if (video_is_unregistered(vdev)) 192 if (!video_is_registered(vdev))
193 return -EIO; 193 return -EIO;
194 return vdev->fops->read(filp, buf, sz, off); 194 return vdev->fops->read(filp, buf, sz, off);
195} 195}
@@ -201,7 +201,7 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
201 201
202 if (!vdev->fops->write) 202 if (!vdev->fops->write)
203 return -EINVAL; 203 return -EINVAL;
204 if (video_is_unregistered(vdev)) 204 if (!video_is_registered(vdev))
205 return -EIO; 205 return -EIO;
206 return vdev->fops->write(filp, buf, sz, off); 206 return vdev->fops->write(filp, buf, sz, off);
207} 207}
@@ -210,7 +210,7 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
210{ 210{
211 struct video_device *vdev = video_devdata(filp); 211 struct video_device *vdev = video_devdata(filp);
212 212
213 if (!vdev->fops->poll || video_is_unregistered(vdev)) 213 if (!vdev->fops->poll || !video_is_registered(vdev))
214 return DEFAULT_POLLMASK; 214 return DEFAULT_POLLMASK;
215 return vdev->fops->poll(filp, poll); 215 return vdev->fops->poll(filp, poll);
216} 216}
@@ -250,7 +250,7 @@ static unsigned long v4l2_get_unmapped_area(struct file *filp,
250 250
251 if (!vdev->fops->get_unmapped_area) 251 if (!vdev->fops->get_unmapped_area)
252 return -ENOSYS; 252 return -ENOSYS;
253 if (video_is_unregistered(vdev)) 253 if (!video_is_registered(vdev))
254 return -ENODEV; 254 return -ENODEV;
255 return vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags); 255 return vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
256} 256}
@@ -260,8 +260,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
260{ 260{
261 struct video_device *vdev = video_devdata(filp); 261 struct video_device *vdev = video_devdata(filp);
262 262
263 if (!vdev->fops->mmap || 263 if (!vdev->fops->mmap || !video_is_registered(vdev))
264 video_is_unregistered(vdev))
265 return -ENODEV; 264 return -ENODEV;
266 return vdev->fops->mmap(filp, vm); 265 return vdev->fops->mmap(filp, vm);
267} 266}
@@ -277,7 +276,7 @@ static int v4l2_open(struct inode *inode, struct file *filp)
277 vdev = video_devdata(filp); 276 vdev = video_devdata(filp);
278 /* return ENODEV if the video device has been removed 277 /* return ENODEV if the video device has been removed
279 already or if it is not registered anymore. */ 278 already or if it is not registered anymore. */
280 if (vdev == NULL || video_is_unregistered(vdev)) { 279 if (vdev == NULL || !video_is_registered(vdev)) {
281 mutex_unlock(&videodev_lock); 280 mutex_unlock(&videodev_lock);
282 return -ENODEV; 281 return -ENODEV;
283 } 282 }
@@ -551,10 +550,11 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
551 vdev->dev.release = v4l2_device_release; 550 vdev->dev.release = v4l2_device_release;
552 551
553 if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) 552 if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
554 printk(KERN_WARNING "%s: requested %s%d, got %s%d\n", 553 printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__,
555 __func__, name_base, nr, name_base, vdev->num); 554 name_base, nr, video_device_node_name(vdev));
556 555
557 /* Part 5: Activate this minor. The char device can now be used. */ 556 /* Part 5: Activate this minor. The char device can now be used. */
557 set_bit(V4L2_FL_REGISTERED, &vdev->flags);
558 mutex_lock(&videodev_lock); 558 mutex_lock(&videodev_lock);
559 video_device[vdev->minor] = vdev; 559 video_device[vdev->minor] = vdev;
560 mutex_unlock(&videodev_lock); 560 mutex_unlock(&videodev_lock);
@@ -593,11 +593,11 @@ EXPORT_SYMBOL(video_register_device_no_warn);
593void video_unregister_device(struct video_device *vdev) 593void video_unregister_device(struct video_device *vdev)
594{ 594{
595 /* Check if vdev was ever registered at all */ 595 /* Check if vdev was ever registered at all */
596 if (!vdev || vdev->minor < 0) 596 if (!vdev || !video_is_registered(vdev))
597 return; 597 return;
598 598
599 mutex_lock(&videodev_lock); 599 mutex_lock(&videodev_lock);
600 set_bit(V4L2_FL_UNREGISTERED, &vdev->flags); 600 clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
601 mutex_unlock(&videodev_lock); 601 mutex_unlock(&videodev_lock);
602 device_unregister(&vdev->dev); 602 device_unregister(&vdev->dev);
603} 603}
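For driver authors, the practical effect of the v4l2-dev.c changes is that the old "minor != -1" test is replaced by video_is_registered(), and log messages should use video_device_node_name() instead of hard-coding /dev/videoN. A sketch of a disconnect path following that pattern - struct mydrv and the function are illustrative only:

#include <linux/device.h>
#include <media/v4l2-dev.h>

struct mydrv {
	struct video_device *vdev;	/* allocated with video_device_alloc() */
};

static void sketch_disconnect(struct mydrv *dev)
{
	if (!dev->vdev)
		return;

	dev_info(&dev->vdev->dev, "removing %s\n",
		 video_device_node_name(dev->vdev));

	if (video_is_registered(dev->vdev))
		video_unregister_device(dev->vdev);	/* release callback runs when the last user is gone */
	else
		video_device_release(dev->vdev);	/* never reached userspace, free it directly */

	dev->vdev = NULL;
}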
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 30cc3347ae52..4b11257c3184 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -284,6 +284,12 @@ static const char *v4l2_ioctls[] = {
284 [_IOC_NR(VIDIOC_DBG_G_CHIP_IDENT)] = "VIDIOC_DBG_G_CHIP_IDENT", 284 [_IOC_NR(VIDIOC_DBG_G_CHIP_IDENT)] = "VIDIOC_DBG_G_CHIP_IDENT",
285 [_IOC_NR(VIDIOC_S_HW_FREQ_SEEK)] = "VIDIOC_S_HW_FREQ_SEEK", 285 [_IOC_NR(VIDIOC_S_HW_FREQ_SEEK)] = "VIDIOC_S_HW_FREQ_SEEK",
286#endif 286#endif
287 [_IOC_NR(VIDIOC_ENUM_DV_PRESETS)] = "VIDIOC_ENUM_DV_PRESETS",
288 [_IOC_NR(VIDIOC_S_DV_PRESET)] = "VIDIOC_S_DV_PRESET",
289 [_IOC_NR(VIDIOC_G_DV_PRESET)] = "VIDIOC_G_DV_PRESET",
290 [_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET",
291 [_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS",
292 [_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS",
287}; 293};
288#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) 294#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
289 295
@@ -1135,6 +1141,19 @@ static long __video_do_ioctl(struct file *file,
1135 { 1141 {
1136 struct v4l2_input *p = arg; 1142 struct v4l2_input *p = arg;
1137 1143
1144 /*
1145 * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
 1146 * CAP_STD here based on the ioctl handlers provided by the
1147 * driver. If the driver doesn't support these
1148 * for a specific input, it must override these flags.
1149 */
1150 if (ops->vidioc_s_std)
1151 p->capabilities |= V4L2_IN_CAP_STD;
1152 if (ops->vidioc_s_dv_preset)
1153 p->capabilities |= V4L2_IN_CAP_PRESETS;
1154 if (ops->vidioc_s_dv_timings)
1155 p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS;
1156
1138 if (!ops->vidioc_enum_input) 1157 if (!ops->vidioc_enum_input)
1139 break; 1158 break;
1140 1159
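
With the core pre-filling these bits, a driver's vidioc_enum_input() only has to subtract what a particular input cannot do. A hypothetical handler (input names and layout are invented for the example) could look like this:

#include <linux/string.h>
#include <linux/videodev2.h>

static int example_enum_input(struct file *file, void *fh, struct v4l2_input *inp)
{
	if (inp->index > 1)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	if (inp->index == 0) {
		/* Analog input: keep V4L2_IN_CAP_STD, drop the digital caps. */
		strlcpy(inp->name, "Composite", sizeof(inp->name));
		inp->capabilities &= ~(V4L2_IN_CAP_PRESETS |
				       V4L2_IN_CAP_CUSTOM_TIMINGS);
	} else {
		/* Digital input: presets/timings apply, analog standards do not. */
		strlcpy(inp->name, "HDMI", sizeof(inp->name));
		inp->capabilities &= ~V4L2_IN_CAP_STD;
	}
	return 0;
}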
@@ -1179,6 +1198,19 @@ static long __video_do_ioctl(struct file *file,
1179 if (!ops->vidioc_enum_output) 1198 if (!ops->vidioc_enum_output)
1180 break; 1199 break;
1181 1200
1201 /*
1202 * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
 1203 * CAP_STD here based on the ioctl handlers provided by the
1204 * driver. If the driver doesn't support these
1205 * for a specific output, it must override these flags.
1206 */
1207 if (ops->vidioc_s_std)
1208 p->capabilities |= V4L2_OUT_CAP_STD;
1209 if (ops->vidioc_s_dv_preset)
1210 p->capabilities |= V4L2_OUT_CAP_PRESETS;
1211 if (ops->vidioc_s_dv_timings)
1212 p->capabilities |= V4L2_OUT_CAP_CUSTOM_TIMINGS;
1213
1182 ret = ops->vidioc_enum_output(file, fh, p); 1214 ret = ops->vidioc_enum_output(file, fh, p);
1183 if (!ret) 1215 if (!ret)
1184 dbgarg(cmd, "index=%d, name=%s, type=%d, " 1216 dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1794,6 +1826,121 @@ static long __video_do_ioctl(struct file *file,
1794 } 1826 }
1795 break; 1827 break;
1796 } 1828 }
1829 case VIDIOC_ENUM_DV_PRESETS:
1830 {
1831 struct v4l2_dv_enum_preset *p = arg;
1832
1833 if (!ops->vidioc_enum_dv_presets)
1834 break;
1835
1836 ret = ops->vidioc_enum_dv_presets(file, fh, p);
1837 if (!ret)
1838 dbgarg(cmd,
1839 "index=%d, preset=%d, name=%s, width=%d,"
1840 " height=%d ",
1841 p->index, p->preset, p->name, p->width,
1842 p->height);
1843 break;
1844 }
1845 case VIDIOC_S_DV_PRESET:
1846 {
1847 struct v4l2_dv_preset *p = arg;
1848
1849 if (!ops->vidioc_s_dv_preset)
1850 break;
1851
1852 dbgarg(cmd, "preset=%d\n", p->preset);
1853 ret = ops->vidioc_s_dv_preset(file, fh, p);
1854 break;
1855 }
1856 case VIDIOC_G_DV_PRESET:
1857 {
1858 struct v4l2_dv_preset *p = arg;
1859
1860 if (!ops->vidioc_g_dv_preset)
1861 break;
1862
1863 ret = ops->vidioc_g_dv_preset(file, fh, p);
1864 if (!ret)
1865 dbgarg(cmd, "preset=%d\n", p->preset);
1866 break;
1867 }
1868 case VIDIOC_QUERY_DV_PRESET:
1869 {
1870 struct v4l2_dv_preset *p = arg;
1871
1872 if (!ops->vidioc_query_dv_preset)
1873 break;
1874
1875 ret = ops->vidioc_query_dv_preset(file, fh, p);
1876 if (!ret)
1877 dbgarg(cmd, "preset=%d\n", p->preset);
1878 break;
1879 }
1880 case VIDIOC_S_DV_TIMINGS:
1881 {
1882 struct v4l2_dv_timings *p = arg;
1883
1884 if (!ops->vidioc_s_dv_timings)
1885 break;
1886
1887 switch (p->type) {
1888 case V4L2_DV_BT_656_1120:
1889 dbgarg2("bt-656/1120:interlaced=%d, pixelclock=%lld,"
1890 " width=%d, height=%d, polarities=%x,"
1891 " hfrontporch=%d, hsync=%d, hbackporch=%d,"
1892 " vfrontporch=%d, vsync=%d, vbackporch=%d,"
1893 " il_vfrontporch=%d, il_vsync=%d,"
1894 " il_vbackporch=%d\n",
1895 p->bt.interlaced, p->bt.pixelclock,
1896 p->bt.width, p->bt.height, p->bt.polarities,
1897 p->bt.hfrontporch, p->bt.hsync,
1898 p->bt.hbackporch, p->bt.vfrontporch,
1899 p->bt.vsync, p->bt.vbackporch,
1900 p->bt.il_vfrontporch, p->bt.il_vsync,
1901 p->bt.il_vbackporch);
1902 ret = ops->vidioc_s_dv_timings(file, fh, p);
1903 break;
1904 default:
1905 dbgarg2("Unknown type %d!\n", p->type);
1906 break;
1907 }
1908 break;
1909 }
1910 case VIDIOC_G_DV_TIMINGS:
1911 {
1912 struct v4l2_dv_timings *p = arg;
1913
1914 if (!ops->vidioc_g_dv_timings)
1915 break;
1916
1917 ret = ops->vidioc_g_dv_timings(file, fh, p);
1918 if (!ret) {
1919 switch (p->type) {
1920 case V4L2_DV_BT_656_1120:
1921 dbgarg2("bt-656/1120:interlaced=%d,"
1922 " pixelclock=%lld,"
1923 " width=%d, height=%d, polarities=%x,"
1924 " hfrontporch=%d, hsync=%d,"
1925 " hbackporch=%d, vfrontporch=%d,"
1926 " vsync=%d, vbackporch=%d,"
1927 " il_vfrontporch=%d, il_vsync=%d,"
1928 " il_vbackporch=%d\n",
1929 p->bt.interlaced, p->bt.pixelclock,
1930 p->bt.width, p->bt.height,
1931 p->bt.polarities, p->bt.hfrontporch,
1932 p->bt.hsync, p->bt.hbackporch,
1933 p->bt.vfrontporch, p->bt.vsync,
1934 p->bt.vbackporch, p->bt.il_vfrontporch,
1935 p->bt.il_vsync, p->bt.il_vbackporch);
1936 break;
1937 default:
1938 dbgarg2("Unknown type %d!\n", p->type);
1939 break;
1940 }
1941 }
1942 break;
1943 }
1797 1944
1798 default: 1945 default:
1799 { 1946 {
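
From userspace the new ioctls follow the usual VIDIOC pattern. A hedged sketch (the device path is an example, error handling is minimal, and it assumes a videodev2.h that already carries the DV preset additions from this series) that reads back the preset detected on the current input and makes it the active one:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_preset preset;
	int fd = open("/dev/video0", O_RDWR);	/* example node */

	if (fd < 0)
		return 1;

	memset(&preset, 0, sizeof(preset));
	if (ioctl(fd, VIDIOC_QUERY_DV_PRESET, &preset) == 0) {
		printf("detected preset %u\n", preset.preset);
		/* Program the detected preset as the active one. */
		if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
			perror("VIDIOC_S_DV_PRESET");
	} else {
		perror("VIDIOC_QUERY_DV_PRESET");
	}
	close(fd);
	return 0;
}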
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index d25f28461da1..22c01097e8a8 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -141,9 +141,11 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
141 struct vm_area_struct *vma; 141 struct vm_area_struct *vma;
142 unsigned long prev_pfn, this_pfn; 142 unsigned long prev_pfn, this_pfn;
143 unsigned long pages_done, user_address; 143 unsigned long pages_done, user_address;
144 unsigned int offset;
144 int ret; 145 int ret;
145 146
146 mem->size = PAGE_ALIGN(vb->size); 147 offset = vb->baddr & ~PAGE_MASK;
148 mem->size = PAGE_ALIGN(vb->size + offset);
147 mem->is_userptr = 0; 149 mem->is_userptr = 0;
148 ret = -EINVAL; 150 ret = -EINVAL;
149 151
@@ -166,7 +168,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
166 break; 168 break;
167 169
168 if (pages_done == 0) 170 if (pages_done == 0)
169 mem->dma_handle = this_pfn << PAGE_SHIFT; 171 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
170 else if (this_pfn != (prev_pfn + 1)) 172 else if (this_pfn != (prev_pfn + 1))
171 ret = -EFAULT; 173 ret = -EFAULT;
172 174
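
The point of the hunk above is that a USERPTR buffer need not start on a page boundary: the mapped size must also cover the bytes between the start of the first page and vb->baddr, and the bus address handed to the device must carry that sub-page offset again. A stand-alone illustration of the arithmetic (fixed 4 KiB pages, sample address chosen arbitrarily):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long baddr = 0x10002340UL;	/* user pointer, not page aligned */
	unsigned long size  = 614400UL;		/* one 640x480 YUYV frame */

	unsigned long offset = baddr & ~PAGE_MASK;		/* 0x340 */
	unsigned long mapped = PAGE_ALIGN(size + offset);	/* rounds up past the tail */
	/* In the driver the pfn comes from the page tables of the mapping;
	 * here it is derived from baddr itself, so dma simply equals baddr. */
	unsigned long dma = ((baddr >> PAGE_SHIFT) << PAGE_SHIFT) + offset;

	printf("offset=%#lx mapped=%lu dma=%#lx\n", offset, mapped, dma);
	return 0;
}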
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index b034a81d2b1c..a15d1e7cbed8 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -4068,7 +4068,6 @@ static struct video_device vdev_template = {
4068 .fops = &vino_fops, 4068 .fops = &vino_fops,
4069 .ioctl_ops = &vino_ioctl_ops, 4069 .ioctl_ops = &vino_ioctl_ops,
4070 .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, 4070 .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
4071 .minor = -1,
4072}; 4071};
4073 4072
4074static void vino_module_cleanup(int stage) 4073static void vino_module_cleanup(int stage)
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 7705fc6baf00..37632a064966 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1148,7 +1148,8 @@ static int vivi_open(struct file *file)
1148 return -EBUSY; 1148 return -EBUSY;
1149 } 1149 }
1150 1150
1151 dprintk(dev, 1, "open /dev/video%d type=%s users=%d\n", dev->vfd->num, 1151 dprintk(dev, 1, "open %s type=%s users=%d\n",
1152 video_device_node_name(dev->vfd),
1152 v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users); 1153 v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
1153 1154
1154 /* allocate + initialize per filehandle data */ 1155 /* allocate + initialize per filehandle data */
@@ -1221,8 +1222,7 @@ static int vivi_close(struct file *file)
1221 struct vivi_fh *fh = file->private_data; 1222 struct vivi_fh *fh = file->private_data;
1222 struct vivi_dev *dev = fh->dev; 1223 struct vivi_dev *dev = fh->dev;
1223 struct vivi_dmaqueue *vidq = &dev->vidq; 1224 struct vivi_dmaqueue *vidq = &dev->vidq;
1224 1225 struct video_device *vdev = video_devdata(file);
1225 int minor = video_devdata(file)->minor;
1226 1226
1227 vivi_stop_thread(vidq); 1227 vivi_stop_thread(vidq);
1228 videobuf_stop(&fh->vb_vidq); 1228 videobuf_stop(&fh->vb_vidq);
@@ -1234,8 +1234,8 @@ static int vivi_close(struct file *file)
1234 dev->users--; 1234 dev->users--;
1235 mutex_unlock(&dev->mutex); 1235 mutex_unlock(&dev->mutex);
1236 1236
1237 dprintk(dev, 1, "close called (minor=%d, users=%d)\n", 1237 dprintk(dev, 1, "close called (dev=%s, users=%d)\n",
1238 minor, dev->users); 1238 video_device_node_name(vdev), dev->users);
1239 1239
1240 return 0; 1240 return 0;
1241} 1241}
@@ -1296,7 +1296,6 @@ static struct video_device vivi_template = {
1296 .name = "vivi", 1296 .name = "vivi",
1297 .fops = &vivi_fops, 1297 .fops = &vivi_fops,
1298 .ioctl_ops = &vivi_ioctl_ops, 1298 .ioctl_ops = &vivi_ioctl_ops,
1299 .minor = -1,
1300 .release = video_device_release, 1299 .release = video_device_release,
1301 1300
1302 .tvnorms = V4L2_STD_525_60, 1301 .tvnorms = V4L2_STD_525_60,
@@ -1317,8 +1316,8 @@ static int vivi_release(void)
1317 list_del(list); 1316 list_del(list);
1318 dev = list_entry(list, struct vivi_dev, vivi_devlist); 1317 dev = list_entry(list, struct vivi_dev, vivi_devlist);
1319 1318
1320 v4l2_info(&dev->v4l2_dev, "unregistering /dev/video%d\n", 1319 v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
1321 dev->vfd->num); 1320 video_device_node_name(dev->vfd));
1322 video_unregister_device(dev->vfd); 1321 video_unregister_device(dev->vfd);
1323 v4l2_device_unregister(&dev->v4l2_dev); 1322 v4l2_device_unregister(&dev->v4l2_dev);
1324 kfree(dev); 1323 kfree(dev);
@@ -1372,15 +1371,12 @@ static int __init vivi_create_instance(int inst)
1372 /* Now that everything is fine, let's add it to device list */ 1371 /* Now that everything is fine, let's add it to device list */
1373 list_add_tail(&dev->vivi_devlist, &vivi_devlist); 1372 list_add_tail(&dev->vivi_devlist, &vivi_devlist);
1374 1373
1375 snprintf(vfd->name, sizeof(vfd->name), "%s (%i)",
1376 vivi_template.name, vfd->num);
1377
1378 if (video_nr >= 0) 1374 if (video_nr >= 0)
1379 video_nr++; 1375 video_nr++;
1380 1376
1381 dev->vfd = vfd; 1377 dev->vfd = vfd;
1382 v4l2_info(&dev->v4l2_dev, "V4L2 device registered as /dev/video%d\n", 1378 v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
1383 vfd->num); 1379 video_device_node_name(vfd));
1384 return 0; 1380 return 0;
1385 1381
1386rel_vdev: 1382rel_vdev:
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index 37fcdc447db5..d807eea91757 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -2323,9 +2323,9 @@ static int w9968cf_sensor_init(struct w9968cf_device* cam)
2323error: 2323error:
2324 cam->sensor_initialized = 0; 2324 cam->sensor_initialized = 0;
2325 cam->sensor = CC_UNKNOWN; 2325 cam->sensor = CC_UNKNOWN;
2326 DBG(1, "Image sensor initialization failed for %s (/dev/video%d). " 2326 DBG(1, "Image sensor initialization failed for %s (%s). "
2327 "Try to detach and attach this device again", 2327 "Try to detach and attach this device again",
2328 symbolic(camlist, cam->id), cam->v4ldev->num) 2328 symbolic(camlist, cam->id), video_device_node_name(cam->v4ldev))
2329 return err; 2329 return err;
2330} 2330}
2331 2331
@@ -2571,7 +2571,8 @@ static void w9968cf_release_resources(struct w9968cf_device* cam)
2571{ 2571{
2572 mutex_lock(&w9968cf_devlist_mutex); 2572 mutex_lock(&w9968cf_devlist_mutex);
2573 2573
2574 DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->num) 2574 DBG(2, "V4L device deregistered: %s",
2575 video_device_node_name(cam->v4ldev))
2575 2576
2576 video_unregister_device(cam->v4ldev); 2577 video_unregister_device(cam->v4ldev);
2577 list_del(&cam->v4llist); 2578 list_del(&cam->v4llist);
@@ -2605,17 +2606,19 @@ static int w9968cf_open(struct file *filp)
2605 2606
2606 if (cam->sensor == CC_UNKNOWN) { 2607 if (cam->sensor == CC_UNKNOWN) {
2607 DBG(2, "No supported image sensor has been detected by the " 2608 DBG(2, "No supported image sensor has been detected by the "
2608 "'ovcamchip' module for the %s (/dev/video%d). Make " 2609 "'ovcamchip' module for the %s (%s). Make sure "
2609 "sure it is loaded *before* (re)connecting the camera.", 2610 "it is loaded *before* (re)connecting the camera.",
2610 symbolic(camlist, cam->id), cam->v4ldev->num) 2611 symbolic(camlist, cam->id),
2612 video_device_node_name(cam->v4ldev))
2611 mutex_unlock(&cam->dev_mutex); 2613 mutex_unlock(&cam->dev_mutex);
2612 up_read(&w9968cf_disconnect); 2614 up_read(&w9968cf_disconnect);
2613 return -ENODEV; 2615 return -ENODEV;
2614 } 2616 }
2615 2617
2616 if (cam->users) { 2618 if (cam->users) {
2617 DBG(2, "%s (/dev/video%d) has been already occupied by '%s'", 2619 DBG(2, "%s (%s) has been already occupied by '%s'",
2618 symbolic(camlist, cam->id), cam->v4ldev->num, cam->command) 2620 symbolic(camlist, cam->id),
2621 video_device_node_name(cam->v4ldev), cam->command)
2619 if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) { 2622 if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) {
2620 mutex_unlock(&cam->dev_mutex); 2623 mutex_unlock(&cam->dev_mutex);
2621 up_read(&w9968cf_disconnect); 2624 up_read(&w9968cf_disconnect);
@@ -2636,8 +2639,8 @@ static int w9968cf_open(struct file *filp)
2636 mutex_lock(&cam->dev_mutex); 2639 mutex_lock(&cam->dev_mutex);
2637 } 2640 }
2638 2641
2639 DBG(5, "Opening '%s', /dev/video%d ...", 2642 DBG(5, "Opening '%s', %s ...",
2640 symbolic(camlist, cam->id), cam->v4ldev->num) 2643 symbolic(camlist, cam->id), video_device_node_name(cam->v4ldev))
2641 2644
2642 cam->streaming = 0; 2645 cam->streaming = 0;
2643 cam->misconfigured = 0; 2646 cam->misconfigured = 0;
@@ -2874,8 +2877,7 @@ static long w9968cf_v4l_ioctl(struct file *filp,
2874 .minwidth = cam->minwidth, 2877 .minwidth = cam->minwidth,
2875 .minheight = cam->minheight, 2878 .minheight = cam->minheight,
2876 }; 2879 };
2877 sprintf(cap.name, "W996[87]CF USB Camera #%d", 2880 sprintf(cap.name, "W996[87]CF USB Camera");
2878 cam->v4ldev->num);
2879 cap.maxwidth = (cam->upscaling && w9968cf_vpp) 2881 cap.maxwidth = (cam->upscaling && w9968cf_vpp)
2880 ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth) 2882 ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
2881 : cam->maxwidth; 2883 : cam->maxwidth;
@@ -3485,7 +3487,6 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3485 3487
3486 strcpy(cam->v4ldev->name, symbolic(camlist, mod_id)); 3488 strcpy(cam->v4ldev->name, symbolic(camlist, mod_id));
3487 cam->v4ldev->fops = &w9968cf_fops; 3489 cam->v4ldev->fops = &w9968cf_fops;
3488 cam->v4ldev->minor = video_nr[dev_nr];
3489 cam->v4ldev->release = video_device_release; 3490 cam->v4ldev->release = video_device_release;
3490 video_set_drvdata(cam->v4ldev, cam); 3491 video_set_drvdata(cam->v4ldev, cam);
3491 cam->v4ldev->v4l2_dev = &cam->v4l2_dev; 3492 cam->v4ldev->v4l2_dev = &cam->v4l2_dev;
@@ -3501,7 +3502,8 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3501 goto fail; 3502 goto fail;
3502 } 3503 }
3503 3504
3504 DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->num) 3505 DBG(2, "V4L device registered as %s",
3506 video_device_node_name(cam->v4ldev))
3505 3507
3506 /* Set some basic constants */ 3508 /* Set some basic constants */
3507 w9968cf_configure_camera(cam, udev, mod_id, dev_nr); 3509 w9968cf_configure_camera(cam, udev, mod_id, dev_nr);
@@ -3557,10 +3559,10 @@ static void w9968cf_usb_disconnect(struct usb_interface* intf)
3557 wake_up_interruptible_all(&cam->open); 3559 wake_up_interruptible_all(&cam->open);
3558 3560
3559 if (cam->users) { 3561 if (cam->users) {
3560 DBG(2, "The device is open (/dev/video%d)! " 3562 DBG(2, "The device is open (%s)! "
3561 "Process name: %s. Deregistration and memory " 3563 "Process name: %s. Deregistration and memory "
3562 "deallocation are deferred on close.", 3564 "deallocation are deferred on close.",
3563 cam->v4ldev->num, cam->command) 3565 video_device_node_name(cam->v4ldev), cam->command)
3564 cam->misconfigured = 1; 3566 cam->misconfigured = 1;
3565 w9968cf_stop_transfer(cam); 3567 w9968cf_stop_transfer(cam);
3566 wake_up_interruptible(&cam->wait_queue); 3568 wake_up_interruptible(&cam->wait_queue);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 312a71336fd0..e44e4b5f3e50 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -538,8 +538,8 @@ static int zc0301_stream_interrupt(struct zc0301_device* cam)
538 else if (cam->stream != STREAM_OFF) { 538 else if (cam->stream != STREAM_OFF) {
539 cam->state |= DEV_MISCONFIGURED; 539 cam->state |= DEV_MISCONFIGURED;
540 DBG(1, "URB timeout reached. The camera is misconfigured. To " 540 DBG(1, "URB timeout reached. The camera is misconfigured. To "
541 "use it, close and open /dev/video%d again.", 541 "use it, close and open %s again.",
542 cam->v4ldev->num); 542 video_device_node_name(cam->v4ldev));
543 return -EIO; 543 return -EIO;
544 } 544 }
545 545
@@ -640,7 +640,8 @@ static void zc0301_release_resources(struct kref *kref)
640{ 640{
641 struct zc0301_device *cam = container_of(kref, struct zc0301_device, 641 struct zc0301_device *cam = container_of(kref, struct zc0301_device,
642 kref); 642 kref);
643 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num); 643 DBG(2, "V4L2 device %s deregistered",
644 video_device_node_name(cam->v4ldev));
644 video_set_drvdata(cam->v4ldev, NULL); 645 video_set_drvdata(cam->v4ldev, NULL);
645 video_unregister_device(cam->v4ldev); 646 video_unregister_device(cam->v4ldev);
646 usb_put_dev(cam->usbdev); 647 usb_put_dev(cam->usbdev);
@@ -679,7 +680,8 @@ static int zc0301_open(struct file *filp)
679 } 680 }
680 681
681 if (cam->users) { 682 if (cam->users) {
682 DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->num); 683 DBG(2, "Device %s is busy...",
684 video_device_node_name(cam->v4ldev));
683 DBG(3, "Simultaneous opens are not supported"); 685 DBG(3, "Simultaneous opens are not supported");
684 if ((filp->f_flags & O_NONBLOCK) || 686 if ((filp->f_flags & O_NONBLOCK) ||
685 (filp->f_flags & O_NDELAY)) { 687 (filp->f_flags & O_NDELAY)) {
@@ -722,7 +724,8 @@ static int zc0301_open(struct file *filp)
722 cam->frame_count = 0; 724 cam->frame_count = 0;
723 zc0301_empty_framequeues(cam); 725 zc0301_empty_framequeues(cam);
724 726
725 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num); 727 DBG(3, "Video device %s is open",
728 video_device_node_name(cam->v4ldev));
726 729
727out: 730out:
728 mutex_unlock(&cam->open_mutex); 731 mutex_unlock(&cam->open_mutex);
@@ -746,7 +749,8 @@ static int zc0301_release(struct file *filp)
746 cam->users--; 749 cam->users--;
747 wake_up_interruptible_nr(&cam->wait_open, 1); 750 wake_up_interruptible_nr(&cam->wait_open, 1);
748 751
749 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num); 752 DBG(3, "Video device %s closed",
753 video_device_node_name(cam->v4ldev));
750 754
751 kref_put(&cam->kref, zc0301_release_resources); 755 kref_put(&cam->kref, zc0301_release_resources);
752 756
@@ -1276,8 +1280,8 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
1276 if (err) { /* atomic, no rollback in ioctl() */ 1280 if (err) { /* atomic, no rollback in ioctl() */
1277 cam->state |= DEV_MISCONFIGURED; 1281 cam->state |= DEV_MISCONFIGURED;
1278 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 1282 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
1279 "use the camera, close and open /dev/video%d again.", 1283 "use the camera, close and open %s again.",
1280 cam->v4ldev->num); 1284 video_device_node_name(cam->v4ldev));
1281 return -EIO; 1285 return -EIO;
1282 } 1286 }
1283 1287
@@ -1289,8 +1293,8 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
1289 nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) { 1293 nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) {
1290 cam->state |= DEV_MISCONFIGURED; 1294 cam->state |= DEV_MISCONFIGURED;
1291 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 1295 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
1292 "use the camera, close and open /dev/video%d again.", 1296 "use the camera, close and open %s again.",
1293 cam->v4ldev->num); 1297 video_device_node_name(cam->v4ldev));
1294 return -ENOMEM; 1298 return -ENOMEM;
1295 } 1299 }
1296 1300
@@ -1471,8 +1475,8 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
1471 if (err) { /* atomic, no rollback in ioctl() */ 1475 if (err) { /* atomic, no rollback in ioctl() */
1472 cam->state |= DEV_MISCONFIGURED; 1476 cam->state |= DEV_MISCONFIGURED;
1473 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 1477 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
1474 "use the camera, close and open /dev/video%d again.", 1478 "use the camera, close and open %s again.",
1475 cam->v4ldev->num); 1479 video_device_node_name(cam->v4ldev));
1476 return -EIO; 1480 return -EIO;
1477 } 1481 }
1478 1482
@@ -1483,8 +1487,8 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
1483 nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) { 1487 nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) {
1484 cam->state |= DEV_MISCONFIGURED; 1488 cam->state |= DEV_MISCONFIGURED;
1485 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 1489 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
1486 "use the camera, close and open /dev/video%d again.", 1490 "use the camera, close and open %s again.",
1487 cam->v4ldev->num); 1491 video_device_node_name(cam->v4ldev));
1488 return -ENOMEM; 1492 return -ENOMEM;
1489 } 1493 }
1490 1494
@@ -1530,8 +1534,8 @@ zc0301_vidioc_s_jpegcomp(struct zc0301_device* cam, void __user * arg)
1530 if (err) { /* atomic, no rollback in ioctl() */ 1534 if (err) { /* atomic, no rollback in ioctl() */
1531 cam->state |= DEV_MISCONFIGURED; 1535 cam->state |= DEV_MISCONFIGURED;
1532 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 1536 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
1533 "problems. To use the camera, close and open " 1537 "problems. To use the camera, close and open %s again.",
1534 "/dev/video%d again.", cam->v4ldev->num); 1538 video_device_node_name(cam->v4ldev));
1535 return -EIO; 1539 return -EIO;
1536 } 1540 }
1537 1541
@@ -1984,7 +1988,6 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
1984 1988
1985 strcpy(cam->v4ldev->name, "ZC0301[P] PC Camera"); 1989 strcpy(cam->v4ldev->name, "ZC0301[P] PC Camera");
1986 cam->v4ldev->fops = &zc0301_fops; 1990 cam->v4ldev->fops = &zc0301_fops;
1987 cam->v4ldev->minor = video_nr[dev_nr];
1988 cam->v4ldev->release = video_device_release; 1991 cam->v4ldev->release = video_device_release;
1989 cam->v4ldev->parent = &udev->dev; 1992 cam->v4ldev->parent = &udev->dev;
1990 video_set_drvdata(cam->v4ldev, cam); 1993 video_set_drvdata(cam->v4ldev, cam);
@@ -2003,7 +2006,8 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2003 goto fail; 2006 goto fail;
2004 } 2007 }
2005 2008
2006 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num); 2009 DBG(2, "V4L2 device registered as %s",
2010 video_device_node_name(cam->v4ldev));
2007 2011
2008 cam->module_param.force_munmap = force_munmap[dev_nr]; 2012 cam->module_param.force_munmap = force_munmap[dev_nr];
2009 cam->module_param.frame_timeout = frame_timeout[dev_nr]; 2013 cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2040,9 +2044,9 @@ static void zc0301_usb_disconnect(struct usb_interface* intf)
2040 DBG(2, "Disconnecting %s...", cam->v4ldev->name); 2044 DBG(2, "Disconnecting %s...", cam->v4ldev->name);
2041 2045
2042 if (cam->users) { 2046 if (cam->users) {
2043 DBG(2, "Device /dev/video%d is open! Deregistration and " 2047 DBG(2, "Device %s is open! Deregistration and "
2044 "memory deallocation are deferred.", 2048 "memory deallocation are deferred.",
2045 cam->v4ldev->num); 2049 video_device_node_name(cam->v4ldev));
2046 cam->state |= DEV_MISCONFIGURED; 2050 cam->state |= DEV_MISCONFIGURED;
2047 zc0301_stop_transfer(cam); 2051 zc0301_stop_transfer(cam);
2048 cam->state |= DEV_DISCONNECTED; 2052 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index e9f72ca458f1..2ddffed019ee 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -3387,6 +3387,5 @@ struct video_device zoran_template __devinitdata = {
3387 .ioctl_ops = &zoran_ioctl_ops, 3387 .ioctl_ops = &zoran_ioctl_ops,
3388 .release = &zoran_vdev_release, 3388 .release = &zoran_vdev_release,
3389 .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, 3389 .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
3390 .minor = -1
3391}; 3390};
3392 3391
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 2ef110b5221b..f0eae83e3d89 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -1455,7 +1455,6 @@ static struct video_device zr364xx_template = {
1455 .fops = &zr364xx_fops, 1455 .fops = &zr364xx_fops,
1456 .ioctl_ops = &zr364xx_ioctl_ops, 1456 .ioctl_ops = &zr364xx_ioctl_ops,
1457 .release = video_device_release, 1457 .release = video_device_release,
1458 .minor = -1,
1459}; 1458};
1460 1459
1461 1460
@@ -1635,8 +1634,8 @@ static int zr364xx_probe(struct usb_interface *intf,
1635 1634
1636 spin_lock_init(&cam->slock); 1635 spin_lock_init(&cam->slock);
1637 1636
1638 dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n", 1637 dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
1639 cam->vdev->num); 1638 video_device_node_name(cam->vdev));
1640 return 0; 1639 return 0;
1641} 1640}
1642 1641
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index 77b914907d7c..700b149c1b91 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -26,7 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/i2c/twl4030.h> 29#include <linux/i2c/twl.h>
30#include <linux/mfd/core.h> 30#include <linux/mfd/core.h>
31#include <linux/mfd/twl4030-codec.h> 31#include <linux/mfd/twl4030-codec.h>
32 32
@@ -56,7 +56,7 @@ static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
56 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev); 56 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
57 u8 val; 57 u8 val;
58 58
59 twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val, 59 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
60 codec->resource[id].reg); 60 codec->resource[id].reg);
61 61
62 if (enable) 62 if (enable)
@@ -64,7 +64,7 @@ static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
64 else 64 else
65 val &= ~codec->resource[id].mask; 65 val &= ~codec->resource[id].mask;
66 66
67 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 67 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
68 val, codec->resource[id].reg); 68 val, codec->resource[id].reg);
69 69
70 return val; 70 return val;
@@ -75,7 +75,7 @@ static inline int twl4030_codec_get_resource(enum twl4030_codec_res id)
75 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev); 75 struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
76 u8 val; 76 u8 val;
77 77
78 twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val, 78 twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
79 codec->resource[id].reg); 79 codec->resource[id].reg);
80 80
81 return val; 81 return val;
@@ -183,7 +183,7 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
183 dev_err(&pdev->dev, "Invalid audio_mclk\n"); 183 dev_err(&pdev->dev, "Invalid audio_mclk\n");
184 return -EINVAL; 184 return -EINVAL;
185 } 185 }
186 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 186 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
187 val, TWL4030_REG_APLL_CTL); 187 val, TWL4030_REG_APLL_CTL);
188 188
189 codec = kzalloc(sizeof(struct twl4030_codec), GFP_KERNEL); 189 codec = kzalloc(sizeof(struct twl4030_codec), GFP_KERNEL);
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 223a90c7492f..4b2021af1d96 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -809,6 +809,9 @@ static struct resource wm831x_wdt_resources[] = {
809 809
810static struct mfd_cell wm8310_devs[] = { 810static struct mfd_cell wm8310_devs[] = {
811 { 811 {
812 .name = "wm831x-backup",
813 },
814 {
812 .name = "wm831x-buckv", 815 .name = "wm831x-buckv",
813 .id = 1, 816 .id = 1,
814 .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), 817 .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -962,6 +965,9 @@ static struct mfd_cell wm8310_devs[] = {
962 965
963static struct mfd_cell wm8311_devs[] = { 966static struct mfd_cell wm8311_devs[] = {
964 { 967 {
968 .name = "wm831x-backup",
969 },
970 {
965 .name = "wm831x-buckv", 971 .name = "wm831x-buckv",
966 .id = 1, 972 .id = 1,
967 .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), 973 .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -1096,6 +1102,9 @@ static struct mfd_cell wm8311_devs[] = {
1096 1102
1097static struct mfd_cell wm8312_devs[] = { 1103static struct mfd_cell wm8312_devs[] = {
1098 { 1104 {
1105 .name = "wm831x-backup",
1106 },
1107 {
1099 .name = "wm831x-buckv", 1108 .name = "wm831x-buckv",
1100 .id = 1, 1109 .id = 1,
1101 .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), 1110 .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2c16ca6501d5..59f4ba1b7034 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -13,6 +13,20 @@ menuconfig MISC_DEVICES
13 13
14if MISC_DEVICES 14if MISC_DEVICES
15 15
16config AD525X_DPOT
17 tristate "Analog Devices AD525x Digital Potentiometers"
18 depends on I2C && SYSFS
19 help
20 If you say yes here, you get support for the Analog Devices
21 AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255
22 digital potentiometer chips.
23
24 See Documentation/misc-devices/ad525x_dpot.txt for the
25 userspace interface.
26
27 This driver can also be built as a module. If so, the module
28 will be called ad525x_dpot.
29
16config ATMEL_PWM 30config ATMEL_PWM
17 tristate "Atmel AT32/AT91 PWM support" 31 tristate "Atmel AT32/AT91 PWM support"
18 depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 32 depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
@@ -173,6 +187,30 @@ config SGI_XP
173 this feature will allow for direct communication between SSIs 187 this feature will allow for direct communication between SSIs
174 based on a network adapter and DMA messaging. 188 based on a network adapter and DMA messaging.
175 189
190config CS5535_MFGPT
191 tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
192 depends on PCI
193 depends on X86
194 default n
195 help
196 This driver provides access to MFGPT functionality for other
197 drivers that need timers. MFGPTs are available in the CS5535 and
198 CS5536 companion chips that are found in AMD Geode and several
199 other platforms. They have a better resolution and max interval
200 than the generic PIT, and are suitable for use as high-res timers.
201 You probably don't want to enable this manually; other drivers that
202 make use of it should enable it.
203
204config CS5535_MFGPT_DEFAULT_IRQ
205 int
206 default 7
207 help
208 MFGPTs on the CS5535 require an interrupt. The selected IRQ
 209 can be overridden as a module option as well as by drivers that
210 use the cs5535_mfgpt_ API; however, different architectures might
211 want to use a different IRQ by default. This is here for
212 architectures to set as necessary.
213
176config HP_ILO 214config HP_ILO
177 tristate "Channel interface driver for HP iLO/iLO2 processor" 215 tristate "Channel interface driver for HP iLO/iLO2 processor"
178 depends on PCI 216 depends on PCI
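
The consumer model the help text describes maps onto the API added by this patch in drivers/misc/cs5535-mfgpt.c (declared via <linux/cs5535.h>). A hedged sketch of another driver grabbing a timer and routing an IRQ (names are invented; teardown is omitted):

#include <linux/kernel.h>
#include <linux/cs5535.h>

static struct cs5535_mfgpt_timer *example_timer;

static int example_grab_mfgpt(void)
{
	int irq = 0;	/* 0 = keep the routing already set up, or the default */

	/* Any free timer from the working domain (timers 0-5). */
	example_timer = cs5535_mfgpt_alloc_timer(-1, MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	/* Route comparator 1 of this timer to an interrupt line. */
	if (cs5535_mfgpt_set_irq(example_timer, MFGPT_CMP1, &irq, 1))
		return -EIO;

	pr_info("MFGPT timer on IRQ %d\n", irq);
	return 0;
}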
@@ -256,6 +294,16 @@ config DS1682
256 This driver can also be built as a module. If so, the module 294 This driver can also be built as a module. If so, the module
257 will be called ds1682. 295 will be called ds1682.
258 296
297config TI_DAC7512
298 tristate "Texas Instruments DAC7512"
299 depends on SPI && SYSFS
300 help
301 If you say yes here you get support for the Texas Instruments
302 DAC7512 16-bit digital-to-analog converter.
303
304 This driver can also be built as a module. If so, the module
 305 will be called ti_dac7512.
306
259source "drivers/misc/c2port/Kconfig" 307source "drivers/misc/c2port/Kconfig"
260source "drivers/misc/eeprom/Kconfig" 308source "drivers/misc/eeprom/Kconfig"
261source "drivers/misc/cb710/Kconfig" 309source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 906a0edcea40..049ff2482f30 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_IBM_ASM) += ibmasm/ 5obj-$(CONFIG_IBM_ASM) += ibmasm/
6obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 6obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
7obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
7obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o 8obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
8obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o 9obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
9obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o 10obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
@@ -17,10 +18,12 @@ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
17obj-$(CONFIG_KGDB_TESTS) += kgdbts.o 18obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
18obj-$(CONFIG_SGI_XP) += sgi-xp/ 19obj-$(CONFIG_SGI_XP) += sgi-xp/
19obj-$(CONFIG_SGI_GRU) += sgi-gru/ 20obj-$(CONFIG_SGI_GRU) += sgi-gru/
21obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
20obj-$(CONFIG_HP_ILO) += hpilo.o 22obj-$(CONFIG_HP_ILO) += hpilo.o
21obj-$(CONFIG_ISL29003) += isl29003.o 23obj-$(CONFIG_ISL29003) += isl29003.o
22obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o 24obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
23obj-$(CONFIG_DS1682) += ds1682.o 25obj-$(CONFIG_DS1682) += ds1682.o
26obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
24obj-$(CONFIG_C2PORT) += c2port/ 27obj-$(CONFIG_C2PORT) += c2port/
25obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ 28obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
26obj-y += eeprom/ 29obj-y += eeprom/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
new file mode 100644
index 000000000000..30a59f2bacd2
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.c
@@ -0,0 +1,666 @@
1/*
2 * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers
3 * Copyright (c) 2009 Analog Devices, Inc.
4 * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
5 *
6 * DEVID #Wipers #Positions Resistor Options (kOhm)
7 * AD5258 1 64 1, 10, 50, 100
8 * AD5259 1 256 5, 10, 50, 100
9 * AD5251 2 64 1, 10, 50, 100
10 * AD5252 2 256 1, 10, 50, 100
11 * AD5255 3 512 25, 250
12 * AD5253 4 64 1, 10, 50, 100
13 * AD5254 4 256 1, 10, 50, 100
14 *
15 * See Documentation/misc-devices/ad525x_dpot.txt for more info.
16 *
17 * derived from ad5258.c
18 * Copyright (c) 2009 Cyber Switching, Inc.
19 * Author: Chris Verges <chrisv@cyberswitching.com>
20 *
21 * derived from ad5252.c
22 * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
23 *
24 * Licensed under the GPL-2 or later.
25 */
26
27#include <linux/module.h>
28#include <linux/device.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/slab.h>
32#include <linux/i2c.h>
33#include <linux/delay.h>
34
35#define DRIVER_NAME "ad525x_dpot"
36#define DRIVER_VERSION "0.1"
37
38enum dpot_devid {
39 AD5258_ID,
40 AD5259_ID,
41 AD5251_ID,
42 AD5252_ID,
43 AD5253_ID,
44 AD5254_ID,
45 AD5255_ID,
46};
47
48#define AD5258_MAX_POSITION 64
49#define AD5259_MAX_POSITION 256
50#define AD5251_MAX_POSITION 64
51#define AD5252_MAX_POSITION 256
52#define AD5253_MAX_POSITION 64
53#define AD5254_MAX_POSITION 256
54#define AD5255_MAX_POSITION 512
55
56#define AD525X_RDAC0 0
57#define AD525X_RDAC1 1
58#define AD525X_RDAC2 2
59#define AD525X_RDAC3 3
60
61#define AD525X_REG_TOL 0x18
62#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0)
63#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1)
64#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2)
65#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3)
66
67/* RDAC-to-EEPROM Interface Commands */
68#define AD525X_I2C_RDAC (0x00 << 5)
69#define AD525X_I2C_EEPROM (0x01 << 5)
70#define AD525X_I2C_CMD (0x80)
71
72#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3))
73#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3))
74#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3))
75#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3))
76
77static s32 ad525x_read(struct i2c_client *client, u8 reg);
78static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
79
80/*
81 * Client data (each client gets its own)
82 */
83
84struct dpot_data {
85 struct mutex update_lock;
86 unsigned rdac_mask;
87 unsigned max_pos;
88 unsigned devid;
89};
90
91/* sysfs functions */
92
93static ssize_t sysfs_show_reg(struct device *dev,
94 struct device_attribute *attr, char *buf, u32 reg)
95{
96 struct i2c_client *client = to_i2c_client(dev);
97 struct dpot_data *data = i2c_get_clientdata(client);
98 s32 value;
99
100 mutex_lock(&data->update_lock);
101 value = ad525x_read(client, reg);
102 mutex_unlock(&data->update_lock);
103
104 if (value < 0)
105 return -EINVAL;
106 /*
107 * Let someone else deal with converting this ...
108 * the tolerance is a two-byte value where the MSB
109 * is a sign + integer value, and the LSB is a
110 * decimal value. See page 18 of the AD5258
111 * datasheet (Rev. A) for more details.
112 */
113
114 if (reg & AD525X_REG_TOL)
115 return sprintf(buf, "0x%04x\n", value & 0xFFFF);
116 else
117 return sprintf(buf, "%u\n", value & data->rdac_mask);
118}
119
120static ssize_t sysfs_set_reg(struct device *dev,
121 struct device_attribute *attr,
122 const char *buf, size_t count, u32 reg)
123{
124 struct i2c_client *client = to_i2c_client(dev);
125 struct dpot_data *data = i2c_get_clientdata(client);
126 unsigned long value;
127 int err;
128
129 err = strict_strtoul(buf, 10, &value);
130 if (err)
131 return err;
132
133 if (value > data->rdac_mask)
134 value = data->rdac_mask;
135
136 mutex_lock(&data->update_lock);
137 ad525x_write(client, reg, value);
138 if (reg & AD525X_I2C_EEPROM)
139 msleep(26); /* Sleep while the EEPROM updates */
140 mutex_unlock(&data->update_lock);
141
142 return count;
143}
144
145static ssize_t sysfs_do_cmd(struct device *dev,
146 struct device_attribute *attr,
147 const char *buf, size_t count, u32 reg)
148{
149 struct i2c_client *client = to_i2c_client(dev);
150 struct dpot_data *data = i2c_get_clientdata(client);
151
152 mutex_lock(&data->update_lock);
153 ad525x_write(client, reg, 0);
154 mutex_unlock(&data->update_lock);
155
156 return count;
157}
158
159/* ------------------------------------------------------------------------- */
160
161static ssize_t show_rdac0(struct device *dev,
162 struct device_attribute *attr, char *buf)
163{
164 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0);
165}
166
167static ssize_t set_rdac0(struct device *dev,
168 struct device_attribute *attr,
169 const char *buf, size_t count)
170{
171 return sysfs_set_reg(dev, attr, buf, count,
172 AD525X_I2C_RDAC | AD525X_RDAC0);
173}
174
175static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
176
177static ssize_t show_eeprom0(struct device *dev,
178 struct device_attribute *attr, char *buf)
179{
180 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0);
181}
182
183static ssize_t set_eeprom0(struct device *dev,
184 struct device_attribute *attr,
185 const char *buf, size_t count)
186{
187 return sysfs_set_reg(dev, attr, buf, count,
188 AD525X_I2C_EEPROM | AD525X_RDAC0);
189}
190
191static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0);
192
193static ssize_t show_tolerance0(struct device *dev,
194 struct device_attribute *attr, char *buf)
195{
196 return sysfs_show_reg(dev, attr, buf,
197 AD525X_I2C_EEPROM | AD525X_TOL_RDAC0);
198}
199
200static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL);
201
202/* ------------------------------------------------------------------------- */
203
204static ssize_t show_rdac1(struct device *dev,
205 struct device_attribute *attr, char *buf)
206{
207 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1);
208}
209
210static ssize_t set_rdac1(struct device *dev,
211 struct device_attribute *attr,
212 const char *buf, size_t count)
213{
214 return sysfs_set_reg(dev, attr, buf, count,
215 AD525X_I2C_RDAC | AD525X_RDAC1);
216}
217
218static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1);
219
220static ssize_t show_eeprom1(struct device *dev,
221 struct device_attribute *attr, char *buf)
222{
223 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1);
224}
225
226static ssize_t set_eeprom1(struct device *dev,
227 struct device_attribute *attr,
228 const char *buf, size_t count)
229{
230 return sysfs_set_reg(dev, attr, buf, count,
231 AD525X_I2C_EEPROM | AD525X_RDAC1);
232}
233
234static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
235
236static ssize_t show_tolerance1(struct device *dev,
237 struct device_attribute *attr, char *buf)
238{
239 return sysfs_show_reg(dev, attr, buf,
240 AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
241}
242
243static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
244
245/* ------------------------------------------------------------------------- */
246
247static ssize_t show_rdac2(struct device *dev,
248 struct device_attribute *attr, char *buf)
249{
250 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
251}
252
253static ssize_t set_rdac2(struct device *dev,
254 struct device_attribute *attr,
255 const char *buf, size_t count)
256{
257 return sysfs_set_reg(dev, attr, buf, count,
258 AD525X_I2C_RDAC | AD525X_RDAC2);
259}
260
261static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
262
263static ssize_t show_eeprom2(struct device *dev,
264 struct device_attribute *attr, char *buf)
265{
266 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
267}
268
269static ssize_t set_eeprom2(struct device *dev,
270 struct device_attribute *attr,
271 const char *buf, size_t count)
272{
273 return sysfs_set_reg(dev, attr, buf, count,
274 AD525X_I2C_EEPROM | AD525X_RDAC2);
275}
276
277static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
278
279static ssize_t show_tolerance2(struct device *dev,
280 struct device_attribute *attr, char *buf)
281{
282 return sysfs_show_reg(dev, attr, buf,
283 AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
284}
285
286static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
287
288/* ------------------------------------------------------------------------- */
289
290static ssize_t show_rdac3(struct device *dev,
291 struct device_attribute *attr, char *buf)
292{
293 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
294}
295
296static ssize_t set_rdac3(struct device *dev,
297 struct device_attribute *attr,
298 const char *buf, size_t count)
299{
300 return sysfs_set_reg(dev, attr, buf, count,
301 AD525X_I2C_RDAC | AD525X_RDAC3);
302}
303
304static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
305
306static ssize_t show_eeprom3(struct device *dev,
307 struct device_attribute *attr, char *buf)
308{
309 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
310}
311
312static ssize_t set_eeprom3(struct device *dev,
313 struct device_attribute *attr,
314 const char *buf, size_t count)
315{
316 return sysfs_set_reg(dev, attr, buf, count,
317 AD525X_I2C_EEPROM | AD525X_RDAC3);
318}
319
320static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3);
321
322static ssize_t show_tolerance3(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 return sysfs_show_reg(dev, attr, buf,
326 AD525X_I2C_EEPROM | AD525X_TOL_RDAC3);
327}
328
329static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL);
330
331static struct attribute *ad525x_attributes_wipers[4][4] = {
332 {
333 &dev_attr_rdac0.attr,
334 &dev_attr_eeprom0.attr,
335 &dev_attr_tolerance0.attr,
336 NULL
337 }, {
338 &dev_attr_rdac1.attr,
339 &dev_attr_eeprom1.attr,
340 &dev_attr_tolerance1.attr,
341 NULL
342 }, {
343 &dev_attr_rdac2.attr,
344 &dev_attr_eeprom2.attr,
345 &dev_attr_tolerance2.attr,
346 NULL
347 }, {
348 &dev_attr_rdac3.attr,
349 &dev_attr_eeprom3.attr,
350 &dev_attr_tolerance3.attr,
351 NULL
352 }
353};
354
355static const struct attribute_group ad525x_group_wipers[] = {
356 {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]},
357 {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]},
358 {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]},
359 {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]},
360};
361
362/* ------------------------------------------------------------------------- */
363
364static ssize_t set_inc_all(struct device *dev,
365 struct device_attribute *attr,
366 const char *buf, size_t count)
367{
368 return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL);
369}
370
371static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all);
372
373static ssize_t set_dec_all(struct device *dev,
374 struct device_attribute *attr,
375 const char *buf, size_t count)
376{
377 return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
378}
379
380static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
381
382static ssize_t set_inc_all_6db(struct device *dev,
383 struct device_attribute *attr,
384 const char *buf, size_t count)
385{
386 return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
387}
388
389static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
390
391static ssize_t set_dec_all_6db(struct device *dev,
392 struct device_attribute *attr,
393 const char *buf, size_t count)
394{
395 return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
396}
397
398static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
399
400static struct attribute *ad525x_attributes_commands[] = {
401 &dev_attr_inc_all.attr,
402 &dev_attr_dec_all.attr,
403 &dev_attr_inc_all_6db.attr,
404 &dev_attr_dec_all_6db.attr,
405 NULL
406};
407
408static const struct attribute_group ad525x_group_commands = {
409 .attrs = ad525x_attributes_commands,
410};
411
412/* ------------------------------------------------------------------------- */
413
414/* i2c device functions */
415
416/**
417 * ad525x_read - return the value contained in the specified register
418 * on the AD5258 device.
419 * @client: value returned from i2c_new_device()
420 * @reg: the register to read
421 *
422 * If the tolerance register is specified, 2 bytes are returned.
423 * Otherwise, 1 byte is returned. A negative value indicates an error
424 * occurred while reading the register.
425 */
426static s32 ad525x_read(struct i2c_client *client, u8 reg)
427{
428 struct dpot_data *data = i2c_get_clientdata(client);
429
430 if ((reg & AD525X_REG_TOL) || (data->max_pos > 256))
431 return i2c_smbus_read_word_data(client, (reg & 0xF8) |
432 ((reg & 0x7) << 1));
433 else
434 return i2c_smbus_read_byte_data(client, reg);
435}
436
437/**
438 * ad525x_write - store the given value in the specified register on
439 * the AD5258 device.
440 * @client: value returned from i2c_new_device()
441 * @reg: the register to write
442 * @value: the byte to store in the register
443 *
444 * For certain instructions that do not require a data byte, "NULL"
445 * should be specified for the "value" parameter. These instructions
446 * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM.
447 *
448 * A negative return value indicates an error occurred while reading
449 * the register.
450 */
451static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value)
452{
453 struct dpot_data *data = i2c_get_clientdata(client);
454
455 /* Only write the instruction byte for certain commands */
456 if (reg & AD525X_I2C_CMD)
457 return i2c_smbus_write_byte(client, reg);
458
459 if (data->max_pos > 256)
460 return i2c_smbus_write_word_data(client, (reg & 0xF8) |
461 ((reg & 0x7) << 1), value);
462 else
463 /* All other registers require instruction + data bytes */
464 return i2c_smbus_write_byte_data(client, reg, value);
465}
466
467static int ad525x_probe(struct i2c_client *client,
468 const struct i2c_device_id *id)
469{
470 struct device *dev = &client->dev;
471 struct dpot_data *data;
472 int err = 0;
473
474 dev_dbg(dev, "%s\n", __func__);
475
476 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
477 dev_err(dev, "missing I2C functionality for this driver\n");
478 goto exit;
479 }
480
481 data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
482 if (!data) {
483 err = -ENOMEM;
484 goto exit;
485 }
486
487 i2c_set_clientdata(client, data);
488 mutex_init(&data->update_lock);
489
490 switch (id->driver_data) {
491 case AD5258_ID:
492 data->max_pos = AD5258_MAX_POSITION;
493 err = sysfs_create_group(&dev->kobj,
494 &ad525x_group_wipers[AD525X_RDAC0]);
495 break;
496 case AD5259_ID:
497 data->max_pos = AD5259_MAX_POSITION;
498 err = sysfs_create_group(&dev->kobj,
499 &ad525x_group_wipers[AD525X_RDAC0]);
500 break;
501 case AD5251_ID:
502 data->max_pos = AD5251_MAX_POSITION;
503 err = sysfs_create_group(&dev->kobj,
504 &ad525x_group_wipers[AD525X_RDAC1]);
505 err |= sysfs_create_group(&dev->kobj,
506 &ad525x_group_wipers[AD525X_RDAC3]);
507 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
508 break;
509 case AD5252_ID:
510 data->max_pos = AD5252_MAX_POSITION;
511 err = sysfs_create_group(&dev->kobj,
512 &ad525x_group_wipers[AD525X_RDAC1]);
513 err |= sysfs_create_group(&dev->kobj,
514 &ad525x_group_wipers[AD525X_RDAC3]);
515 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
516 break;
517 case AD5253_ID:
518 data->max_pos = AD5253_MAX_POSITION;
519 err = sysfs_create_group(&dev->kobj,
520 &ad525x_group_wipers[AD525X_RDAC0]);
521 err |= sysfs_create_group(&dev->kobj,
522 &ad525x_group_wipers[AD525X_RDAC1]);
523 err |= sysfs_create_group(&dev->kobj,
524 &ad525x_group_wipers[AD525X_RDAC2]);
525 err |= sysfs_create_group(&dev->kobj,
526 &ad525x_group_wipers[AD525X_RDAC3]);
527 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
528 break;
529 case AD5254_ID:
530 data->max_pos = AD5254_MAX_POSITION;
531 err = sysfs_create_group(&dev->kobj,
532 &ad525x_group_wipers[AD525X_RDAC0]);
533 err |= sysfs_create_group(&dev->kobj,
534 &ad525x_group_wipers[AD525X_RDAC1]);
535 err |= sysfs_create_group(&dev->kobj,
536 &ad525x_group_wipers[AD525X_RDAC2]);
537 err |= sysfs_create_group(&dev->kobj,
538 &ad525x_group_wipers[AD525X_RDAC3]);
539 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
540 break;
541 case AD5255_ID:
542 data->max_pos = AD5255_MAX_POSITION;
543 err = sysfs_create_group(&dev->kobj,
544 &ad525x_group_wipers[AD525X_RDAC0]);
545 err |= sysfs_create_group(&dev->kobj,
546 &ad525x_group_wipers[AD525X_RDAC1]);
547 err |= sysfs_create_group(&dev->kobj,
548 &ad525x_group_wipers[AD525X_RDAC2]);
549 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
550 break;
551 default:
552 err = -ENODEV;
553 goto exit_free;
554 }
555
556 if (err) {
557 dev_err(dev, "failed to register sysfs hooks\n");
558 goto exit_free;
559 }
560
561 data->devid = id->driver_data;
562 data->rdac_mask = data->max_pos - 1;
563
564 dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
565 id->name, data->max_pos);
566
567 return 0;
568
569exit_free:
570 kfree(data);
571 i2c_set_clientdata(client, NULL);
572exit:
573 dev_err(dev, "failed to create client\n");
574 return err;
575}
576
577static int __devexit ad525x_remove(struct i2c_client *client)
578{
579 struct dpot_data *data = i2c_get_clientdata(client);
580 struct device *dev = &client->dev;
581
582 switch (data->devid) {
583 case AD5258_ID:
584 case AD5259_ID:
585 sysfs_remove_group(&dev->kobj,
586 &ad525x_group_wipers[AD525X_RDAC0]);
587 break;
588 case AD5251_ID:
589 case AD5252_ID:
590 sysfs_remove_group(&dev->kobj,
591 &ad525x_group_wipers[AD525X_RDAC1]);
592 sysfs_remove_group(&dev->kobj,
593 &ad525x_group_wipers[AD525X_RDAC3]);
594 sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
595 break;
596 case AD5253_ID:
597 case AD5254_ID:
598 sysfs_remove_group(&dev->kobj,
599 &ad525x_group_wipers[AD525X_RDAC0]);
600 sysfs_remove_group(&dev->kobj,
601 &ad525x_group_wipers[AD525X_RDAC1]);
602 sysfs_remove_group(&dev->kobj,
603 &ad525x_group_wipers[AD525X_RDAC2]);
604 sysfs_remove_group(&dev->kobj,
605 &ad525x_group_wipers[AD525X_RDAC3]);
606 sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
607 break;
608 case AD5255_ID:
609 sysfs_remove_group(&dev->kobj,
610 &ad525x_group_wipers[AD525X_RDAC0]);
611 sysfs_remove_group(&dev->kobj,
612 &ad525x_group_wipers[AD525X_RDAC1]);
613 sysfs_remove_group(&dev->kobj,
614 &ad525x_group_wipers[AD525X_RDAC2]);
615 sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
616 break;
617 }
618
619 i2c_set_clientdata(client, NULL);
620 kfree(data);
621
622 return 0;
623}
624
625static const struct i2c_device_id ad525x_idtable[] = {
626 {"ad5258", AD5258_ID},
627 {"ad5259", AD5259_ID},
628 {"ad5251", AD5251_ID},
629 {"ad5252", AD5252_ID},
630 {"ad5253", AD5253_ID},
631 {"ad5254", AD5254_ID},
632 {"ad5255", AD5255_ID},
633 {}
634};
635
636MODULE_DEVICE_TABLE(i2c, ad525x_idtable);
637
638static struct i2c_driver ad525x_driver = {
639 .driver = {
640 .owner = THIS_MODULE,
641 .name = DRIVER_NAME,
642 },
643 .id_table = ad525x_idtable,
644 .probe = ad525x_probe,
645 .remove = __devexit_p(ad525x_remove),
646};
647
648static int __init ad525x_init(void)
649{
650 return i2c_add_driver(&ad525x_driver);
651}
652
653module_init(ad525x_init);
654
655static void __exit ad525x_exit(void)
656{
657 i2c_del_driver(&ad525x_driver);
658}
659
660module_exit(ad525x_exit);
661
662MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
663 "Michael Hennerich <hennerich@blackfin.uclinux.org>, ");
664MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver");
665MODULE_LICENSE("GPL");
666MODULE_VERSION(DRIVER_VERSION);
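
The wiper, EEPROM and command attributes registered above appear under the I2C client's sysfs directory. A hedged userspace sketch (the 0-002e bus/address below is only an example; the real path depends on where the chip is instantiated):

#include <stdio.h>

#define DPOT_SYSFS "/sys/bus/i2c/devices/0-002e"	/* example path */

static int write_attr(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", DPOT_SYSFS, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_attr("rdac1", "128");	/* e.g. AD5252 wiper 1 to mid-scale (0-255) */
	write_attr("eeprom1", "128");	/* persist the same position in EEPROM */
	write_attr("inc_all", "1");	/* step every wiper up once */
	return 0;
}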
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
new file mode 100644
index 000000000000..8110460558ff
--- /dev/null
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -0,0 +1,370 @@
1/*
2 * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT)
3 *
4 * Copyright (C) 2006, Advanced Micro Devices, Inc.
5 * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
6 * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public License
10 * as published by the Free Software Foundation.
11 *
12 * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
13 */
14
15#include <linux/kernel.h>
16#include <linux/spinlock.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/cs5535.h>
21
22#define DRV_NAME "cs5535-mfgpt"
23#define MFGPT_BAR 2
24
25static int mfgpt_reset_timers;
26module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
27MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; "
28 "required by some broken BIOSes (ie, TinyBIOS < 0.99).");
29
30struct cs5535_mfgpt_timer {
31 struct cs5535_mfgpt_chip *chip;
32 int nr;
33};
34
35static struct cs5535_mfgpt_chip {
36 DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
37 resource_size_t base;
38
39 struct pci_dev *pdev;
40 spinlock_t lock;
41 int initialized;
42} cs5535_mfgpt_chip;
43
44int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
45 int event, int enable)
46{
47 uint32_t msr, mask, value, dummy;
48 int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
49
50 if (!timer) {
51 WARN_ON(1);
52 return -EIO;
53 }
54
55 /*
56 * The register maps for these are described in sections 6.17.1.x of
57 * the AMD Geode CS5536 Companion Device Data Book.
58 */
59 switch (event) {
60 case MFGPT_EVENT_RESET:
61 /*
62 * XXX: According to the docs, we cannot reset timers above
63 * 6; that is, resets for 7 and 8 will be ignored. Is this
64 * a problem? -dilinger
65 */
66 msr = MSR_MFGPT_NR;
67 mask = 1 << (timer->nr + 24);
68 break;
69
70 case MFGPT_EVENT_NMI:
71 msr = MSR_MFGPT_NR;
72 mask = 1 << (timer->nr + shift);
73 break;
74
75 case MFGPT_EVENT_IRQ:
76 msr = MSR_MFGPT_IRQ;
77 mask = 1 << (timer->nr + shift);
78 break;
79
80 default:
81 return -EIO;
82 }
83
84 rdmsr(msr, value, dummy);
85
86 if (enable)
87 value |= mask;
88 else
89 value &= ~mask;
90
91 wrmsr(msr, value, dummy);
92 return 0;
93}
94EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event);
95
96int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq,
97 int enable)
98{
99 uint32_t zsel, lpc, dummy;
100 int shift;
101
102 if (!timer) {
103 WARN_ON(1);
104 return -EIO;
105 }
106
107 /*
108 * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
109 * is using the same CMP of the timer's Siamese twin, the IRQ is set to
110 * 2, and we must neither use nor change it.
111 * XXX: Likewise, two Linux drivers might clash if the second overwrites the
112 * IRQ of the first. This can only happen when forcing an IRQ; calling this
113 * with *irq == 0 is safe. Currently there is no second driver.
114 */
115 rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
116 shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4;
117 if (((zsel >> shift) & 0xF) == 2)
118 return -EIO;
119
120 /* Choose IRQ: if none supplied, keep IRQ already set or use default */
121 if (!*irq)
122 *irq = (zsel >> shift) & 0xF;
123 if (!*irq)
124 *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
125
126 /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
127 if (*irq < 1 || *irq == 2 || *irq > 15)
128 return -EIO;
129 rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
130 if (lpc & (1 << *irq))
131 return -EIO;
132
133 /* All chosen and checked - go for it */
134 if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
135 return -EIO;
136 if (enable) {
137 zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
138 wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
139 }
140
141 return 0;
142}
143EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq);
144
145struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
146{
147 struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip;
148 struct cs5535_mfgpt_timer *timer = NULL;
149 unsigned long flags;
150 int max;
151
152 if (!mfgpt->initialized)
153 goto done;
154
155 /* only allocate timers from the working domain if requested */
156 if (domain == MFGPT_DOMAIN_WORKING)
157 max = 6;
158 else
159 max = MFGPT_MAX_TIMERS;
160
161 if (timer_nr >= max) {
162 /* programmer error. silly programmers! */
163 WARN_ON(1);
164 goto done;
165 }
166
167 spin_lock_irqsave(&mfgpt->lock, flags);
168 if (timer_nr < 0) {
169 unsigned long t;
170
171 /* try to find any available timer */
172 t = find_first_bit(mfgpt->avail, max);
173 /* set timer_nr to -1 if no timers available */
174 timer_nr = t < max ? (int) t : -1;
175 } else {
176 /* check if the requested timer's available */
177 if (!test_bit(timer_nr, mfgpt->avail))
178 timer_nr = -1;
179 }
180
181 if (timer_nr >= 0)
182 /* if timer_nr is not -1, it's an available timer */
183 __clear_bit(timer_nr, mfgpt->avail);
184 spin_unlock_irqrestore(&mfgpt->lock, flags);
185
186 if (timer_nr < 0)
187 goto done;
188
189 timer = kmalloc(sizeof(*timer), GFP_KERNEL);
190 if (!timer) {
191 /* aw hell */
192 spin_lock_irqsave(&mfgpt->lock, flags);
193 __set_bit(timer_nr, mfgpt->avail);
194 spin_unlock_irqrestore(&mfgpt->lock, flags);
195 goto done;
196 }
197 timer->chip = mfgpt;
198 timer->nr = timer_nr;
199 dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr);
200
201done:
202 return timer;
203}
204EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer);
205
206/*
207 * XXX: This frees the timer memory, but never resets the actual hardware
208 * timer. The old geode_mfgpt code did this; it would be good to figure
209 * out a way to actually release the hardware timer. See comments below.
210 */
211void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer)
212{
213 kfree(timer);
214}
215EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer);
216
217uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg)
218{
219 return inw(timer->chip->base + reg + (timer->nr * 8));
220}
221EXPORT_SYMBOL_GPL(cs5535_mfgpt_read);
222
223void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
224 uint16_t value)
225{
226 outw(value, timer->chip->base + reg + (timer->nr * 8));
227}
228EXPORT_SYMBOL_GPL(cs5535_mfgpt_write);
229
230/*
231 * This is a sledgehammer that resets all MFGPT timers. This is required by
232 * some broken BIOSes which leave the system in an unstable state
233 * (TinyBIOS 0.98, for example; fixed in 0.99). It is not known whether
234 * this secret MSR can be used to release individual timers.
235 * Jordan tells me that he and Mitch once played w/ it, but it's unclear
236 * what the results of that were (and they experienced some instability).
237 */
238static void __init reset_all_timers(void)
239{
240 uint32_t val, dummy;
241
242 /* The following undocumented bit resets the MFGPT timers */
243 val = 0xFF; dummy = 0;
244 wrmsr(MSR_MFGPT_SETUP, val, dummy);
245}
246
247/*
248 * Check whether any MFGPTs are available for the kernel to use. In most
249 * cases, firmware that uses AMD's VSA code will claim all timers during
250 * bootup; we certainly don't want to take them if they're already in use.
251 * In other cases (such as with VSAless OpenFirmware), the system firmware
252 * leaves timers available for us to use.
253 */
254static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
255{
256 struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
257 unsigned long flags;
258 int timers = 0;
259 uint16_t val;
260 int i;
261
262 /* bios workaround */
263 if (mfgpt_reset_timers)
264 reset_all_timers();
265
266 /* just to be safe, protect this section w/ lock */
267 spin_lock_irqsave(&mfgpt->lock, flags);
268 for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
269 timer.nr = i;
270 val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP);
271 if (!(val & MFGPT_SETUP_SETUP)) {
272 __set_bit(i, mfgpt->avail);
273 timers++;
274 }
275 }
276 spin_unlock_irqrestore(&mfgpt->lock, flags);
277
278 return timers;
279}
280
281static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
282 const struct pci_device_id *pci_id)
283{
284 int err, t;
285
286 /* There are two ways to get the MFGPT base address; one is by
287 * fetching it from MSR_LBAR_MFGPT, the other is by reading the
288 * PCI BAR info. The latter method is easier (especially across
289 * different architectures), so we'll stick with that for now. If
290 * it turns out to be unreliable in the face of crappy BIOSes, we
291 * can always go back to using MSRs. */
292
293 err = pci_enable_device_io(pdev);
294 if (err) {
295 dev_err(&pdev->dev, "can't enable device IO\n");
296 goto done;
297 }
298
299 err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME);
300 if (err) {
301 dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
302 goto done;
303 }
304
305 /* set up the driver-specific struct */
306 cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR);
307 cs5535_mfgpt_chip.pdev = pdev;
308 spin_lock_init(&cs5535_mfgpt_chip.lock);
309
310 dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR,
311 (unsigned long long) cs5535_mfgpt_chip.base);
312
313 /* detect the available timers */
314 t = scan_timers(&cs5535_mfgpt_chip);
315 dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t);
316 cs5535_mfgpt_chip.initialized = 1;
317 return 0;
318
319done:
320 return err;
321}
322
323static struct pci_device_id cs5535_mfgpt_pci_tbl[] = {
324 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
325 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
326 { 0, },
327};
328MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
329
330/*
331 * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
332 * registration stuff. It allows only one driver to bind to each PCI
333 * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
334 * device. Instead, we manually scan for the PCI device, request a single
335 * region, and keep track of the devices that we're using.
336 */
337
338static int __init cs5535_mfgpt_scan_pci(void)
339{
340 struct pci_dev *pdev;
341 int err = -ENODEV;
342 int i;
343
344 for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
345 pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
346 cs5535_mfgpt_pci_tbl[i].device, NULL);
347 if (pdev) {
348 err = cs5535_mfgpt_probe(pdev,
349 &cs5535_mfgpt_pci_tbl[i]);
350 if (err)
351 pci_dev_put(pdev);
352
353 /* we only support a single CS5535/6 southbridge */
354 break;
355 }
356 }
357
358 return err;
359}
360
361static int __init cs5535_mfgpt_init(void)
362{
363 return cs5535_mfgpt_scan_pci();
364}
365
366module_init(cs5535_mfgpt_init);
367
368MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
369MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
370MODULE_LICENSE("GPL");
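For reference, a minimal sketch of how a client driver might consume the API exported above; MFGPT_CMP2 and the MFGPT_REG_* offsets are assumed to come from <linux/cs5535.h>, and error handling is abbreviated:

#include <linux/cs5535.h>

static struct cs5535_mfgpt_timer *example_timer;

static int example_mfgpt_setup(void)
{
	int irq = 0;	/* 0: keep the IRQ already routed, or fall back to the default */

	/* grab any free timer from the working domain (timers 0-5) */
	example_timer = cs5535_mfgpt_alloc_timer(-1, MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	/* route this timer's CMP2 events to an IRQ and enable them */
	if (cs5535_mfgpt_set_irq(example_timer, MFGPT_CMP2, &irq, 1)) {
		cs5535_mfgpt_free_timer(example_timer);
		return -EIO;
	}

	/* program the comparator and clear the counter (register offsets assumed) */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 0xFFFF);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_COUNTER, 0);
	return 0;
}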
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2c27193aeaa0..f939ebc2507c 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -32,9 +32,6 @@
32static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, 32static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
33 0x55, 0x56, 0x57, I2C_CLIENT_END }; 33 0x55, 0x56, 0x57, I2C_CLIENT_END };
34 34
35/* Insmod parameters */
36I2C_CLIENT_INSMOD_1(eeprom);
37
38 35
39/* Size of EEPROM in bytes */ 36/* Size of EEPROM in bytes */
40#define EEPROM_SIZE 256 37#define EEPROM_SIZE 256
@@ -135,8 +132,7 @@ static struct bin_attribute eeprom_attr = {
135}; 132};
136 133
137/* Return 0 if detection is successful, -ENODEV otherwise */ 134/* Return 0 if detection is successful, -ENODEV otherwise */
138static int eeprom_detect(struct i2c_client *client, int kind, 135static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
139 struct i2c_board_info *info)
140{ 136{
141 struct i2c_adapter *adapter = client->adapter; 137 struct i2c_adapter *adapter = client->adapter;
142 138
@@ -233,7 +229,7 @@ static struct i2c_driver eeprom_driver = {
233 229
234 .class = I2C_CLASS_DDC | I2C_CLASS_SPD, 230 .class = I2C_CLASS_DDC | I2C_CLASS_SPD,
235 .detect = eeprom_detect, 231 .detect = eeprom_detect,
236 .address_data = &addr_data, 232 .address_list = normal_i2c,
237}; 233};
238 234
239static int __init eeprom_init(void) 235static int __init eeprom_init(void)
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 4bb7a3af9ad9..395a4ea64e9c 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -30,9 +30,6 @@
30/* Addresses to scan */ 30/* Addresses to scan */
31static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; 31static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
32 32
33/* Insmod parameters */
34I2C_CLIENT_INSMOD_1(ics932s401);
35
36/* ICS932S401 registers */ 33/* ICS932S401 registers */
37#define ICS932S401_REG_CFG2 0x01 34#define ICS932S401_REG_CFG2 0x01
38#define ICS932S401_CFG1_SPREAD 0x01 35#define ICS932S401_CFG1_SPREAD 0x01
@@ -106,12 +103,12 @@ struct ics932s401_data {
106 103
107static int ics932s401_probe(struct i2c_client *client, 104static int ics932s401_probe(struct i2c_client *client,
108 const struct i2c_device_id *id); 105 const struct i2c_device_id *id);
109static int ics932s401_detect(struct i2c_client *client, int kind, 106static int ics932s401_detect(struct i2c_client *client,
110 struct i2c_board_info *info); 107 struct i2c_board_info *info);
111static int ics932s401_remove(struct i2c_client *client); 108static int ics932s401_remove(struct i2c_client *client);
112 109
113static const struct i2c_device_id ics932s401_id[] = { 110static const struct i2c_device_id ics932s401_id[] = {
114 { "ics932s401", ics932s401 }, 111 { "ics932s401", 0 },
115 { } 112 { }
116}; 113};
117MODULE_DEVICE_TABLE(i2c, ics932s401_id); 114MODULE_DEVICE_TABLE(i2c, ics932s401_id);
@@ -125,7 +122,7 @@ static struct i2c_driver ics932s401_driver = {
125 .remove = ics932s401_remove, 122 .remove = ics932s401_remove,
126 .id_table = ics932s401_id, 123 .id_table = ics932s401_id,
127 .detect = ics932s401_detect, 124 .detect = ics932s401_detect,
128 .address_data = &addr_data, 125 .address_list = normal_i2c,
129}; 126};
130 127
131static struct ics932s401_data *ics932s401_update_device(struct device *dev) 128static struct ics932s401_data *ics932s401_update_device(struct device *dev)
@@ -413,7 +410,7 @@ static ssize_t show_spread(struct device *dev,
413} 410}
414 411
415/* Return 0 if detection is successful, -ENODEV otherwise */ 412/* Return 0 if detection is successful, -ENODEV otherwise */
416static int ics932s401_detect(struct i2c_client *client, int kind, 413static int ics932s401_detect(struct i2c_client *client,
417 struct i2c_board_info *info) 414 struct i2c_board_info *info)
418{ 415{
419 struct i2c_adapter *adapter = client->adapter; 416 struct i2c_adapter *adapter = client->adapter;
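Both the eeprom and ics932s401 hunks above follow the same i2c conversion: the legacy I2C_CLIENT_INSMOD_*/addr_data machinery goes away, the detect() callback drops its 'kind' argument, and the probe addresses are handed to the core through .address_list. A hedged sketch of the resulting pattern, with hypothetical names (probe/remove/id_table omitted for brevity):

#include <linux/i2c.h>

static const unsigned short example_addrs[] = { 0x50, I2C_CLIENT_END };

/* Return 0 if detection is successful, -ENODEV otherwise */
static int example_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;
	strlcpy(info->type, "example", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver example_i2c_driver = {
	.driver		= { .name = "example" },
	.class		= I2C_CLASS_HWMON,	/* detection only runs on matching adapters */
	.detect		= example_detect,
	.address_list	= example_addrs,
};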
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 60b0b1a4fb3a..09dcb699e667 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -138,7 +138,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is)
138 * even though the following code utilizes external interrupt registers 138 * even though the following code utilizes external interrupt registers
139 * to perform the speed calculation. 139 * to perform the speed calculation.
140 */ 140 */
141static void 141static void __devinit
142ioc4_clock_calibrate(struct ioc4_driver_data *idd) 142ioc4_clock_calibrate(struct ioc4_driver_data *idd)
143{ 143{
144 union ioc4_int_out int_out; 144 union ioc4_int_out int_out;
@@ -230,7 +230,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
230 * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. 230 * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
231 * If neither is present, it's a PCI-RT. 231 * If neither is present, it's a PCI-RT.
232 */ 232 */
233static unsigned int 233static unsigned int __devinit
234ioc4_variant(struct ioc4_driver_data *idd) 234ioc4_variant(struct ioc4_driver_data *idd)
235{ 235{
236 struct pci_dev *pdev = NULL; 236 struct pci_dev *pdev = NULL;
@@ -269,7 +269,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
269 return IOC4_VARIANT_PCI_RT; 269 return IOC4_VARIANT_PCI_RT;
270} 270}
271 271
272static void 272static void __devinit
273ioc4_load_modules(struct work_struct *work) 273ioc4_load_modules(struct work_struct *work)
274{ 274{
275 /* arg just has to be freed */ 275 /* arg just has to be freed */
@@ -280,7 +280,7 @@ ioc4_load_modules(struct work_struct *work)
280} 280}
281 281
282/* Adds a new instance of an IOC4 card */ 282/* Adds a new instance of an IOC4 card */
283static int 283static int __devinit
284ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) 284ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
285{ 285{
286 struct ioc4_driver_data *idd; 286 struct ioc4_driver_data *idd;
@@ -425,7 +425,7 @@ out:
425} 425}
426 426
427/* Removes a particular instance of an IOC4 card. */ 427/* Removes a particular instance of an IOC4 card. */
428static void 428static void __devexit
429ioc4_remove(struct pci_dev *pdev) 429ioc4_remove(struct pci_dev *pdev)
430{ 430{
431 struct ioc4_submodule *is; 431 struct ioc4_submodule *is;
@@ -476,7 +476,7 @@ static struct pci_driver ioc4_driver = {
476 .name = "IOC4", 476 .name = "IOC4",
477 .id_table = ioc4_id_table, 477 .id_table = ioc4_id_table,
478 .probe = ioc4_probe, 478 .probe = ioc4_probe,
479 .remove = ioc4_remove, 479 .remove = __devexit_p(ioc4_remove),
480}; 480};
481 481
482MODULE_DEVICE_TABLE(pci, ioc4_id_table); 482MODULE_DEVICE_TABLE(pci, ioc4_id_table);
@@ -486,14 +486,14 @@ MODULE_DEVICE_TABLE(pci, ioc4_id_table);
486 *********************/ 486 *********************/
487 487
488/* Module load */ 488/* Module load */
489static int __devinit 489static int __init
490ioc4_init(void) 490ioc4_init(void)
491{ 491{
492 return pci_register_driver(&ioc4_driver); 492 return pci_register_driver(&ioc4_driver);
493} 493}
494 494
495/* Module unload */ 495/* Module unload */
496static void __devexit 496static void __exit
497ioc4_exit(void) 497ioc4_exit(void)
498{ 498{
499 /* Ensure ioc4_load_modules() has completed before exiting */ 499 /* Ensure ioc4_load_modules() has completed before exiting */
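The ioc4 hunks above are purely section annotations: everything reachable only from the probe path becomes __devinit, the removal path becomes __devexit with the function pointer wrapped in __devexit_p() so it degrades to NULL when that section is discarded, and the module init/exit routines move to the usual __init/__exit sections. A generic sketch of the pattern with hypothetical names (id_table omitted):

static int __devinit example_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	return pci_enable_device(pdev);		/* probe-path-only code */
}

static void __devexit example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name	= "example",
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),
};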
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
index f93f03a9e6e9..3ad76cd18b4b 100644
--- a/drivers/misc/sgi-gru/gru.h
+++ b/drivers/misc/sgi-gru/gru.h
@@ -53,6 +53,17 @@ struct gru_chiplet_info {
53 int free_user_cbr; 53 int free_user_cbr;
54}; 54};
55 55
56/*
57 * Statistics kept for each context.
58 */
59struct gru_gseg_statistics {
60 unsigned long fmm_tlbmiss;
61 unsigned long upm_tlbmiss;
62 unsigned long tlbdropin;
63 unsigned long context_stolen;
64 unsigned long reserved[10];
65};
66
56/* Flags for GRU options on the gru_create_context() call */ 67/* Flags for GRU options on the gru_create_context() call */
57/* Select one of the follow 4 options to specify how TLB misses are handled */ 68/* Select one of the follow 4 options to specify how TLB misses are handled */
58#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */ 69#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
index 3c9c06618e6a..d95587cc794c 100644
--- a/drivers/misc/sgi-gru/gru_instructions.h
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -34,17 +34,17 @@ extern void gru_wait_abort_proc(void *cb);
34#include <asm/intrinsics.h> 34#include <asm/intrinsics.h>
35#define __flush_cache(p) ia64_fc((unsigned long)p) 35#define __flush_cache(p) ia64_fc((unsigned long)p)
36/* Use volatile on IA64 to ensure ordering via st4.rel */ 36/* Use volatile on IA64 to ensure ordering via st4.rel */
37#define gru_ordered_store_int(p, v) \ 37#define gru_ordered_store_ulong(p, v) \
38 do { \ 38 do { \
39 barrier(); \ 39 barrier(); \
40 *((volatile int *)(p)) = v; /* force st.rel */ \ 40 *((volatile unsigned long *)(p)) = v; /* force st.rel */ \
41 } while (0) 41 } while (0)
42#elif defined(CONFIG_X86_64) 42#elif defined(CONFIG_X86_64)
43#define __flush_cache(p) clflush(p) 43#define __flush_cache(p) clflush(p)
44#define gru_ordered_store_int(p, v) \ 44#define gru_ordered_store_ulong(p, v) \
45 do { \ 45 do { \
46 barrier(); \ 46 barrier(); \
47 *(int *)p = v; \ 47 *(unsigned long *)p = v; \
48 } while (0) 48 } while (0)
49#else 49#else
50#error "Unsupported architecture" 50#error "Unsupported architecture"
@@ -129,8 +129,13 @@ struct gru_instruction_bits {
129 */ 129 */
130struct gru_instruction { 130struct gru_instruction {
131 /* DW 0 */ 131 /* DW 0 */
132 unsigned int op32; /* icmd,xtype,iaa0,ima,opc */ 132 union {
133 unsigned int tri0; 133 unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */
134 struct {
135 unsigned int op32;
136 unsigned int tri0;
137 };
138 };
134 unsigned long tri1_bufsize; /* DW 1 */ 139 unsigned long tri1_bufsize; /* DW 1 */
135 unsigned long baddr0; /* DW 2 */ 140 unsigned long baddr0; /* DW 2 */
136 unsigned long nelem; /* DW 3 */ 141 unsigned long nelem; /* DW 3 */
@@ -140,7 +145,7 @@ struct gru_instruction {
140 unsigned long avalue; /* DW 7 */ 145 unsigned long avalue; /* DW 7 */
141}; 146};
142 147
143/* Some shifts and masks for the low 32 bits of a GRU command */ 148/* Some shifts and masks for the low 64 bits of a GRU command */
144#define GRU_CB_ICMD_SHFT 0 149#define GRU_CB_ICMD_SHFT 0
145#define GRU_CB_ICMD_MASK 0x1 150#define GRU_CB_ICMD_MASK 0x1
146#define GRU_CB_XTYPE_SHFT 8 151#define GRU_CB_XTYPE_SHFT 8
@@ -155,6 +160,10 @@ struct gru_instruction {
155#define GRU_CB_OPC_MASK 0xff 160#define GRU_CB_OPC_MASK 0xff
156#define GRU_CB_EXOPC_SHFT 24 161#define GRU_CB_EXOPC_SHFT 24
157#define GRU_CB_EXOPC_MASK 0xff 162#define GRU_CB_EXOPC_MASK 0xff
163#define GRU_IDEF2_SHFT 32
164#define GRU_IDEF2_MASK 0x3ffff
165#define GRU_ISTATUS_SHFT 56
166#define GRU_ISTATUS_MASK 0x3
158 167
159/* GRU instruction opcodes (opc field) */ 168/* GRU instruction opcodes (opc field) */
160#define OP_NOP 0x00 169#define OP_NOP 0x00
@@ -256,6 +265,7 @@ struct gru_instruction {
256#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16) 265#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
257#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17) 266#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
258#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18) 267#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
268#define CBE_CAUSE_FORCED_ERROR (1 << 19)
259 269
260/* CBE cbrexecstatus bits */ 270/* CBE cbrexecstatus bits */
261#define CBR_EXS_ABORT_OCC_BIT 0 271#define CBR_EXS_ABORT_OCC_BIT 0
@@ -264,13 +274,15 @@ struct gru_instruction {
264#define CBR_EXS_QUEUED_BIT 3 274#define CBR_EXS_QUEUED_BIT 3
265#define CBR_EXS_TLB_INVAL_BIT 4 275#define CBR_EXS_TLB_INVAL_BIT 4
266#define CBR_EXS_EXCEPTION_BIT 5 276#define CBR_EXS_EXCEPTION_BIT 5
277#define CBR_EXS_CB_INT_PENDING_BIT 6
267 278
268#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) 279#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
269#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) 280#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
270#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) 281#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
271#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) 282#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
272#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT) 283#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
273#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) 284#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
285#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT)
274 286
275/* 287/*
276 * Exceptions are retried for the following cases. If any OTHER bits are set 288 * Exceptions are retried for the following cases. If any OTHER bits are set
@@ -296,12 +308,14 @@ union gru_mesqhead {
296 308
297 309
298/* Generate the low word of a GRU instruction */ 310/* Generate the low word of a GRU instruction */
299static inline unsigned int 311static inline unsigned long
300__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype, 312__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
301 unsigned char iaa0, unsigned char iaa1, 313 unsigned char iaa0, unsigned char iaa1,
302 unsigned char ima) 314 unsigned long idef2, unsigned char ima)
303{ 315{
304 return (1 << GRU_CB_ICMD_SHFT) | 316 return (1 << GRU_CB_ICMD_SHFT) |
317 ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) |
318 (idef2 << GRU_IDEF2_SHFT) |
305 (iaa0 << GRU_CB_IAA0_SHFT) | 319 (iaa0 << GRU_CB_IAA0_SHFT) |
306 (iaa1 << GRU_CB_IAA1_SHFT) | 320 (iaa1 << GRU_CB_IAA1_SHFT) |
307 (ima << GRU_CB_IMA_SHFT) | 321 (ima << GRU_CB_IMA_SHFT) |
@@ -319,12 +333,13 @@ static inline void gru_flush_cache(void *p)
319} 333}
320 334
321/* 335/*
322 * Store the lower 32 bits of the command including the "start" bit. Then 336 * Store the lower 64 bits of the command including the "start" bit. Then
323 * start the instruction executing. 337 * start the instruction executing.
324 */ 338 */
325static inline void gru_start_instruction(struct gru_instruction *ins, int op32) 339static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64)
326{ 340{
327 gru_ordered_store_int(ins, op32); 341 gru_ordered_store_ulong(ins, op64);
342 mb();
328 gru_flush_cache(ins); 343 gru_flush_cache(ins);
329} 344}
330 345
@@ -340,6 +355,30 @@ static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
340 * - nelem and stride are in elements 355 * - nelem and stride are in elements
341 * - tri0/tri1 is in bytes for the beginning of the data segment. 356 * - tri0/tri1 is in bytes for the beginning of the data segment.
342 */ 357 */
358static inline void gru_vload_phys(void *cb, unsigned long gpa,
359 unsigned int tri0, int iaa, unsigned long hints)
360{
361 struct gru_instruction *ins = (struct gru_instruction *)cb;
362
363 ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
364 ins->nelem = 1;
365 ins->op1_stride = 1;
366 gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0,
367 (unsigned long)tri0, CB_IMA(hints)));
368}
369
370static inline void gru_vstore_phys(void *cb, unsigned long gpa,
371 unsigned int tri0, int iaa, unsigned long hints)
372{
373 struct gru_instruction *ins = (struct gru_instruction *)cb;
374
375 ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
376 ins->nelem = 1;
377 ins->op1_stride = 1;
378 gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0,
379 (unsigned long)tri0, CB_IMA(hints)));
380}
381
343static inline void gru_vload(void *cb, unsigned long mem_addr, 382static inline void gru_vload(void *cb, unsigned long mem_addr,
344 unsigned int tri0, unsigned char xtype, unsigned long nelem, 383 unsigned int tri0, unsigned char xtype, unsigned long nelem,
345 unsigned long stride, unsigned long hints) 384 unsigned long stride, unsigned long hints)
@@ -348,10 +387,9 @@ static inline void gru_vload(void *cb, unsigned long mem_addr,
348 387
349 ins->baddr0 = (long)mem_addr; 388 ins->baddr0 = (long)mem_addr;
350 ins->nelem = nelem; 389 ins->nelem = nelem;
351 ins->tri0 = tri0;
352 ins->op1_stride = stride; 390 ins->op1_stride = stride;
353 gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0, 391 gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
354 CB_IMA(hints))); 392 (unsigned long)tri0, CB_IMA(hints)));
355} 393}
356 394
357static inline void gru_vstore(void *cb, unsigned long mem_addr, 395static inline void gru_vstore(void *cb, unsigned long mem_addr,
@@ -362,10 +400,9 @@ static inline void gru_vstore(void *cb, unsigned long mem_addr,
362 400
363 ins->baddr0 = (long)mem_addr; 401 ins->baddr0 = (long)mem_addr;
364 ins->nelem = nelem; 402 ins->nelem = nelem;
365 ins->tri0 = tri0;
366 ins->op1_stride = stride; 403 ins->op1_stride = stride;
367 gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0, 404 gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
368 CB_IMA(hints))); 405 tri0, CB_IMA(hints)));
369} 406}
370 407
371static inline void gru_ivload(void *cb, unsigned long mem_addr, 408static inline void gru_ivload(void *cb, unsigned long mem_addr,
@@ -376,10 +413,9 @@ static inline void gru_ivload(void *cb, unsigned long mem_addr,
376 413
377 ins->baddr0 = (long)mem_addr; 414 ins->baddr0 = (long)mem_addr;
378 ins->nelem = nelem; 415 ins->nelem = nelem;
379 ins->tri0 = tri0;
380 ins->tri1_bufsize = tri1; 416 ins->tri1_bufsize = tri1;
381 gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0, 417 gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
382 CB_IMA(hints))); 418 tri0, CB_IMA(hints)));
383} 419}
384 420
385static inline void gru_ivstore(void *cb, unsigned long mem_addr, 421static inline void gru_ivstore(void *cb, unsigned long mem_addr,
@@ -390,10 +426,9 @@ static inline void gru_ivstore(void *cb, unsigned long mem_addr,
390 426
391 ins->baddr0 = (long)mem_addr; 427 ins->baddr0 = (long)mem_addr;
392 ins->nelem = nelem; 428 ins->nelem = nelem;
393 ins->tri0 = tri0;
394 ins->tri1_bufsize = tri1; 429 ins->tri1_bufsize = tri1;
395 gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0, 430 gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
396 CB_IMA(hints))); 431 tri0, CB_IMA(hints)));
397} 432}
398 433
399static inline void gru_vset(void *cb, unsigned long mem_addr, 434static inline void gru_vset(void *cb, unsigned long mem_addr,
@@ -406,8 +441,8 @@ static inline void gru_vset(void *cb, unsigned long mem_addr,
406 ins->op2_value_baddr1 = value; 441 ins->op2_value_baddr1 = value;
407 ins->nelem = nelem; 442 ins->nelem = nelem;
408 ins->op1_stride = stride; 443 ins->op1_stride = stride;
409 gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0, 444 gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0,
410 CB_IMA(hints))); 445 0, CB_IMA(hints)));
411} 446}
412 447
413static inline void gru_ivset(void *cb, unsigned long mem_addr, 448static inline void gru_ivset(void *cb, unsigned long mem_addr,
@@ -420,8 +455,8 @@ static inline void gru_ivset(void *cb, unsigned long mem_addr,
420 ins->op2_value_baddr1 = value; 455 ins->op2_value_baddr1 = value;
421 ins->nelem = nelem; 456 ins->nelem = nelem;
422 ins->tri1_bufsize = tri1; 457 ins->tri1_bufsize = tri1;
423 gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0, 458 gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0,
424 CB_IMA(hints))); 459 0, CB_IMA(hints)));
425} 460}
426 461
427static inline void gru_vflush(void *cb, unsigned long mem_addr, 462static inline void gru_vflush(void *cb, unsigned long mem_addr,
@@ -433,15 +468,15 @@ static inline void gru_vflush(void *cb, unsigned long mem_addr,
433 ins->baddr0 = (long)mem_addr; 468 ins->baddr0 = (long)mem_addr;
434 ins->op1_stride = stride; 469 ins->op1_stride = stride;
435 ins->nelem = nelem; 470 ins->nelem = nelem;
436 gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0, 471 gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
437 CB_IMA(hints))); 472 0, CB_IMA(hints)));
438} 473}
439 474
440static inline void gru_nop(void *cb, int hints) 475static inline void gru_nop(void *cb, int hints)
441{ 476{
442 struct gru_instruction *ins = (void *)cb; 477 struct gru_instruction *ins = (void *)cb;
443 478
444 gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints))); 479 gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints)));
445} 480}
446 481
447 482
@@ -455,10 +490,9 @@ static inline void gru_bcopy(void *cb, const unsigned long src,
455 ins->baddr0 = (long)src; 490 ins->baddr0 = (long)src;
456 ins->op2_value_baddr1 = (long)dest; 491 ins->op2_value_baddr1 = (long)dest;
457 ins->nelem = nelem; 492 ins->nelem = nelem;
458 ins->tri0 = tri0;
459 ins->tri1_bufsize = bufsize; 493 ins->tri1_bufsize = bufsize;
460 gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM, 494 gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM,
461 IAA_RAM, CB_IMA(hints))); 495 IAA_RAM, tri0, CB_IMA(hints)));
462} 496}
463 497
464static inline void gru_bstore(void *cb, const unsigned long src, 498static inline void gru_bstore(void *cb, const unsigned long src,
@@ -470,9 +504,8 @@ static inline void gru_bstore(void *cb, const unsigned long src,
470 ins->baddr0 = (long)src; 504 ins->baddr0 = (long)src;
471 ins->op2_value_baddr1 = (long)dest; 505 ins->op2_value_baddr1 = (long)dest;
472 ins->nelem = nelem; 506 ins->nelem = nelem;
473 ins->tri0 = tri0; 507 gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
474 gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM, 508 tri0, CB_IMA(hints)));
475 CB_IMA(hints)));
476} 509}
477 510
478static inline void gru_gamir(void *cb, int exopc, unsigned long src, 511static inline void gru_gamir(void *cb, int exopc, unsigned long src,
@@ -481,8 +514,8 @@ static inline void gru_gamir(void *cb, int exopc, unsigned long src,
481 struct gru_instruction *ins = (void *)cb; 514 struct gru_instruction *ins = (void *)cb;
482 515
483 ins->baddr0 = (long)src; 516 ins->baddr0 = (long)src;
484 gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0, 517 gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
485 CB_IMA(hints))); 518 0, CB_IMA(hints)));
486} 519}
487 520
488static inline void gru_gamirr(void *cb, int exopc, unsigned long src, 521static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
@@ -491,8 +524,8 @@ static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
491 struct gru_instruction *ins = (void *)cb; 524 struct gru_instruction *ins = (void *)cb;
492 525
493 ins->baddr0 = (long)src; 526 ins->baddr0 = (long)src;
494 gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0, 527 gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
495 CB_IMA(hints))); 528 0, CB_IMA(hints)));
496} 529}
497 530
498static inline void gru_gamer(void *cb, int exopc, unsigned long src, 531static inline void gru_gamer(void *cb, int exopc, unsigned long src,
@@ -505,8 +538,8 @@ static inline void gru_gamer(void *cb, int exopc, unsigned long src,
505 ins->baddr0 = (long)src; 538 ins->baddr0 = (long)src;
506 ins->op1_stride = operand1; 539 ins->op1_stride = operand1;
507 ins->op2_value_baddr1 = operand2; 540 ins->op2_value_baddr1 = operand2;
508 gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0, 541 gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
509 CB_IMA(hints))); 542 0, CB_IMA(hints)));
510} 543}
511 544
512static inline void gru_gamerr(void *cb, int exopc, unsigned long src, 545static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
@@ -518,8 +551,8 @@ static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
518 ins->baddr0 = (long)src; 551 ins->baddr0 = (long)src;
519 ins->op1_stride = operand1; 552 ins->op1_stride = operand1;
520 ins->op2_value_baddr1 = operand2; 553 ins->op2_value_baddr1 = operand2;
521 gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0, 554 gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
522 CB_IMA(hints))); 555 0, CB_IMA(hints)));
523} 556}
524 557
525static inline void gru_gamxr(void *cb, unsigned long src, 558static inline void gru_gamxr(void *cb, unsigned long src,
@@ -529,8 +562,8 @@ static inline void gru_gamxr(void *cb, unsigned long src,
529 562
530 ins->baddr0 = (long)src; 563 ins->baddr0 = (long)src;
531 ins->nelem = 4; 564 ins->nelem = 4;
532 gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW, 565 gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
533 IAA_RAM, 0, CB_IMA(hints))); 566 IAA_RAM, 0, 0, CB_IMA(hints)));
534} 567}
535 568
536static inline void gru_mesq(void *cb, unsigned long queue, 569static inline void gru_mesq(void *cb, unsigned long queue,
@@ -541,9 +574,8 @@ static inline void gru_mesq(void *cb, unsigned long queue,
541 574
542 ins->baddr0 = (long)queue; 575 ins->baddr0 = (long)queue;
543 ins->nelem = nelem; 576 ins->nelem = nelem;
544 ins->tri0 = tri0; 577 gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
545 gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0, 578 tri0, CB_IMA(hints)));
546 CB_IMA(hints)));
547} 579}
548 580
549static inline unsigned long gru_get_amo_value(void *cb) 581static inline unsigned long gru_get_amo_value(void *cb)
@@ -662,6 +694,14 @@ static inline void gru_wait_abort(void *cb)
662 gru_wait_abort_proc(cb); 694 gru_wait_abort_proc(cb);
663} 695}
664 696
697/*
698 * Get a pointer to the start of a gseg
699 * p - Any valid pointer within the gseg
700 */
701static inline void *gru_get_gseg_pointer(void *p)
702{
703 return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
704}
665 705
666/* 706/*
667 * Get a pointer to a control block 707 * Get a pointer to a control block
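The net effect of the gru_instructions.h changes is that the first doubleword of a control block is now written with a single 64-bit ordered store: __opword() becomes __opdword(), the old tri0 field rides along as idef2 in bits 32..49, and the instruction status in bits 56..57 is primed to CBS_ACTIVE as part of the same store. Using only the shifts and masks added above, two illustrative accessors (not part of the patch) show the layout:

static inline unsigned int example_op64_idef2(unsigned long op64)
{
	return (op64 >> GRU_IDEF2_SHFT) & GRU_IDEF2_MASK;	/* the old tri0 */
}

static inline unsigned int example_op64_istatus(unsigned long op64)
{
	return (op64 >> GRU_ISTATUS_SHFT) & GRU_ISTATUS_MASK;	/* CBS_* state */
}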
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 679e01778286..38657cdaf54d 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -40,6 +40,12 @@
40#include "gru_instructions.h" 40#include "gru_instructions.h"
41#include <asm/uv/uv_hub.h> 41#include <asm/uv/uv_hub.h>
42 42
43/* Return codes for vtop functions */
44#define VTOP_SUCCESS 0
45#define VTOP_INVALID -1
46#define VTOP_RETRY -2
47
48
43/* 49/*
44 * Test if a physical address is a valid GRU GSEG address 50 * Test if a physical address is a valid GRU GSEG address
45 */ 51 */
@@ -90,19 +96,22 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
90{ 96{
91 struct mm_struct *mm = current->mm; 97 struct mm_struct *mm = current->mm;
92 struct vm_area_struct *vma; 98 struct vm_area_struct *vma;
93 struct gru_thread_state *gts = NULL; 99 struct gru_thread_state *gts = ERR_PTR(-EINVAL);
94 100
95 down_write(&mm->mmap_sem); 101 down_write(&mm->mmap_sem);
96 vma = gru_find_vma(vaddr); 102 vma = gru_find_vma(vaddr);
97 if (vma) 103 if (!vma)
98 gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); 104 goto err;
99 if (gts) {
100 mutex_lock(&gts->ts_ctxlock);
101 downgrade_write(&mm->mmap_sem);
102 } else {
103 up_write(&mm->mmap_sem);
104 }
105 105
106 gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
107 if (IS_ERR(gts))
108 goto err;
109 mutex_lock(&gts->ts_ctxlock);
110 downgrade_write(&mm->mmap_sem);
111 return gts;
112
113err:
114 up_write(&mm->mmap_sem);
106 return gts; 115 return gts;
107} 116}
108 117
@@ -122,39 +131,15 @@ static void gru_unlock_gts(struct gru_thread_state *gts)
122 * is necessary to prevent the user from seeing a stale cb.istatus that will 131 * is necessary to prevent the user from seeing a stale cb.istatus that will
123 * change as soon as the TFH restart is complete. Races may cause an 132 * change as soon as the TFH restart is complete. Races may cause an
124 * occasional failure to clear the cb.istatus, but that is ok. 133 * occasional failure to clear the cb.istatus, but that is ok.
125 *
126 * If the cb address is not valid (should not happen, but...), nothing
127 * bad will happen.. The get_user()/put_user() will fail but there
128 * are no bad side-effects.
129 */ 134 */
130static void gru_cb_set_istatus_active(unsigned long __user *cb) 135static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
131{ 136{
132 union { 137 if (cbk) {
133 struct gru_instruction_bits bits; 138 cbk->istatus = CBS_ACTIVE;
134 unsigned long dw;
135 } u;
136
137 if (cb) {
138 get_user(u.dw, cb);
139 u.bits.istatus = CBS_ACTIVE;
140 put_user(u.dw, cb);
141 } 139 }
142} 140}
143 141
144/* 142/*
145 * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the
146 * interrupt. Interrupts are always sent to a cpu on the blade that contains the
147 * GRU (except for headless blades which are not currently supported). A blade
148 * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
149 * number uniquely identifies the GRU chiplet on the local blade that caused the
150 * interrupt. Always called in interrupt context.
151 */
152static inline struct gru_state *irq_to_gru(int irq)
153{
154 return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
155}
156
157/*
158 * Read & clear a TFM 143 * Read & clear a TFM
159 * 144 *
160 * The GRU has an array of fault maps. A map is private to a cpu 145 * The GRU has an array of fault maps. A map is private to a cpu
@@ -207,10 +192,11 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
207{ 192{
208 struct page *page; 193 struct page *page;
209 194
210 /* ZZZ Need to handle HUGE pages */ 195#ifdef CONFIG_HUGETLB_PAGE
211 if (is_vm_hugetlb_page(vma)) 196 *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
212 return -EFAULT; 197#else
213 *pageshift = PAGE_SHIFT; 198 *pageshift = PAGE_SHIFT;
199#endif
214 if (get_user_pages 200 if (get_user_pages
215 (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) 201 (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
216 return -EFAULT; 202 return -EFAULT;
@@ -268,7 +254,6 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
268 return 0; 254 return 0;
269 255
270err: 256err:
271 local_irq_enable();
272 return 1; 257 return 1;
273} 258}
274 259
@@ -301,14 +286,69 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
301 paddr = paddr & ~((1UL << ps) - 1); 286 paddr = paddr & ~((1UL << ps) - 1);
302 *gpa = uv_soc_phys_ram_to_gpa(paddr); 287 *gpa = uv_soc_phys_ram_to_gpa(paddr);
303 *pageshift = ps; 288 *pageshift = ps;
304 return 0; 289 return VTOP_SUCCESS;
305 290
306inval: 291inval:
307 return -1; 292 return VTOP_INVALID;
308upm: 293upm:
309 return -2; 294 return VTOP_RETRY;
295}
296
297
298/*
299 * Flush a CBE from the cache. The CBE is clean in the cache; dirty the
300 * CBE cacheline so that the line will be written back to the home agent.
301 * Otherwise the line may be silently dropped. This has no impact
302 * except on performance.
303 */
304static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
305{
306 if (unlikely(cbe)) {
307 cbe->cbrexecstatus = 0; /* make CL dirty */
308 gru_flush_cache(cbe);
309 }
310} 310}
311 311
312/*
313 * Preload the TLB with entries that may be required. Currently, preloading
314 * is implemented only for BCOPY. Preload <tlb_preload_count> pages, or up to
315 * the end of the bcopy transfer, whichever is smaller.
316 */
317static void gru_preload_tlb(struct gru_state *gru,
318 struct gru_thread_state *gts, int atomic,
319 unsigned long fault_vaddr, int asid, int write,
320 unsigned char tlb_preload_count,
321 struct gru_tlb_fault_handle *tfh,
322 struct gru_control_block_extended *cbe)
323{
324 unsigned long vaddr = 0, gpa;
325 int ret, pageshift;
326
327 if (cbe->opccpy != OP_BCOPY)
328 return;
329
330 if (fault_vaddr == cbe->cbe_baddr0)
331 vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
332 else if (fault_vaddr == cbe->cbe_baddr1)
333 vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
334
335 fault_vaddr &= PAGE_MASK;
336 vaddr &= PAGE_MASK;
337 vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
338
339 while (vaddr > fault_vaddr) {
340 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
341 if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
342 GRU_PAGESIZE(pageshift)))
343 return;
344 gru_dbg(grudev,
345 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
346 atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
347 vaddr, asid, write, pageshift, gpa);
348 vaddr -= PAGE_SIZE;
349 STAT(tlb_preload_page);
350 }
351}
312 352
313/* 353/*
314 * Drop a TLB entry into the GRU. The fault is described by info in an TFH. 354 * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
@@ -320,11 +360,14 @@ upm:
320 * < 0 = error code 360 * < 0 = error code
321 * 361 *
322 */ 362 */
323static int gru_try_dropin(struct gru_thread_state *gts, 363static int gru_try_dropin(struct gru_state *gru,
364 struct gru_thread_state *gts,
324 struct gru_tlb_fault_handle *tfh, 365 struct gru_tlb_fault_handle *tfh,
325 unsigned long __user *cb) 366 struct gru_instruction_bits *cbk)
326{ 367{
327 int pageshift = 0, asid, write, ret, atomic = !cb; 368 struct gru_control_block_extended *cbe = NULL;
369 unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
370 int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
328 unsigned long gpa = 0, vaddr = 0; 371 unsigned long gpa = 0, vaddr = 0;
329 372
330 /* 373 /*
@@ -335,24 +378,34 @@ static int gru_try_dropin(struct gru_thread_state *gts,
335 */ 378 */
336 379
337 /* 380 /*
381 * Prefetch the CBE if doing TLB preloading
382 */
383 if (unlikely(tlb_preload_count)) {
384 cbe = gru_tfh_to_cbe(tfh);
385 prefetchw(cbe);
386 }
387
388 /*
338 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call. 389 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
339 * Might be a hardware race OR a stupid user. Ignore FMM because FMM 390 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
340 * is a transient state. 391 * is a transient state.
341 */ 392 */
342 if (tfh->status != TFHSTATUS_EXCEPTION) { 393 if (tfh->status != TFHSTATUS_EXCEPTION) {
343 gru_flush_cache(tfh); 394 gru_flush_cache(tfh);
395 sync_core();
344 if (tfh->status != TFHSTATUS_EXCEPTION) 396 if (tfh->status != TFHSTATUS_EXCEPTION)
345 goto failnoexception; 397 goto failnoexception;
346 STAT(tfh_stale_on_fault); 398 STAT(tfh_stale_on_fault);
347 } 399 }
348 if (tfh->state == TFHSTATE_IDLE) 400 if (tfh->state == TFHSTATE_IDLE)
349 goto failidle; 401 goto failidle;
350 if (tfh->state == TFHSTATE_MISS_FMM && cb) 402 if (tfh->state == TFHSTATE_MISS_FMM && cbk)
351 goto failfmm; 403 goto failfmm;
352 404
353 write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0; 405 write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
354 vaddr = tfh->missvaddr; 406 vaddr = tfh->missvaddr;
355 asid = tfh->missasid; 407 asid = tfh->missasid;
408 indexway = tfh->indexway;
356 if (asid == 0) 409 if (asid == 0)
357 goto failnoasid; 410 goto failnoasid;
358 411
@@ -366,41 +419,51 @@ static int gru_try_dropin(struct gru_thread_state *gts,
366 goto failactive; 419 goto failactive;
367 420
368 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); 421 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
369 if (ret == -1) 422 if (ret == VTOP_INVALID)
370 goto failinval; 423 goto failinval;
371 if (ret == -2) 424 if (ret == VTOP_RETRY)
372 goto failupm; 425 goto failupm;
373 426
374 if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) { 427 if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
375 gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift); 428 gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
376 if (atomic || !gru_update_cch(gts, 0)) { 429 if (atomic || !gru_update_cch(gts)) {
377 gts->ts_force_cch_reload = 1; 430 gts->ts_force_cch_reload = 1;
378 goto failupm; 431 goto failupm;
379 } 432 }
380 } 433 }
381 gru_cb_set_istatus_active(cb); 434
435 if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
436 gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
437 gru_flush_cache_cbe(cbe);
438 }
439
440 gru_cb_set_istatus_active(cbk);
441 gts->ustats.tlbdropin++;
382 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, 442 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
383 GRU_PAGESIZE(pageshift)); 443 GRU_PAGESIZE(pageshift));
384 STAT(tlb_dropin);
385 gru_dbg(grudev, 444 gru_dbg(grudev,
386 "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n", 445 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
387 ret ? "non-atomic" : "atomic", tfh, vaddr, asid, 446 " rw %d, ps %d, gpa 0x%lx\n",
388 pageshift, gpa); 447 atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
448 indexway, write, pageshift, gpa);
449 STAT(tlb_dropin);
389 return 0; 450 return 0;
390 451
391failnoasid: 452failnoasid:
392 /* No asid (delayed unload). */ 453 /* No asid (delayed unload). */
393 STAT(tlb_dropin_fail_no_asid); 454 STAT(tlb_dropin_fail_no_asid);
394 gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); 455 gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
395 if (!cb) 456 if (!cbk)
396 tfh_user_polling_mode(tfh); 457 tfh_user_polling_mode(tfh);
397 else 458 else
398 gru_flush_cache(tfh); 459 gru_flush_cache(tfh);
460 gru_flush_cache_cbe(cbe);
399 return -EAGAIN; 461 return -EAGAIN;
400 462
401failupm: 463failupm:
402 /* Atomic failure switch CBR to UPM */ 464 /* Atomic failure switch CBR to UPM */
403 tfh_user_polling_mode(tfh); 465 tfh_user_polling_mode(tfh);
466 gru_flush_cache_cbe(cbe);
404 STAT(tlb_dropin_fail_upm); 467 STAT(tlb_dropin_fail_upm);
405 gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); 468 gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
406 return 1; 469 return 1;
@@ -408,6 +471,7 @@ failupm:
408failfmm: 471failfmm:
409 /* FMM state on UPM call */ 472 /* FMM state on UPM call */
410 gru_flush_cache(tfh); 473 gru_flush_cache(tfh);
474 gru_flush_cache_cbe(cbe);
411 STAT(tlb_dropin_fail_fmm); 475 STAT(tlb_dropin_fail_fmm);
412 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); 476 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
413 return 0; 477 return 0;
@@ -415,17 +479,20 @@ failfmm:
415failnoexception: 479failnoexception:
416 /* TFH status did not show exception pending */ 480 /* TFH status did not show exception pending */
417 gru_flush_cache(tfh); 481 gru_flush_cache(tfh);
418 if (cb) 482 gru_flush_cache_cbe(cbe);
419 gru_flush_cache(cb); 483 if (cbk)
484 gru_flush_cache(cbk);
420 STAT(tlb_dropin_fail_no_exception); 485 STAT(tlb_dropin_fail_no_exception);
421 gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state); 486 gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
487 tfh, tfh->status, tfh->state);
422 return 0; 488 return 0;
423 489
424failidle: 490failidle:
425 /* TFH state was idle - no miss pending */ 491 /* TFH state was idle - no miss pending */
426 gru_flush_cache(tfh); 492 gru_flush_cache(tfh);
427 if (cb) 493 gru_flush_cache_cbe(cbe);
428 gru_flush_cache(cb); 494 if (cbk)
495 gru_flush_cache(cbk);
429 STAT(tlb_dropin_fail_idle); 496 STAT(tlb_dropin_fail_idle);
430 gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state); 497 gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
431 return 0; 498 return 0;
@@ -433,16 +500,18 @@ failidle:
433failinval: 500failinval:
434 /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */ 501 /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
435 tfh_exception(tfh); 502 tfh_exception(tfh);
503 gru_flush_cache_cbe(cbe);
436 STAT(tlb_dropin_fail_invalid); 504 STAT(tlb_dropin_fail_invalid);
437 gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); 505 gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
438 return -EFAULT; 506 return -EFAULT;
439 507
440failactive: 508failactive:
441 /* Range invalidate active. Switch to UPM iff atomic */ 509 /* Range invalidate active. Switch to UPM iff atomic */
442 if (!cb) 510 if (!cbk)
443 tfh_user_polling_mode(tfh); 511 tfh_user_polling_mode(tfh);
444 else 512 else
445 gru_flush_cache(tfh); 513 gru_flush_cache(tfh);
514 gru_flush_cache_cbe(cbe);
446 STAT(tlb_dropin_fail_range_active); 515 STAT(tlb_dropin_fail_range_active);
447 gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n", 516 gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
448 tfh, vaddr); 517 tfh, vaddr);
@@ -455,31 +524,41 @@ failactive:
455 * Note that this is the interrupt handler that is registered with linux 524 * Note that this is the interrupt handler that is registered with linux
456 * interrupt handlers. 525 * interrupt handlers.
457 */ 526 */
458irqreturn_t gru_intr(int irq, void *dev_id) 527static irqreturn_t gru_intr(int chiplet, int blade)
459{ 528{
460 struct gru_state *gru; 529 struct gru_state *gru;
461 struct gru_tlb_fault_map imap, dmap; 530 struct gru_tlb_fault_map imap, dmap;
462 struct gru_thread_state *gts; 531 struct gru_thread_state *gts;
463 struct gru_tlb_fault_handle *tfh = NULL; 532 struct gru_tlb_fault_handle *tfh = NULL;
533 struct completion *cmp;
464 int cbrnum, ctxnum; 534 int cbrnum, ctxnum;
465 535
466 STAT(intr); 536 STAT(intr);
467 537
468 gru = irq_to_gru(irq); 538 gru = &gru_base[blade]->bs_grus[chiplet];
469 if (!gru) { 539 if (!gru) {
470 dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n", 540 dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
471 raw_smp_processor_id(), irq); 541 raw_smp_processor_id(), chiplet);
472 return IRQ_NONE; 542 return IRQ_NONE;
473 } 543 }
474 get_clear_fault_map(gru, &imap, &dmap); 544 get_clear_fault_map(gru, &imap, &dmap);
545 gru_dbg(grudev,
546 "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
547 smp_processor_id(), chiplet, gru->gs_gid,
548 imap.fault_bits[0], imap.fault_bits[1],
549 dmap.fault_bits[0], dmap.fault_bits[1]);
475 550
476 for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) { 551 for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
477 complete(gru->gs_blade->bs_async_wq); 552 STAT(intr_cbr);
553 cmp = gru->gs_blade->bs_async_wq;
554 if (cmp)
555 complete(cmp);
478 gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n", 556 gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
479 gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done); 557 gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
480 } 558 }
481 559
482 for_each_cbr_in_tfm(cbrnum, imap.fault_bits) { 560 for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
561 STAT(intr_tfh);
483 tfh = get_tfh_by_index(gru, cbrnum); 562 tfh = get_tfh_by_index(gru, cbrnum);
484 prefetchw(tfh); /* Helps on hdw, required for emulator */ 563 prefetchw(tfh); /* Helps on hdw, required for emulator */
485 564
@@ -492,14 +571,20 @@ irqreturn_t gru_intr(int irq, void *dev_id)
492 ctxnum = tfh->ctxnum; 571 ctxnum = tfh->ctxnum;
493 gts = gru->gs_gts[ctxnum]; 572 gts = gru->gs_gts[ctxnum];
494 573
574 /* Spurious interrupts can cause this. Ignore. */
575 if (!gts) {
576 STAT(intr_spurious);
577 continue;
578 }
579
495 /* 580 /*
496 * This is running in interrupt context. Trylock the mmap_sem. 581 * This is running in interrupt context. Trylock the mmap_sem.
497 * If it fails, retry the fault in user context. 582 * If it fails, retry the fault in user context.
498 */ 583 */
584 gts->ustats.fmm_tlbmiss++;
499 if (!gts->ts_force_cch_reload && 585 if (!gts->ts_force_cch_reload &&
500 down_read_trylock(&gts->ts_mm->mmap_sem)) { 586 down_read_trylock(&gts->ts_mm->mmap_sem)) {
501 gts->ustats.fmm_tlbdropin++; 587 gru_try_dropin(gru, gts, tfh, NULL);
502 gru_try_dropin(gts, tfh, NULL);
503 up_read(&gts->ts_mm->mmap_sem); 588 up_read(&gts->ts_mm->mmap_sem);
504 } else { 589 } else {
505 tfh_user_polling_mode(tfh); 590 tfh_user_polling_mode(tfh);
@@ -509,20 +594,43 @@ irqreturn_t gru_intr(int irq, void *dev_id)
509 return IRQ_HANDLED; 594 return IRQ_HANDLED;
510} 595}
511 596
597irqreturn_t gru0_intr(int irq, void *dev_id)
598{
599 return gru_intr(0, uv_numa_blade_id());
600}
601
602irqreturn_t gru1_intr(int irq, void *dev_id)
603{
604 return gru_intr(1, uv_numa_blade_id());
605}
606
607irqreturn_t gru_intr_mblade(int irq, void *dev_id)
608{
609 int blade;
610
611 for_each_possible_blade(blade) {
612 if (uv_blade_nr_possible_cpus(blade))
613 continue;
614 gru_intr(0, blade);
615 gru_intr(1, blade);
616 }
617 return IRQ_HANDLED;
618}
619
512 620
513static int gru_user_dropin(struct gru_thread_state *gts, 621static int gru_user_dropin(struct gru_thread_state *gts,
514 struct gru_tlb_fault_handle *tfh, 622 struct gru_tlb_fault_handle *tfh,
515 unsigned long __user *cb) 623 void *cb)
516{ 624{
517 struct gru_mm_struct *gms = gts->ts_gms; 625 struct gru_mm_struct *gms = gts->ts_gms;
518 int ret; 626 int ret;
519 627
520 gts->ustats.upm_tlbdropin++; 628 gts->ustats.upm_tlbmiss++;
521 while (1) { 629 while (1) {
522 wait_event(gms->ms_wait_queue, 630 wait_event(gms->ms_wait_queue,
523 atomic_read(&gms->ms_range_active) == 0); 631 atomic_read(&gms->ms_range_active) == 0);
524 prefetchw(tfh); /* Helps on hdw, required for emulator */ 632 prefetchw(tfh); /* Helps on hdw, required for emulator */
525 ret = gru_try_dropin(gts, tfh, cb); 633 ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
526 if (ret <= 0) 634 if (ret <= 0)
527 return ret; 635 return ret;
528 STAT(call_os_wait_queue); 636 STAT(call_os_wait_queue);
@@ -538,52 +646,41 @@ int gru_handle_user_call_os(unsigned long cb)
538{ 646{
539 struct gru_tlb_fault_handle *tfh; 647 struct gru_tlb_fault_handle *tfh;
540 struct gru_thread_state *gts; 648 struct gru_thread_state *gts;
541 unsigned long __user *cbp; 649 void *cbk;
542 int ucbnum, cbrnum, ret = -EINVAL; 650 int ucbnum, cbrnum, ret = -EINVAL;
543 651
544 STAT(call_os); 652 STAT(call_os);
545 gru_dbg(grudev, "address 0x%lx\n", cb);
546 653
547 /* sanity check the cb pointer */ 654 /* sanity check the cb pointer */
548 ucbnum = get_cb_number((void *)cb); 655 ucbnum = get_cb_number((void *)cb);
549 if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) 656 if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
550 return -EINVAL; 657 return -EINVAL;
551 cbp = (unsigned long *)cb;
552 658
553 gts = gru_find_lock_gts(cb); 659 gts = gru_find_lock_gts(cb);
554 if (!gts) 660 if (!gts)
555 return -EINVAL; 661 return -EINVAL;
662 gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
556 663
557 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) 664 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
558 goto exit; 665 goto exit;
559 666
560 /* 667 gru_check_context_placement(gts);
561 * If force_unload is set, the UPM TLB fault is phony. The task
562 * has migrated to another node and the GSEG must be moved. Just
563 * unload the context. The task will page fault and assign a new
564 * context.
565 */
566 if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
567 gts->ts_blade != uv_numa_blade_id()) {
568 STAT(call_os_offnode_reference);
569 gts->ts_force_unload = 1;
570 }
571 668
572 /* 669 /*
573 * CCH may contain stale data if ts_force_cch_reload is set. 670 * CCH may contain stale data if ts_force_cch_reload is set.
574 */ 671 */
575 if (gts->ts_gru && gts->ts_force_cch_reload) { 672 if (gts->ts_gru && gts->ts_force_cch_reload) {
576 gts->ts_force_cch_reload = 0; 673 gts->ts_force_cch_reload = 0;
577 gru_update_cch(gts, 0); 674 gru_update_cch(gts);
578 } 675 }
579 676
580 ret = -EAGAIN; 677 ret = -EAGAIN;
581 cbrnum = thread_cbr_number(gts, ucbnum); 678 cbrnum = thread_cbr_number(gts, ucbnum);
582 if (gts->ts_force_unload) { 679 if (gts->ts_gru) {
583 gru_unload_context(gts, 1);
584 } else if (gts->ts_gru) {
585 tfh = get_tfh_by_index(gts->ts_gru, cbrnum); 680 tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
586 ret = gru_user_dropin(gts, tfh, cbp); 681 cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
682 gts->ts_ctxnum, ucbnum);
683 ret = gru_user_dropin(gts, tfh, cbk);
587 } 684 }
588exit: 685exit:
589 gru_unlock_gts(gts); 686 gru_unlock_gts(gts);
@@ -605,11 +702,11 @@ int gru_get_exception_detail(unsigned long arg)
605 if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet))) 702 if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
606 return -EFAULT; 703 return -EFAULT;
607 704
608 gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
609 gts = gru_find_lock_gts(excdet.cb); 705 gts = gru_find_lock_gts(excdet.cb);
610 if (!gts) 706 if (!gts)
611 return -EINVAL; 707 return -EINVAL;
612 708
709 gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
613 ucbnum = get_cb_number((void *)excdet.cb); 710 ucbnum = get_cb_number((void *)excdet.cb);
614 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { 711 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
615 ret = -EINVAL; 712 ret = -EINVAL;
@@ -617,6 +714,7 @@ int gru_get_exception_detail(unsigned long arg)
617 cbrnum = thread_cbr_number(gts, ucbnum); 714 cbrnum = thread_cbr_number(gts, ucbnum);
618 cbe = get_cbe_by_index(gts->ts_gru, cbrnum); 715 cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
619 gru_flush_cache(cbe); /* CBE not coherent */ 716 gru_flush_cache(cbe); /* CBE not coherent */
 717 sync_core(); /* make sure we have current data */
620 excdet.opc = cbe->opccpy; 718 excdet.opc = cbe->opccpy;
621 excdet.exopc = cbe->exopccpy; 719 excdet.exopc = cbe->exopccpy;
622 excdet.ecause = cbe->ecause; 720 excdet.ecause = cbe->ecause;
@@ -624,7 +722,7 @@ int gru_get_exception_detail(unsigned long arg)
624 excdet.exceptdet1 = cbe->idef3upd; 722 excdet.exceptdet1 = cbe->idef3upd;
625 excdet.cbrstate = cbe->cbrstate; 723 excdet.cbrstate = cbe->cbrstate;
626 excdet.cbrexecstatus = cbe->cbrexecstatus; 724 excdet.cbrexecstatus = cbe->cbrexecstatus;
627 gru_flush_cache(cbe); 725 gru_flush_cache_cbe(cbe);
628 ret = 0; 726 ret = 0;
629 } else { 727 } else {
630 ret = -EAGAIN; 728 ret = -EAGAIN;
@@ -733,6 +831,11 @@ long gru_get_gseg_statistics(unsigned long arg)
733 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) 831 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
734 return -EFAULT; 832 return -EFAULT;
735 833
834 /*
835 * The library creates arrays of contexts for threaded programs.
836 * If no gts exists in the array, the context has never been used & all
837 * statistics are implicitly 0.
838 */
736 gts = gru_find_lock_gts(req.gseg); 839 gts = gru_find_lock_gts(req.gseg);
737 if (gts) { 840 if (gts) {
738 memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats)); 841 memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
@@ -762,11 +865,25 @@ int gru_set_context_option(unsigned long arg)
762 return -EFAULT; 865 return -EFAULT;
763 gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1); 866 gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
764 867
765 gts = gru_alloc_locked_gts(req.gseg); 868 gts = gru_find_lock_gts(req.gseg);
766 if (!gts) 869 if (!gts) {
767 return -EINVAL; 870 gts = gru_alloc_locked_gts(req.gseg);
871 if (IS_ERR(gts))
872 return PTR_ERR(gts);
873 }
768 874
769 switch (req.op) { 875 switch (req.op) {
876 case sco_blade_chiplet:
877 /* Select blade/chiplet for GRU context */
878 if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] ||
879 req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
880 ret = -EINVAL;
881 } else {
882 gts->ts_user_blade_id = req.val1;
883 gts->ts_user_chiplet_id = req.val0;
884 gru_check_context_placement(gts);
885 }
886 break;
770 case sco_gseg_owner: 887 case sco_gseg_owner:
771 /* Register the current task as the GSEG owner */ 888 /* Register the current task as the GSEG owner */
772 gts->ts_tgid_owner = current->tgid; 889 gts->ts_tgid_owner = current->tgid;
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index ce5eda985ab0..cb3b4d228475 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -35,6 +35,9 @@
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38#ifdef CONFIG_X86_64
39#include <asm/uv/uv_irq.h>
40#endif
38#include <asm/uv/uv.h> 41#include <asm/uv/uv.h>
39#include "gru.h" 42#include "gru.h"
40#include "grulib.h" 43#include "grulib.h"
@@ -130,7 +133,6 @@ static int gru_create_new_context(unsigned long arg)
130 struct gru_vma_data *vdata; 133 struct gru_vma_data *vdata;
131 int ret = -EINVAL; 134 int ret = -EINVAL;
132 135
133
134 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) 136 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
135 return -EFAULT; 137 return -EFAULT;
136 138
@@ -150,6 +152,7 @@ static int gru_create_new_context(unsigned long arg)
150 vdata->vd_dsr_au_count = 152 vdata->vd_dsr_au_count =
151 GRU_DS_BYTES_TO_AU(req.data_segment_bytes); 153 GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
152 vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks); 154 vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
155 vdata->vd_tlb_preload_count = req.tlb_preload_count;
153 ret = 0; 156 ret = 0;
154 } 157 }
155 up_write(&current->mm->mmap_sem); 158 up_write(&current->mm->mmap_sem);
@@ -190,7 +193,7 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
190{ 193{
191 int err = -EBADRQC; 194 int err = -EBADRQC;
192 195
193 gru_dbg(grudev, "file %p\n", file); 196 gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
194 197
195 switch (req) { 198 switch (req) {
196 case GRU_CREATE_CONTEXT: 199 case GRU_CREATE_CONTEXT:
@@ -232,23 +235,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
232 * system. 235 * system.
233 */ 236 */
234static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, 237static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
235 void *vaddr, int nid, int bid, int grunum) 238 void *vaddr, int blade_id, int chiplet_id)
236{ 239{
237 spin_lock_init(&gru->gs_lock); 240 spin_lock_init(&gru->gs_lock);
238 spin_lock_init(&gru->gs_asid_lock); 241 spin_lock_init(&gru->gs_asid_lock);
239 gru->gs_gru_base_paddr = paddr; 242 gru->gs_gru_base_paddr = paddr;
240 gru->gs_gru_base_vaddr = vaddr; 243 gru->gs_gru_base_vaddr = vaddr;
241 gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum; 244 gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
242 gru->gs_blade = gru_base[bid]; 245 gru->gs_blade = gru_base[blade_id];
243 gru->gs_blade_id = bid; 246 gru->gs_blade_id = blade_id;
247 gru->gs_chiplet_id = chiplet_id;
244 gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; 248 gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
245 gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; 249 gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
246 gru->gs_asid_limit = MAX_ASID; 250 gru->gs_asid_limit = MAX_ASID;
247 gru_tgh_flush_init(gru); 251 gru_tgh_flush_init(gru);
248 if (gru->gs_gid >= gru_max_gids) 252 if (gru->gs_gid >= gru_max_gids)
249 gru_max_gids = gru->gs_gid + 1; 253 gru_max_gids = gru->gs_gid + 1;
250 gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", 254 gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
251 bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, 255 blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
252 gru->gs_gru_base_paddr); 256 gru->gs_gru_base_paddr);
253} 257}
254 258
@@ -264,12 +268,10 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
264 268
265 max_user_cbrs = GRU_NUM_CB; 269 max_user_cbrs = GRU_NUM_CB;
266 max_user_dsr_bytes = GRU_NUM_DSR_BYTES; 270 max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
267 for_each_online_node(nid) { 271 for_each_possible_blade(bid) {
268 bid = uv_node_to_blade_id(nid); 272 pnode = uv_blade_to_pnode(bid);
269 pnode = uv_node_to_pnode(nid); 273 nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
270 if (bid < 0 || gru_base[bid]) 274 page = alloc_pages_node(nid, GFP_KERNEL, order);
271 continue;
272 page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
273 if (!page) 275 if (!page)
274 goto fail; 276 goto fail;
275 gru_base[bid] = page_address(page); 277 gru_base[bid] = page_address(page);
@@ -285,7 +287,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
285 chip++, gru++) { 287 chip++, gru++) {
286 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); 288 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
287 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); 289 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
288 gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip); 290 gru_init_chiplet(gru, paddr, vaddr, bid, chip);
289 n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; 291 n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
290 cbrs = max(cbrs, n); 292 cbrs = max(cbrs, n);
291 n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; 293 n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -298,39 +300,215 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
298 return 0; 300 return 0;
299 301
300fail: 302fail:
301 for (nid--; nid >= 0; nid--) 303 for (bid--; bid >= 0; bid--)
302 free_pages((unsigned long)gru_base[nid], order); 304 free_pages((unsigned long)gru_base[bid], order);
303 return -ENOMEM; 305 return -ENOMEM;
304} 306}
305 307
306#ifdef CONFIG_IA64 308static void gru_free_tables(void)
309{
310 int bid;
311 int order = get_order(sizeof(struct gru_state) *
312 GRU_CHIPLETS_PER_BLADE);
307 313
308static int get_base_irq(void) 314 for (bid = 0; bid < GRU_MAX_BLADES; bid++)
315 free_pages((unsigned long)gru_base[bid], order);
316}
317
318static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
309{ 319{
310 return IRQ_GRU; 320 unsigned long mmr = 0;
321 int core;
322
323 /*
324 * We target the cores of a blade and not the hyperthreads themselves.
325 * There is a max of 8 cores per socket and 2 sockets per blade,
326 * making for a max total of 16 cores (i.e., 16 CPUs without
327 * hyperthreading and 32 CPUs with hyperthreading).
328 */
329 core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
330 if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
331 return 0;
332
333 if (chiplet == 0) {
334 mmr = UVH_GR0_TLB_INT0_CONFIG +
335 core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
336 } else if (chiplet == 1) {
337 mmr = UVH_GR1_TLB_INT0_CONFIG +
338 core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
339 } else {
340 BUG();
341 }
342
343 *corep = core;
344 return mmr;
311} 345}
312 346
313#elif defined CONFIG_X86_64 347#ifdef CONFIG_IA64
314 348
315static void noop(unsigned int irq) 349static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
350
351static void gru_noop(unsigned int irq)
316{ 352{
317} 353}
318 354
319static struct irq_chip gru_chip = { 355static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
320 .name = "gru", 356 [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
321 .mask = noop, 357 .mask = gru_noop,
322 .unmask = noop, 358 .unmask = gru_noop,
323 .ack = noop, 359 .ack = gru_noop
360 }
324}; 361};
325 362
326static int get_base_irq(void) 363static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
364 irq_handler_t irq_handler, int cpu, int blade)
365{
366 unsigned long mmr;
367 int irq = IRQ_GRU + chiplet;
368 int ret, core;
369
370 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
371 if (mmr == 0)
372 return 0;
373
374 if (gru_irq_count[chiplet] == 0) {
375 gru_chip[chiplet].name = irq_name;
376 ret = set_irq_chip(irq, &gru_chip[chiplet]);
377 if (ret) {
378 printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
379 GRU_DRIVER_ID_STR, -ret);
380 return ret;
381 }
382
383 ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
384 if (ret) {
385 printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
386 GRU_DRIVER_ID_STR, -ret);
387 return ret;
388 }
389 }
390 gru_irq_count[chiplet]++;
391
392 return 0;
393}
394
395static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
396{
397 unsigned long mmr;
398 int core, irq = IRQ_GRU + chiplet;
399
400 if (gru_irq_count[chiplet] == 0)
401 return;
402
403 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
404 if (mmr == 0)
405 return;
406
407 if (--gru_irq_count[chiplet] == 0)
408 free_irq(irq, NULL);
409}
410
411#elif defined CONFIG_X86_64
412
413static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
414 irq_handler_t irq_handler, int cpu, int blade)
415{
416 unsigned long mmr;
417 int irq, core;
418 int ret;
419
420 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
421 if (mmr == 0)
422 return 0;
423
424 irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
425 if (irq < 0) {
426 printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
427 GRU_DRIVER_ID_STR, -irq);
428 return irq;
429 }
430
431 ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
432 if (ret) {
433 uv_teardown_irq(irq);
434 printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
435 GRU_DRIVER_ID_STR, -ret);
436 return ret;
437 }
438 gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
439 return 0;
440}
441
442static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
327{ 443{
328 set_irq_chip(IRQ_GRU, &gru_chip); 444 int irq, core;
329 set_irq_chip(IRQ_GRU + 1, &gru_chip); 445 unsigned long mmr;
330 return IRQ_GRU; 446
447 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
448 if (mmr) {
449 irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
450 if (irq) {
451 free_irq(irq, NULL);
452 uv_teardown_irq(irq);
453 }
454 }
331} 455}
456
332#endif 457#endif
333 458
459static void gru_teardown_tlb_irqs(void)
460{
461 int blade;
462 int cpu;
463
464 for_each_online_cpu(cpu) {
465 blade = uv_cpu_to_blade_id(cpu);
466 gru_chiplet_teardown_tlb_irq(0, cpu, blade);
467 gru_chiplet_teardown_tlb_irq(1, cpu, blade);
468 }
469 for_each_possible_blade(blade) {
470 if (uv_blade_nr_possible_cpus(blade))
471 continue;
472 gru_chiplet_teardown_tlb_irq(0, 0, blade);
473 gru_chiplet_teardown_tlb_irq(1, 0, blade);
474 }
475}
476
477static int gru_setup_tlb_irqs(void)
478{
479 int blade;
480 int cpu;
481 int ret;
482
483 for_each_online_cpu(cpu) {
484 blade = uv_cpu_to_blade_id(cpu);
485 ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
486 if (ret != 0)
487 goto exit1;
488
489 ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
490 if (ret != 0)
491 goto exit1;
492 }
493 for_each_possible_blade(blade) {
494 if (uv_blade_nr_possible_cpus(blade))
495 continue;
496 ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
497 if (ret != 0)
498 goto exit1;
499
500 ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
501 if (ret != 0)
502 goto exit1;
503 }
504
505 return 0;
506
507exit1:
508 gru_teardown_tlb_irqs();
509 return ret;
510}
511
334/* 512/*
335 * gru_init 513 * gru_init
336 * 514 *
@@ -338,8 +516,7 @@ static int get_base_irq(void)
338 */ 516 */
339static int __init gru_init(void) 517static int __init gru_init(void)
340{ 518{
341 int ret, irq, chip; 519 int ret;
342 char id[10];
343 520
344 if (!is_uv_system()) 521 if (!is_uv_system())
345 return 0; 522 return 0;
@@ -354,41 +531,29 @@ static int __init gru_init(void)
354 gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; 531 gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
355 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", 532 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
356 gru_start_paddr, gru_end_paddr); 533 gru_start_paddr, gru_end_paddr);
357 irq = get_base_irq();
358 for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
359 ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
360 /* TODO: fix irq handling on x86. For now ignore failure because
361 * interrupts are not required & not yet fully supported */
362 if (ret) {
363 printk(KERN_WARNING
364 "!!!WARNING: GRU ignoring request failure!!!\n");
365 ret = 0;
366 }
367 if (ret) {
368 printk(KERN_ERR "%s: request_irq failed\n",
369 GRU_DRIVER_ID_STR);
370 goto exit1;
371 }
372 }
373
374 ret = misc_register(&gru_miscdev); 534 ret = misc_register(&gru_miscdev);
375 if (ret) { 535 if (ret) {
376 printk(KERN_ERR "%s: misc_register failed\n", 536 printk(KERN_ERR "%s: misc_register failed\n",
377 GRU_DRIVER_ID_STR); 537 GRU_DRIVER_ID_STR);
378 goto exit1; 538 goto exit0;
379 } 539 }
380 540
381 ret = gru_proc_init(); 541 ret = gru_proc_init();
382 if (ret) { 542 if (ret) {
383 printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); 543 printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
384 goto exit2; 544 goto exit1;
385 } 545 }
386 546
387 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); 547 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
388 if (ret) { 548 if (ret) {
389 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); 549 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
390 goto exit3; 550 goto exit2;
391 } 551 }
552
553 ret = gru_setup_tlb_irqs();
554 if (ret != 0)
555 goto exit3;
556
392 gru_kservices_init(); 557 gru_kservices_init();
393 558
394 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, 559 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
@@ -396,31 +561,24 @@ static int __init gru_init(void)
396 return 0; 561 return 0;
397 562
398exit3: 563exit3:
399 gru_proc_exit(); 564 gru_free_tables();
400exit2: 565exit2:
401 misc_deregister(&gru_miscdev); 566 gru_proc_exit();
402exit1: 567exit1:
403 for (--chip; chip >= 0; chip--) 568 misc_deregister(&gru_miscdev);
404 free_irq(irq + chip, NULL); 569exit0:
405 return ret; 570 return ret;
406 571
407} 572}
408 573
409static void __exit gru_exit(void) 574static void __exit gru_exit(void)
410{ 575{
411 int i, bid;
412 int order = get_order(sizeof(struct gru_state) *
413 GRU_CHIPLETS_PER_BLADE);
414
415 if (!is_uv_system()) 576 if (!is_uv_system())
416 return; 577 return;
417 578
418 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) 579 gru_teardown_tlb_irqs();
419 free_irq(IRQ_GRU + i, NULL);
420 gru_kservices_exit(); 580 gru_kservices_exit();
421 for (bid = 0; bid < GRU_MAX_BLADES; bid++) 581 gru_free_tables();
422 free_pages((unsigned long)gru_base[bid], order);
423
424 misc_deregister(&gru_miscdev); 582 misc_deregister(&gru_miscdev);
425 gru_proc_exit(); 583 gru_proc_exit();
426} 584}
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 37e7cfc53b9c..2f30badc6ffd 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -27,9 +27,11 @@
27#ifdef CONFIG_IA64 27#ifdef CONFIG_IA64
28#include <asm/processor.h> 28#include <asm/processor.h>
29#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) 29#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
30#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq)
30#else 31#else
31#include <asm/tsc.h> 32#include <asm/tsc.h>
32#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) 33#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
34#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
33#endif 35#endif
34 36
35/* Extract the status field from a kernel handle */ 37/* Extract the status field from a kernel handle */
@@ -39,21 +41,39 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39 41
40static void update_mcs_stats(enum mcs_op op, unsigned long clks) 42static void update_mcs_stats(enum mcs_op op, unsigned long clks)
41{ 43{
44 unsigned long nsec;
45
46 nsec = CLKS2NSEC(clks);
42 atomic_long_inc(&mcs_op_statistics[op].count); 47 atomic_long_inc(&mcs_op_statistics[op].count);
43 atomic_long_add(clks, &mcs_op_statistics[op].total); 48 atomic_long_add(nsec, &mcs_op_statistics[op].total);
44 if (mcs_op_statistics[op].max < clks) 49 if (mcs_op_statistics[op].max < nsec)
45 mcs_op_statistics[op].max = clks; 50 mcs_op_statistics[op].max = nsec;
46} 51}
47 52
48static void start_instruction(void *h) 53static void start_instruction(void *h)
49{ 54{
50 unsigned long *w0 = h; 55 unsigned long *w0 = h;
51 56
52 wmb(); /* setting CMD bit must be last */ 57 wmb(); /* setting CMD/STATUS bits must be last */
53 *w0 = *w0 | 1; 58 *w0 = *w0 | 0x20001;
54 gru_flush_cache(h); 59 gru_flush_cache(h);
55} 60}
56 61
62static void report_instruction_timeout(void *h)
63{
64 unsigned long goff = GSEGPOFF((unsigned long)h);
65 char *id = "???";
66
67 if (TYPE_IS(CCH, goff))
68 id = "CCH";
69 else if (TYPE_IS(TGH, goff))
70 id = "TGH";
71 else if (TYPE_IS(TFH, goff))
72 id = "TFH";
73
74 panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
75}
76
57static int wait_instruction_complete(void *h, enum mcs_op opc) 77static int wait_instruction_complete(void *h, enum mcs_op opc)
58{ 78{
59 int status; 79 int status;
@@ -64,9 +84,10 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
64 status = GET_MSEG_HANDLE_STATUS(h); 84 status = GET_MSEG_HANDLE_STATUS(h);
65 if (status != CCHSTATUS_ACTIVE) 85 if (status != CCHSTATUS_ACTIVE)
66 break; 86 break;
67 if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) 87 if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
68 panic("GRU %p is malfunctioning: start %ld, end %ld\n", 88 report_instruction_timeout(h);
69 h, start_time, (unsigned long)get_cycles()); 89 start_time = get_cycles();
90 }
70 } 91 }
71 if (gru_options & OPT_STATS) 92 if (gru_options & OPT_STATS)
72 update_mcs_stats(opc, get_cycles() - start_time); 93 update_mcs_stats(opc, get_cycles() - start_time);
@@ -75,9 +96,18 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
75 96
76int cch_allocate(struct gru_context_configuration_handle *cch) 97int cch_allocate(struct gru_context_configuration_handle *cch)
77{ 98{
99 int ret;
100
78 cch->opc = CCHOP_ALLOCATE; 101 cch->opc = CCHOP_ALLOCATE;
79 start_instruction(cch); 102 start_instruction(cch);
80 return wait_instruction_complete(cch, cchop_allocate); 103 ret = wait_instruction_complete(cch, cchop_allocate);
104
105 /*
106 * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
107 * The GSEG memory does not exist until the ALLOCATE completes.
108 */
109 sync_core();
110 return ret;
81} 111}
82 112
83int cch_start(struct gru_context_configuration_handle *cch) 113int cch_start(struct gru_context_configuration_handle *cch)
@@ -96,9 +126,18 @@ int cch_interrupt(struct gru_context_configuration_handle *cch)
96 126
97int cch_deallocate(struct gru_context_configuration_handle *cch) 127int cch_deallocate(struct gru_context_configuration_handle *cch)
98{ 128{
129 int ret;
130
99 cch->opc = CCHOP_DEALLOCATE; 131 cch->opc = CCHOP_DEALLOCATE;
100 start_instruction(cch); 132 start_instruction(cch);
101 return wait_instruction_complete(cch, cchop_deallocate); 133 ret = wait_instruction_complete(cch, cchop_deallocate);
134
135 /*
136 * Stop speculation into the GSEG being unmapped by the previous
137 * DEALLOCATE.
138 */
139 sync_core();
140 return ret;
102} 141}
103 142
104int cch_interrupt_sync(struct gru_context_configuration_handle 143int cch_interrupt_sync(struct gru_context_configuration_handle
@@ -126,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh,
126 return wait_instruction_complete(tgh, tghop_invalidate); 165 return wait_instruction_complete(tgh, tghop_invalidate);
127} 166}
128 167
129void tfh_write_only(struct gru_tlb_fault_handle *tfh, 168int tfh_write_only(struct gru_tlb_fault_handle *tfh,
130 unsigned long pfn, unsigned long vaddr, 169 unsigned long paddr, int gaa,
131 int asid, int dirty, int pagesize) 170 unsigned long vaddr, int asid, int dirty,
171 int pagesize)
132{ 172{
133 tfh->fillasid = asid; 173 tfh->fillasid = asid;
134 tfh->fillvaddr = vaddr; 174 tfh->fillvaddr = vaddr;
135 tfh->pfn = pfn; 175 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
176 tfh->gaa = gaa;
136 tfh->dirty = dirty; 177 tfh->dirty = dirty;
137 tfh->pagesize = pagesize; 178 tfh->pagesize = pagesize;
138 tfh->opc = TFHOP_WRITE_ONLY; 179 tfh->opc = TFHOP_WRITE_ONLY;
139 start_instruction(tfh); 180 start_instruction(tfh);
181 return wait_instruction_complete(tfh, tfhop_write_only);
140} 182}
141 183
142void tfh_write_restart(struct gru_tlb_fault_handle *tfh, 184void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index f44112242d00..3f998b924d8f 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -91,6 +91,12 @@
91/* Convert an arbitrary handle address to the beginning of the GRU segment */ 91/* Convert an arbitrary handle address to the beginning of the GRU segment */
92#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1))) 92#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
93 93
94/* Test a valid handle address to determine the type */
95#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
96 GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
97 (((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
98
99
94/* General addressing macros. */ 100/* General addressing macros. */
95static inline void *get_gseg_base_address(void *base, int ctxnum) 101static inline void *get_gseg_base_address(void *base, int ctxnum)
96{ 102{
@@ -158,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
158 return vaddr + GRU_SIZE * (2 * pnode + chiplet); 164 return vaddr + GRU_SIZE * (2 * pnode + chiplet);
159} 165}
160 166
167static inline struct gru_control_block_extended *gru_tfh_to_cbe(
168 struct gru_tlb_fault_handle *tfh)
169{
170 unsigned long cbe;
171
172 cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
173 return (struct gru_control_block_extended*)cbe;
174}
175
176
161 177
162 178
163/* 179/*
@@ -236,6 +252,17 @@ enum gru_tgh_state {
236 TGHSTATE_RESTART_CTX, 252 TGHSTATE_RESTART_CTX,
237}; 253};
238 254
255enum gru_tgh_cause {
256 TGHCAUSE_RR_ECC,
257 TGHCAUSE_TLB_ECC,
258 TGHCAUSE_LRU_ECC,
259 TGHCAUSE_PS_ECC,
260 TGHCAUSE_MUL_ERR,
261 TGHCAUSE_DATA_ERR,
262 TGHCAUSE_SW_FORCE
263};
264
265
239/* 266/*
 240 * TFH - TLB Fault Handle 267
241 * Used for TLB dropins into the GRU TLB. 268 * Used for TLB dropins into the GRU TLB.
@@ -440,6 +467,12 @@ struct gru_control_block_extended {
440 unsigned int cbrexecstatus:8; 467 unsigned int cbrexecstatus:8;
441}; 468};
442 469
470/* CBE fields for active BCOPY instructions */
471#define cbe_baddr0 idef1upd
472#define cbe_baddr1 idef3upd
473#define cbe_src_cl idef6cpy
474#define cbe_nelemcur idef5upd
475
443enum gru_cbr_state { 476enum gru_cbr_state {
444 CBRSTATE_INACTIVE, 477 CBRSTATE_INACTIVE,
445 CBRSTATE_IDLE, 478 CBRSTATE_IDLE,
@@ -487,8 +520,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
487int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, 520int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
488 unsigned long vaddrmask, int asid, int pagesize, int global, int n, 521 unsigned long vaddrmask, int asid, int pagesize, int global, int n,
489 unsigned short ctxbitmap); 522 unsigned short ctxbitmap);
490void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn, 523int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
491 unsigned long vaddr, int asid, int dirty, int pagesize); 524 int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
492void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, 525void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
493 int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); 526 int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
494void tfh_restart(struct gru_tlb_fault_handle *tfh); 527void tfh_restart(struct gru_tlb_fault_handle *tfh);
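A short sketch (not part of the patch) of how the gru_tfh_to_cbe() helper and the cbe_* aliases above fit together: given the TFH that raised a fault, locate its CBE and read a BCOPY operand. The flush/sync pairing mirrors the other CBE accesses in this patch, since CBEs are not cache coherent; the width of idef1upd is assumed to hold the full base address.

/* Sketch only: inspect the BCOPY base address of the CB owning this TFH. */
static unsigned long example_bcopy_baddr0(struct gru_tlb_fault_handle *tfh)
{
        struct gru_control_block_extended *cbe = gru_tfh_to_cbe(tfh);

        gru_flush_cache(cbe);           /* CBE is not cache coherent */
        sync_core();                    /* wait for flushed data before reading */
        return cbe->cbe_baddr0;         /* alias for idef1upd on active BCOPYs */
}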
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 55eabfa85585..9b2062d17327 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -44,7 +44,8 @@ static int gru_user_copy_handle(void __user **dp, void *s)
44 44
45static int gru_dump_context_data(void *grubase, 45static int gru_dump_context_data(void *grubase,
46 struct gru_context_configuration_handle *cch, 46 struct gru_context_configuration_handle *cch,
47 void __user *ubuf, int ctxnum, int dsrcnt) 47 void __user *ubuf, int ctxnum, int dsrcnt,
48 int flush_cbrs)
48{ 49{
49 void *cb, *cbe, *tfh, *gseg; 50 void *cb, *cbe, *tfh, *gseg;
50 int i, scr; 51 int i, scr;
@@ -55,6 +56,8 @@ static int gru_dump_context_data(void *grubase,
55 tfh = grubase + GRU_TFH_BASE; 56 tfh = grubase + GRU_TFH_BASE;
56 57
57 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) { 58 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
59 if (flush_cbrs)
60 gru_flush_cache(cb);
58 if (gru_user_copy_handle(&ubuf, cb)) 61 if (gru_user_copy_handle(&ubuf, cb))
59 goto fail; 62 goto fail;
60 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE)) 63 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
@@ -115,7 +118,7 @@ fail:
115 118
116static int gru_dump_context(struct gru_state *gru, int ctxnum, 119static int gru_dump_context(struct gru_state *gru, int ctxnum,
117 void __user *ubuf, void __user *ubufend, char data_opt, 120 void __user *ubuf, void __user *ubufend, char data_opt,
118 char lock_cch) 121 char lock_cch, char flush_cbrs)
119{ 122{
120 struct gru_dump_context_header hdr; 123 struct gru_dump_context_header hdr;
121 struct gru_dump_context_header __user *uhdr = ubuf; 124 struct gru_dump_context_header __user *uhdr = ubuf;
@@ -159,8 +162,7 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
159 ret = -EFBIG; 162 ret = -EFBIG;
160 else 163 else
161 ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum, 164 ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
162 dsrcnt); 165 dsrcnt, flush_cbrs);
163
164 } 166 }
165 if (cch_locked) 167 if (cch_locked)
166 unlock_cch_handle(cch); 168 unlock_cch_handle(cch);
@@ -215,7 +217,8 @@ int gru_dump_chiplet_request(unsigned long arg)
215 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { 217 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
216 if (req.ctxnum == ctxnum || req.ctxnum < 0) { 218 if (req.ctxnum == ctxnum || req.ctxnum < 0) {
217 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend, 219 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
218 req.data_opt, req.lock_cch); 220 req.data_opt, req.lock_cch,
221 req.flush_cbrs);
219 if (ret < 0) 222 if (ret < 0)
220 goto fail; 223 goto fail;
221 ubuf += ret; 224 ubuf += ret;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 766e21e15574..34749ee88dfa 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <asm/io_apic.h>
34#include "gru.h" 35#include "gru.h"
35#include "grulib.h" 36#include "grulib.h"
36#include "grutables.h" 37#include "grutables.h"
@@ -97,9 +98,6 @@
97#define ASYNC_HAN_TO_BID(h) ((h) - 1) 98#define ASYNC_HAN_TO_BID(h) ((h) - 1)
98#define ASYNC_BID_TO_HAN(b) ((b) + 1) 99#define ASYNC_BID_TO_HAN(b) ((b) + 1)
99#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)] 100#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
100#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
101 (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
102#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
103 101
104#define GRU_NUM_KERNEL_CBR 1 102#define GRU_NUM_KERNEL_CBR 1
105#define GRU_NUM_KERNEL_DSR_BYTES 256 103#define GRU_NUM_KERNEL_DSR_BYTES 256
@@ -160,8 +158,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
160 up_read(&bs->bs_kgts_sema); 158 up_read(&bs->bs_kgts_sema);
161 down_write(&bs->bs_kgts_sema); 159 down_write(&bs->bs_kgts_sema);
162 160
163 if (!bs->bs_kgts) 161 if (!bs->bs_kgts) {
164 bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0); 162 bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
163 bs->bs_kgts->ts_user_blade_id = blade_id;
164 }
165 kgts = bs->bs_kgts; 165 kgts = bs->bs_kgts;
166 166
167 if (!kgts->ts_gru) { 167 if (!kgts->ts_gru) {
@@ -172,9 +172,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
172 kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU( 172 kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
173 GRU_NUM_KERNEL_DSR_BYTES * ncpus + 173 GRU_NUM_KERNEL_DSR_BYTES * ncpus +
174 bs->bs_async_dsr_bytes); 174 bs->bs_async_dsr_bytes);
175 while (!gru_assign_gru_context(kgts, blade_id)) { 175 while (!gru_assign_gru_context(kgts)) {
176 msleep(1); 176 msleep(1);
177 gru_steal_context(kgts, blade_id); 177 gru_steal_context(kgts);
178 } 178 }
179 gru_load_context(kgts); 179 gru_load_context(kgts);
180 gru = bs->bs_kgts->ts_gru; 180 gru = bs->bs_kgts->ts_gru;
@@ -200,13 +200,15 @@ static int gru_free_kernel_contexts(void)
200 bs = gru_base[bid]; 200 bs = gru_base[bid];
201 if (!bs) 201 if (!bs)
202 continue; 202 continue;
203
204 /* Ignore busy contexts. Don't want to block here. */
203 if (down_write_trylock(&bs->bs_kgts_sema)) { 205 if (down_write_trylock(&bs->bs_kgts_sema)) {
204 kgts = bs->bs_kgts; 206 kgts = bs->bs_kgts;
205 if (kgts && kgts->ts_gru) 207 if (kgts && kgts->ts_gru)
206 gru_unload_context(kgts, 0); 208 gru_unload_context(kgts, 0);
207 kfree(kgts);
208 bs->bs_kgts = NULL; 209 bs->bs_kgts = NULL;
209 up_write(&bs->bs_kgts_sema); 210 up_write(&bs->bs_kgts_sema);
211 kfree(kgts);
210 } else { 212 } else {
211 ret++; 213 ret++;
212 } 214 }
@@ -220,13 +222,21 @@ static int gru_free_kernel_contexts(void)
220static struct gru_blade_state *gru_lock_kernel_context(int blade_id) 222static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
221{ 223{
222 struct gru_blade_state *bs; 224 struct gru_blade_state *bs;
225 int bid;
223 226
224 STAT(lock_kernel_context); 227 STAT(lock_kernel_context);
225 bs = gru_base[blade_id]; 228again:
229 bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
230 bs = gru_base[bid];
226 231
 232 /* Handle the case where migration occurred while waiting for the sema */
227 down_read(&bs->bs_kgts_sema); 233 down_read(&bs->bs_kgts_sema);
234 if (blade_id < 0 && bid != uv_numa_blade_id()) {
235 up_read(&bs->bs_kgts_sema);
236 goto again;
237 }
228 if (!bs->bs_kgts || !bs->bs_kgts->ts_gru) 238 if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
229 gru_load_kernel_context(bs, blade_id); 239 gru_load_kernel_context(bs, bid);
230 return bs; 240 return bs;
231 241
232} 242}
@@ -255,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
255 265
256 BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); 266 BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
257 preempt_disable(); 267 preempt_disable();
258 bs = gru_lock_kernel_context(uv_numa_blade_id()); 268 bs = gru_lock_kernel_context(-1);
259 lcpu = uv_blade_processor_id(); 269 lcpu = uv_blade_processor_id();
260 *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; 270 *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
261 *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; 271 *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
@@ -384,13 +394,31 @@ int gru_get_cb_exception_detail(void *cb,
384 struct control_block_extended_exc_detail *excdet) 394 struct control_block_extended_exc_detail *excdet)
385{ 395{
386 struct gru_control_block_extended *cbe; 396 struct gru_control_block_extended *cbe;
387 struct gru_blade_state *bs; 397 struct gru_thread_state *kgts = NULL;
388 int cbrnum; 398 unsigned long off;
389 399 int cbrnum, bid;
390 bs = KCB_TO_BS(cb); 400
391 cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb)); 401 /*
402 * Locate kgts for cb. This algorithm is SLOW but
 403 * this function is rarely called (i.e., almost never).
404 * Performance does not matter.
405 */
406 for_each_possible_blade(bid) {
407 if (!gru_base[bid])
408 break;
409 kgts = gru_base[bid]->bs_kgts;
410 if (!kgts || !kgts->ts_gru)
411 continue;
412 off = cb - kgts->ts_gru->gs_gru_base_vaddr;
413 if (off < GRU_SIZE)
414 break;
415 kgts = NULL;
416 }
417 BUG_ON(!kgts);
418 cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
392 cbe = get_cbe(GRUBASE(cb), cbrnum); 419 cbe = get_cbe(GRUBASE(cb), cbrnum);
393 gru_flush_cache(cbe); /* CBE not coherent */ 420 gru_flush_cache(cbe); /* CBE not coherent */
421 sync_core();
394 excdet->opc = cbe->opccpy; 422 excdet->opc = cbe->opccpy;
395 excdet->exopc = cbe->exopccpy; 423 excdet->exopc = cbe->exopccpy;
396 excdet->ecause = cbe->ecause; 424 excdet->ecause = cbe->ecause;
@@ -409,8 +437,8 @@ char *gru_get_cb_exception_detail_str(int ret, void *cb,
409 if (ret > 0 && gen->istatus == CBS_EXCEPTION) { 437 if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
410 gru_get_cb_exception_detail(cb, &excdet); 438 gru_get_cb_exception_detail(cb, &excdet);
411 snprintf(buf, size, 439 snprintf(buf, size,
412 "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x," 440 "GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
413 "excdet0 0x%lx, excdet1 0x%x", 441 "excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
414 gen, excdet.opc, excdet.exopc, excdet.ecause, 442 gen, excdet.opc, excdet.exopc, excdet.ecause,
415 excdet.exceptdet0, excdet.exceptdet1); 443 excdet.exceptdet0, excdet.exceptdet1);
416 } else { 444 } else {
@@ -457,9 +485,10 @@ int gru_check_status_proc(void *cb)
457 int ret; 485 int ret;
458 486
459 ret = gen->istatus; 487 ret = gen->istatus;
460 if (ret != CBS_EXCEPTION) 488 if (ret == CBS_EXCEPTION)
461 return ret; 489 ret = gru_retry_exception(cb);
462 return gru_retry_exception(cb); 490 rmb();
491 return ret;
463 492
464} 493}
465 494
@@ -471,7 +500,7 @@ int gru_wait_proc(void *cb)
471 ret = gru_wait_idle_or_exception(gen); 500 ret = gru_wait_idle_or_exception(gen);
472 if (ret == CBS_EXCEPTION) 501 if (ret == CBS_EXCEPTION)
473 ret = gru_retry_exception(cb); 502 ret = gru_retry_exception(cb);
474 503 rmb();
475 return ret; 504 return ret;
476} 505}
477 506
@@ -538,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd,
538 mqd->mq = mq; 567 mqd->mq = mq;
539 mqd->mq_gpa = uv_gpa(mq); 568 mqd->mq_gpa = uv_gpa(mq);
540 mqd->qlines = qlines; 569 mqd->qlines = qlines;
541 mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid); 570 mqd->interrupt_pnode = nasid >> 1;
542 mqd->interrupt_vector = vector; 571 mqd->interrupt_vector = vector;
543 mqd->interrupt_apicid = apicid; 572 mqd->interrupt_apicid = apicid;
544 return 0; 573 return 0;
@@ -598,6 +627,8 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
598 ret = MQE_UNEXPECTED_CB_ERR; 627 ret = MQE_UNEXPECTED_CB_ERR;
599 break; 628 break;
600 case CBSS_PAGE_OVERFLOW: 629 case CBSS_PAGE_OVERFLOW:
630 STAT(mesq_noop_page_overflow);
631 /* fallthru */
601 default: 632 default:
602 BUG(); 633 BUG();
603 } 634 }
@@ -673,18 +704,6 @@ cberr:
673} 704}
674 705
675/* 706/*
676 * Send a cross-partition interrupt to the SSI that contains the target
677 * message queue. Normally, the interrupt is automatically delivered by hardware
678 * but some error conditions require explicit delivery.
679 */
680static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
681{
682 if (mqd->interrupt_vector)
683 uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
684 mqd->interrupt_vector);
685}
686
687/*
688 * Handle a PUT failure. Note: if message was a 2-line message, one of the 707 * Handle a PUT failure. Note: if message was a 2-line message, one of the
689 * lines might have successfully have been written. Before sending the 708 * lines might have successfully have been written. Before sending the
690 * message, "present" must be cleared in BOTH lines to prevent the receiver 709 * message, "present" must be cleared in BOTH lines to prevent the receiver
@@ -693,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
693static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd, 712static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
694 void *mesg, int lines) 713 void *mesg, int lines)
695{ 714{
696 unsigned long m; 715 unsigned long m, *val = mesg, gpa, save;
716 int ret;
697 717
698 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); 718 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
699 if (lines == 2) { 719 if (lines == 2) {
@@ -704,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
704 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); 724 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
705 if (gru_wait(cb) != CBS_IDLE) 725 if (gru_wait(cb) != CBS_IDLE)
706 return MQE_UNEXPECTED_CB_ERR; 726 return MQE_UNEXPECTED_CB_ERR;
707 send_message_queue_interrupt(mqd); 727
728 if (!mqd->interrupt_vector)
729 return MQE_OK;
730
731 /*
732 * Send a cross-partition interrupt to the SSI that contains the target
733 * message queue. Normally, the interrupt is automatically delivered by
734 * hardware but some error conditions require explicit delivery.
735 * Use the GRU to deliver the interrupt. Otherwise partition failures
736 * could cause unrecovered errors.
737 */
738 gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
739 save = *val;
740 *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
741 dest_Fixed);
742 gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
743 ret = gru_wait(cb);
744 *val = save;
745 if (ret != CBS_IDLE)
746 return MQE_UNEXPECTED_CB_ERR;
708 return MQE_OK; 747 return MQE_OK;
709} 748}
710 749
@@ -739,6 +778,9 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
739 STAT(mesq_send_put_nacked); 778 STAT(mesq_send_put_nacked);
740 ret = send_message_put_nacked(cb, mqd, mesg, lines); 779 ret = send_message_put_nacked(cb, mqd, mesg, lines);
741 break; 780 break;
781 case CBSS_PAGE_OVERFLOW:
782 STAT(mesq_page_overflow);
783 /* fallthru */
742 default: 784 default:
743 BUG(); 785 BUG();
744 } 786 }
@@ -831,7 +873,6 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
831 int present = mhdr->present; 873 int present = mhdr->present;
832 874
833 /* skip NOOP messages */ 875 /* skip NOOP messages */
834 STAT(mesq_receive);
835 while (present == MQS_NOOP) { 876 while (present == MQS_NOOP) {
836 gru_free_message(mqd, mhdr); 877 gru_free_message(mqd, mhdr);
837 mhdr = mq->next; 878 mhdr = mq->next;
@@ -851,6 +892,7 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
851 if (mhdr->lines == 2) 892 if (mhdr->lines == 2)
852 restore_present2(mhdr, mhdr->present2); 893 restore_present2(mhdr, mhdr->present2);
853 894
895 STAT(mesq_receive);
854 return mhdr; 896 return mhdr;
855} 897}
856EXPORT_SYMBOL_GPL(gru_get_next_message); 898EXPORT_SYMBOL_GPL(gru_get_next_message);
@@ -858,6 +900,29 @@ EXPORT_SYMBOL_GPL(gru_get_next_message);
858/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/ 900/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
859 901
860/* 902/*
903 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
904 */
905int gru_read_gpa(unsigned long *value, unsigned long gpa)
906{
907 void *cb;
908 void *dsr;
909 int ret, iaa;
910
911 STAT(read_gpa);
912 if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
913 return MQE_BUG_NO_RESOURCES;
914 iaa = gpa >> 62;
915 gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
916 ret = gru_wait(cb);
917 if (ret == CBS_IDLE)
918 *value = *(unsigned long *)dsr;
919 gru_free_cpu_resources(cb, dsr);
920 return ret;
921}
922EXPORT_SYMBOL_GPL(gru_read_gpa);
923
924
925/*
861 * Copy a block of data using the GRU resources 926 * Copy a block of data using the GRU resources
862 */ 927 */
863int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, 928int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
@@ -898,24 +963,24 @@ static int quicktest0(unsigned long arg)
898 963
899 gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); 964 gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
900 if (gru_wait(cb) != CBS_IDLE) { 965 if (gru_wait(cb) != CBS_IDLE) {
901 printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n"); 966 printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
902 goto done; 967 goto done;
903 } 968 }
904 969
905 if (*p != MAGIC) { 970 if (*p != MAGIC) {
906 printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p); 971 printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
907 goto done; 972 goto done;
908 } 973 }
909 gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); 974 gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
910 if (gru_wait(cb) != CBS_IDLE) { 975 if (gru_wait(cb) != CBS_IDLE) {
911 printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n"); 976 printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
912 goto done; 977 goto done;
913 } 978 }
914 979
915 if (word0 != word1 || word1 != MAGIC) { 980 if (word0 != word1 || word1 != MAGIC) {
916 printk(KERN_DEBUG 981 printk(KERN_DEBUG
917 "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n", 982 "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
918 word1, MAGIC); 983 smp_processor_id(), word1, MAGIC);
919 goto done; 984 goto done;
920 } 985 }
921 ret = 0; 986 ret = 0;
@@ -952,8 +1017,11 @@ static int quicktest1(unsigned long arg)
952 if (ret) 1017 if (ret)
953 break; 1018 break;
954 } 1019 }
955 if (ret != MQE_QUEUE_FULL || i != 4) 1020 if (ret != MQE_QUEUE_FULL || i != 4) {
 1021 printk(KERN_DEBUG "GRU:%d quicktest1: unexpected status %d, i %d\n",
1022 smp_processor_id(), ret, i);
956 goto done; 1023 goto done;
1024 }
957 1025
958 for (i = 0; i < 6; i++) { 1026 for (i = 0; i < 6; i++) {
959 m = gru_get_next_message(&mqd); 1027 m = gru_get_next_message(&mqd);
@@ -961,7 +1029,12 @@ static int quicktest1(unsigned long arg)
961 break; 1029 break;
962 gru_free_message(&mqd, m); 1030 gru_free_message(&mqd, m);
963 } 1031 }
964 ret = (i == 4) ? 0 : -EIO; 1032 if (i != 4) {
 1033 printk(KERN_DEBUG "GRU:%d quicktest1: bad message, i %d, m %p, m8 %d\n",
1034 smp_processor_id(), i, m, m ? m[8] : -1);
1035 goto done;
1036 }
1037 ret = 0;
965 1038
966done: 1039done:
967 kfree(p); 1040 kfree(p);
@@ -977,6 +1050,7 @@ static int quicktest2(unsigned long arg)
977 int ret = 0; 1050 int ret = 0;
978 unsigned long *buf; 1051 unsigned long *buf;
979 void *cb0, *cb; 1052 void *cb0, *cb;
1053 struct gru_control_block_status *gen;
980 int i, k, istatus, bytes; 1054 int i, k, istatus, bytes;
981 1055
982 bytes = numcb * 4 * 8; 1056 bytes = numcb * 4 * 8;
@@ -996,20 +1070,30 @@ static int quicktest2(unsigned long arg)
996 XTYPE_DW, 4, 1, IMA_INTERRUPT); 1070 XTYPE_DW, 4, 1, IMA_INTERRUPT);
997 1071
998 ret = 0; 1072 ret = 0;
999 for (k = 0; k < numcb; k++) { 1073 k = numcb;
1074 do {
1000 gru_wait_async_cbr(han); 1075 gru_wait_async_cbr(han);
1001 for (i = 0; i < numcb; i++) { 1076 for (i = 0; i < numcb; i++) {
1002 cb = cb0 + i * GRU_HANDLE_STRIDE; 1077 cb = cb0 + i * GRU_HANDLE_STRIDE;
1003 istatus = gru_check_status(cb); 1078 istatus = gru_check_status(cb);
1004 if (istatus == CBS_ACTIVE) 1079 if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
1005 continue; 1080 break;
1006 if (istatus == CBS_EXCEPTION)
1007 ret = -EFAULT;
1008 else if (buf[i] || buf[i + 1] || buf[i + 2] ||
1009 buf[i + 3])
1010 ret = -EIO;
1011 } 1081 }
1012 } 1082 if (i == numcb)
1083 continue;
1084 if (istatus != CBS_IDLE) {
1085 printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
1086 ret = -EFAULT;
1087 } else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
1088 buf[4 * i + 3]) {
 1089 printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
1090 smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
1091 ret = -EIO;
1092 }
1093 k--;
1094 gen = cb;
1095 gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
1096 } while (k);
1013 BUG_ON(cmp.done); 1097 BUG_ON(cmp.done);
1014 1098
1015 gru_unlock_async_resource(han); 1099 gru_unlock_async_resource(han);
@@ -1019,6 +1103,22 @@ done:
1019 return ret; 1103 return ret;
1020} 1104}
1021 1105
1106#define BUFSIZE 200
1107static int quicktest3(unsigned long arg)
1108{
1109 char buf1[BUFSIZE], buf2[BUFSIZE];
1110 int ret = 0;
1111
1112 memset(buf2, 0, sizeof(buf2));
1113 memset(buf1, get_cycles() & 255, sizeof(buf1));
1114 gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
1115 if (memcmp(buf1, buf2, BUFSIZE)) {
1116 printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
1117 ret = -EIO;
1118 }
1119 return ret;
1120}
1121
1022/* 1122/*
1023 * Debugging only. User hook for various kernel tests 1123 * Debugging only. User hook for various kernel tests
1024 * of driver & gru. 1124 * of driver & gru.
@@ -1037,6 +1137,9 @@ int gru_ktest(unsigned long arg)
1037 case 2: 1137 case 2:
1038 ret = quicktest2(arg); 1138 ret = quicktest2(arg);
1039 break; 1139 break;
1140 case 3:
1141 ret = quicktest3(arg);
1142 break;
1040 case 99: 1143 case 99:
1041 ret = gru_free_kernel_contexts(); 1144 ret = gru_free_kernel_contexts();
1042 break; 1145 break;
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
index d60d34bca44d..02aa94d8484a 100644
--- a/drivers/misc/sgi-gru/grukservices.h
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -131,6 +131,20 @@ extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
131 131
132 132
133/* 133/*
134 * Read a GRU global GPA. Source can be located in a remote partition.
135 *
136 * Input:
137 * value memory address where MMR value is returned
138 * gpa source numalink physical address of GPA
139 *
140 * Output:
141 * 0 OK
142 * >0 error
143 */
144int gru_read_gpa(unsigned long *value, unsigned long gpa);
145
146
147/*
134 * Copy data using the GRU. Source or destination can be located in a remote 148 * Copy data using the GRU. Source or destination can be located in a remote
135 * partition. 149 * partition.
136 * 150 *
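A minimal caller-side usage sketch for the gru_read_gpa() interface documented above (not part of the patch): the pnode and MMR offset are placeholder inputs, and the global address is built with uv_global_gru_mmr_address(), the same helper send_message_put_nacked() uses in grukservices.c.

/* Sketch only: read one MMR word from a (possibly remote) hub through the GRU. */
static int example_read_remote_mmr(int pnode, unsigned long mmr_offset,
                                   unsigned long *value)
{
        unsigned long gpa = uv_global_gru_mmr_address(pnode, mmr_offset);

        /* 0 on success, >0 on a GRU CB error, as described above. */
        return gru_read_gpa(value, gpa);
}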
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index 889bc442a3e8..e77d1b1f9d05 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -63,18 +63,9 @@
63#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) 63#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
64#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1))) 64#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
65 65
66/*
67 * Statictics kept on a per-GTS basis.
68 */
69struct gts_statistics {
70 unsigned long fmm_tlbdropin;
71 unsigned long upm_tlbdropin;
72 unsigned long context_stolen;
73};
74
75struct gru_get_gseg_statistics_req { 66struct gru_get_gseg_statistics_req {
76 unsigned long gseg; 67 unsigned long gseg;
77 struct gts_statistics stats; 68 struct gru_gseg_statistics stats;
78}; 69};
79 70
80/* 71/*
@@ -86,6 +77,7 @@ struct gru_create_context_req {
86 unsigned int control_blocks; 77 unsigned int control_blocks;
87 unsigned int maximum_thread_count; 78 unsigned int maximum_thread_count;
88 unsigned int options; 79 unsigned int options;
80 unsigned char tlb_preload_count;
89}; 81};
90 82
91/* 83/*
@@ -98,11 +90,12 @@ struct gru_unload_context_req {
98/* 90/*
99 * Structure used to set context options 91 * Structure used to set context options
100 */ 92 */
101enum {sco_gseg_owner, sco_cch_req_slice}; 93enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet};
102struct gru_set_context_option_req { 94struct gru_set_context_option_req {
103 unsigned long gseg; 95 unsigned long gseg;
104 int op; 96 int op;
105 unsigned long val1; 97 int val0;
98 long val1;
106}; 99};
107 100
108/* 101/*
@@ -124,6 +117,8 @@ struct gru_dump_chiplet_state_req {
124 int ctxnum; 117 int ctxnum;
125 char data_opt; 118 char data_opt;
126 char lock_cch; 119 char lock_cch;
120 char flush_cbrs;
121 char fill[10];
127 pid_t pid; 122 pid_t pid;
128 void *buf; 123 void *buf;
129 size_t buflen; 124 size_t buflen;
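For illustration, a hedged user-side sketch of the new sco_blade_chiplet option (not part of the patch): gru_set_context_option() in grufault.c reads val1 as the blade and val0 as the chiplet, with -1 meaning no preference. The GRU_SET_CONTEXT_OPTION ioctl command and the open GRU file descriptor are assumptions here; they come from grulib.h and the user library rather than from this hunk.

#include <sys/ioctl.h>

/* Sketch only: ask the driver to place a GSEG on a specific blade/chiplet. */
static int example_pin_gseg(int gru_fd, unsigned long gseg, int blade, int chiplet)
{
        struct gru_set_context_option_req req = {
                .gseg = gseg,
                .op   = sco_blade_chiplet,
                .val0 = chiplet,        /* -1: any chiplet on the blade */
                .val1 = blade,          /* -1: let the driver choose */
        };

        return ioctl(gru_fd, GRU_SET_CONTEXT_OPTION, &req);
}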
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 3bc643dad606..f8538bbd0bfa 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -27,6 +27,7 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/err.h>
30#include <asm/uv/uv_hub.h> 31#include <asm/uv/uv_hub.h>
31#include "gru.h" 32#include "gru.h"
32#include "grutables.h" 33#include "grutables.h"
@@ -48,12 +49,20 @@ struct device *grudev = &gru_device;
48/* 49/*
49 * Select a gru fault map to be used by the current cpu. Note that 50 * Select a gru fault map to be used by the current cpu. Note that
50 * multiple cpus may be using the same map. 51 * multiple cpus may be using the same map.
51 * ZZZ should "shift" be used?? Depends on HT cpu numbering
52 * ZZZ should be inline but did not work on emulator 52 * ZZZ should be inline but did not work on emulator
53 */ 53 */
54int gru_cpu_fault_map_id(void) 54int gru_cpu_fault_map_id(void)
55{ 55{
56#ifdef CONFIG_IA64
56 return uv_blade_processor_id() % GRU_NUM_TFM; 57 return uv_blade_processor_id() % GRU_NUM_TFM;
58#else
59 int cpu = smp_processor_id();
60 int id, core;
61
62 core = uv_cpu_core_number(cpu);
63 id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
64 return id;
65#endif
57} 66}
58 67
59/*--------- ASID Management ------------------------------------------- 68/*--------- ASID Management -------------------------------------------
@@ -286,7 +295,8 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
286void gts_drop(struct gru_thread_state *gts) 295void gts_drop(struct gru_thread_state *gts)
287{ 296{
288 if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) { 297 if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
289 gru_drop_mmu_notifier(gts->ts_gms); 298 if (gts->ts_gms)
299 gru_drop_mmu_notifier(gts->ts_gms);
290 kfree(gts); 300 kfree(gts);
291 STAT(gts_free); 301 STAT(gts_free);
292 } 302 }
@@ -310,16 +320,18 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
310 * Allocate a thread state structure. 320 * Allocate a thread state structure.
311 */ 321 */
312struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, 322struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
313 int cbr_au_count, int dsr_au_count, int options, int tsid) 323 int cbr_au_count, int dsr_au_count,
324 unsigned char tlb_preload_count, int options, int tsid)
314{ 325{
315 struct gru_thread_state *gts; 326 struct gru_thread_state *gts;
327 struct gru_mm_struct *gms;
316 int bytes; 328 int bytes;
317 329
318 bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); 330 bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
319 bytes += sizeof(struct gru_thread_state); 331 bytes += sizeof(struct gru_thread_state);
320 gts = kmalloc(bytes, GFP_KERNEL); 332 gts = kmalloc(bytes, GFP_KERNEL);
321 if (!gts) 333 if (!gts)
322 return NULL; 334 return ERR_PTR(-ENOMEM);
323 335
324 STAT(gts_alloc); 336 STAT(gts_alloc);
325 memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ 337 memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
@@ -327,7 +339,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
327 mutex_init(&gts->ts_ctxlock); 339 mutex_init(&gts->ts_ctxlock);
328 gts->ts_cbr_au_count = cbr_au_count; 340 gts->ts_cbr_au_count = cbr_au_count;
329 gts->ts_dsr_au_count = dsr_au_count; 341 gts->ts_dsr_au_count = dsr_au_count;
342 gts->ts_tlb_preload_count = tlb_preload_count;
330 gts->ts_user_options = options; 343 gts->ts_user_options = options;
344 gts->ts_user_blade_id = -1;
345 gts->ts_user_chiplet_id = -1;
331 gts->ts_tsid = tsid; 346 gts->ts_tsid = tsid;
332 gts->ts_ctxnum = NULLCTX; 347 gts->ts_ctxnum = NULLCTX;
333 gts->ts_tlb_int_select = -1; 348 gts->ts_tlb_int_select = -1;
@@ -336,9 +351,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
336 if (vma) { 351 if (vma) {
337 gts->ts_mm = current->mm; 352 gts->ts_mm = current->mm;
338 gts->ts_vma = vma; 353 gts->ts_vma = vma;
339 gts->ts_gms = gru_register_mmu_notifier(); 354 gms = gru_register_mmu_notifier();
340 if (!gts->ts_gms) 355 if (IS_ERR(gms))
341 goto err; 356 goto err;
357 gts->ts_gms = gms;
342 } 358 }
343 359
344 gru_dbg(grudev, "alloc gts %p\n", gts); 360 gru_dbg(grudev, "alloc gts %p\n", gts);
@@ -346,7 +362,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
346 362
347err: 363err:
348 gts_drop(gts); 364 gts_drop(gts);
349 return NULL; 365 return ERR_CAST(gms);
350} 366}
351 367
352/* 368/*
@@ -360,6 +376,7 @@ struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
360 if (!vdata) 376 if (!vdata)
361 return NULL; 377 return NULL;
362 378
379 STAT(vdata_alloc);
363 INIT_LIST_HEAD(&vdata->vd_head); 380 INIT_LIST_HEAD(&vdata->vd_head);
364 spin_lock_init(&vdata->vd_lock); 381 spin_lock_init(&vdata->vd_lock);
365 gru_dbg(grudev, "alloc vdata %p\n", vdata); 382 gru_dbg(grudev, "alloc vdata %p\n", vdata);
@@ -392,10 +409,12 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
392 struct gru_vma_data *vdata = vma->vm_private_data; 409 struct gru_vma_data *vdata = vma->vm_private_data;
393 struct gru_thread_state *gts, *ngts; 410 struct gru_thread_state *gts, *ngts;
394 411
395 gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count, 412 gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
413 vdata->vd_dsr_au_count,
414 vdata->vd_tlb_preload_count,
396 vdata->vd_user_options, tsid); 415 vdata->vd_user_options, tsid);
397 if (!gts) 416 if (IS_ERR(gts))
398 return NULL; 417 return gts;
399 418
400 spin_lock(&vdata->vd_lock); 419 spin_lock(&vdata->vd_lock);
401 ngts = gru_find_current_gts_nolock(vdata, tsid); 420 ngts = gru_find_current_gts_nolock(vdata, tsid);
@@ -493,6 +512,9 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
493 memset(cbe + i * GRU_HANDLE_STRIDE, 0, 512 memset(cbe + i * GRU_HANDLE_STRIDE, 0,
494 GRU_CACHE_LINE_BYTES); 513 GRU_CACHE_LINE_BYTES);
495 } 514 }
515 /* Flush CBE to hide race in context restart */
516 mb();
517 gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
496 cb += GRU_HANDLE_STRIDE; 518 cb += GRU_HANDLE_STRIDE;
497 } 519 }
498 520
@@ -513,6 +535,12 @@ static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
513 cb = gseg + GRU_CB_BASE; 535 cb = gseg + GRU_CB_BASE;
514 cbe = grubase + GRU_CBE_BASE; 536 cbe = grubase + GRU_CBE_BASE;
515 length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; 537 length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
538
539 /* CBEs may not be coherent. Flush them from cache */
540 for_each_cbr_in_allocation_map(i, &cbrmap, scr)
541 gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
542 mb(); /* Let the CL flush complete */
543
516 gru_prefetch_context(gseg, cb, cbe, cbrmap, length); 544 gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
517 545
518 for_each_cbr_in_allocation_map(i, &cbrmap, scr) { 546 for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
@@ -533,7 +561,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
533 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); 561 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
534 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 562 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
535 563
536 gru_dbg(grudev, "gts %p\n", gts); 564 gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
565 gts, gts->ts_cbr_map, gts->ts_dsr_map);
537 lock_cch_handle(cch); 566 lock_cch_handle(cch);
538 if (cch_interrupt_sync(cch)) 567 if (cch_interrupt_sync(cch))
539 BUG(); 568 BUG();
@@ -549,7 +578,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
549 578
550 if (cch_deallocate(cch)) 579 if (cch_deallocate(cch))
551 BUG(); 580 BUG();
552 gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */
553 unlock_cch_handle(cch); 581 unlock_cch_handle(cch);
554 582
555 gru_free_gru_context(gts); 583 gru_free_gru_context(gts);
@@ -565,9 +593,7 @@ void gru_load_context(struct gru_thread_state *gts)
565 struct gru_context_configuration_handle *cch; 593 struct gru_context_configuration_handle *cch;
566 int i, err, asid, ctxnum = gts->ts_ctxnum; 594 int i, err, asid, ctxnum = gts->ts_ctxnum;
567 595
568 gru_dbg(grudev, "gts %p\n", gts);
569 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 596 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
570
571 lock_cch_handle(cch); 597 lock_cch_handle(cch);
572 cch->tfm_fault_bit_enable = 598 cch->tfm_fault_bit_enable =
573 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL 599 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
@@ -591,6 +617,7 @@ void gru_load_context(struct gru_thread_state *gts)
591 cch->unmap_enable = 1; 617 cch->unmap_enable = 1;
592 cch->tfm_done_bit_enable = 1; 618 cch->tfm_done_bit_enable = 1;
593 cch->cb_int_enable = 1; 619 cch->cb_int_enable = 1;
620 cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
594 } else { 621 } else {
595 cch->unmap_enable = 0; 622 cch->unmap_enable = 0;
596 cch->tfm_done_bit_enable = 0; 623 cch->tfm_done_bit_enable = 0;
@@ -616,17 +643,18 @@ void gru_load_context(struct gru_thread_state *gts)
616 if (cch_start(cch)) 643 if (cch_start(cch))
617 BUG(); 644 BUG();
618 unlock_cch_handle(cch); 645 unlock_cch_handle(cch);
646
647 gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
648 gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
649 (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
619} 650}
620 651
621/* 652/*
622 * Update fields in an active CCH: 653 * Update fields in an active CCH:
623 * - retarget interrupts on local blade 654 * - retarget interrupts on local blade
624 * - update sizeavail mask 655 * - update sizeavail mask
625 * - force a delayed context unload by clearing the CCH asids. This
626 * forces TLB misses for new GRU instructions. The context is unloaded
627 * when the next TLB miss occurs.
628 */ 656 */
629int gru_update_cch(struct gru_thread_state *gts, int force_unload) 657int gru_update_cch(struct gru_thread_state *gts)
630{ 658{
631 struct gru_context_configuration_handle *cch; 659 struct gru_context_configuration_handle *cch;
632 struct gru_state *gru = gts->ts_gru; 660 struct gru_state *gru = gts->ts_gru;
@@ -640,21 +668,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
640 goto exit; 668 goto exit;
641 if (cch_interrupt(cch)) 669 if (cch_interrupt(cch))
642 BUG(); 670 BUG();
643 if (!force_unload) { 671 for (i = 0; i < 8; i++)
644 for (i = 0; i < 8; i++) 672 cch->sizeavail[i] = gts->ts_sizeavail;
645 cch->sizeavail[i] = gts->ts_sizeavail; 673 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
646 gts->ts_tlb_int_select = gru_cpu_fault_map_id(); 674 cch->tlb_int_select = gru_cpu_fault_map_id();
647 cch->tlb_int_select = gru_cpu_fault_map_id(); 675 cch->tfm_fault_bit_enable =
648 cch->tfm_fault_bit_enable = 676 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
649 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL 677 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
650 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
651 } else {
652 for (i = 0; i < 8; i++)
653 cch->asid[i] = 0;
654 cch->tfm_fault_bit_enable = 0;
655 cch->tlb_int_enable = 0;
656 gts->ts_force_unload = 1;
657 }
658 if (cch_start(cch)) 678 if (cch_start(cch))
659 BUG(); 679 BUG();
660 ret = 1; 680 ret = 1;
@@ -679,7 +699,54 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
679 699
680 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, 700 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
681 gru_cpu_fault_map_id()); 701 gru_cpu_fault_map_id());
682 return gru_update_cch(gts, 0); 702 return gru_update_cch(gts);
703}
704
705/*
706 * Check if a GRU context is allowed to use a specific chiplet. By default
707 * a context is assigned to any blade-local chiplet. However, users can
708 * override this.
709 * Returns 1 if assignment allowed, 0 otherwise
710 */
711static int gru_check_chiplet_assignment(struct gru_state *gru,
712 struct gru_thread_state *gts)
713{
714 int blade_id;
715 int chiplet_id;
716
717 blade_id = gts->ts_user_blade_id;
718 if (blade_id < 0)
719 blade_id = uv_numa_blade_id();
720
721 chiplet_id = gts->ts_user_chiplet_id;
722 return gru->gs_blade_id == blade_id &&
723 (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
724}
725
726/*
727 * Unload the gru context if it is not assigned to the correct blade or
728 * chiplet. Misassignment can occur if the process migrates to a different
729 * blade or if the user changes the selected blade/chiplet.
730 */
731void gru_check_context_placement(struct gru_thread_state *gts)
732{
733 struct gru_state *gru;
734
735 /*
736 * If the current task is the context owner, verify that the
737 * context is correctly placed. This test is skipped for non-owner
738 * references. Pthread apps use non-owner references to the CBRs.
739 */
740 gru = gts->ts_gru;
741 if (!gru || gts->ts_tgid_owner != current->tgid)
742 return;
743
744 if (!gru_check_chiplet_assignment(gru, gts)) {
745 STAT(check_context_unload);
746 gru_unload_context(gts, 1);
747 } else if (gru_retarget_intr(gts)) {
748 STAT(check_context_retarget_intr);
749 }
683} 750}
684 751
685 752
@@ -712,13 +779,17 @@ static void gts_stolen(struct gru_thread_state *gts,
712 } 779 }
713} 780}
714 781
715void gru_steal_context(struct gru_thread_state *gts, int blade_id) 782void gru_steal_context(struct gru_thread_state *gts)
716{ 783{
717 struct gru_blade_state *blade; 784 struct gru_blade_state *blade;
718 struct gru_state *gru, *gru0; 785 struct gru_state *gru, *gru0;
719 struct gru_thread_state *ngts = NULL; 786 struct gru_thread_state *ngts = NULL;
720 int ctxnum, ctxnum0, flag = 0, cbr, dsr; 787 int ctxnum, ctxnum0, flag = 0, cbr, dsr;
788 int blade_id;
721 789
790 blade_id = gts->ts_user_blade_id;
791 if (blade_id < 0)
792 blade_id = uv_numa_blade_id();
722 cbr = gts->ts_cbr_au_count; 793 cbr = gts->ts_cbr_au_count;
723 dsr = gts->ts_dsr_au_count; 794 dsr = gts->ts_dsr_au_count;
724 795
@@ -729,35 +800,39 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
729 gru = blade->bs_lru_gru; 800 gru = blade->bs_lru_gru;
730 if (ctxnum == 0) 801 if (ctxnum == 0)
731 gru = next_gru(blade, gru); 802 gru = next_gru(blade, gru);
803 blade->bs_lru_gru = gru;
804 blade->bs_lru_ctxnum = ctxnum;
732 ctxnum0 = ctxnum; 805 ctxnum0 = ctxnum;
733 gru0 = gru; 806 gru0 = gru;
734 while (1) { 807 while (1) {
735 if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) 808 if (gru_check_chiplet_assignment(gru, gts)) {
736 break; 809 if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
737 spin_lock(&gru->gs_lock);
738 for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
739 if (flag && gru == gru0 && ctxnum == ctxnum0)
740 break; 810 break;
741 ngts = gru->gs_gts[ctxnum]; 811 spin_lock(&gru->gs_lock);
742 /* 812 for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
743 * We are grabbing locks out of order, so trylock is 813 if (flag && gru == gru0 && ctxnum == ctxnum0)
744 * needed. GTSs are usually not locked, so the odds of 814 break;
745 * success are high. If trylock fails, try to steal a 815 ngts = gru->gs_gts[ctxnum];
746 * different GSEG. 816 /*
747 */ 817 * We are grabbing locks out of order, so trylock is
748 if (ngts && is_gts_stealable(ngts, blade)) 818 * needed. GTSs are usually not locked, so the odds of
819 * success are high. If trylock fails, try to steal a
820 * different GSEG.
821 */
822 if (ngts && is_gts_stealable(ngts, blade))
823 break;
824 ngts = NULL;
825 }
826 spin_unlock(&gru->gs_lock);
827 if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
749 break; 828 break;
750 ngts = NULL;
751 flag = 1;
752 } 829 }
753 spin_unlock(&gru->gs_lock); 830 if (flag && gru == gru0)
754 if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
755 break; 831 break;
832 flag = 1;
756 ctxnum = 0; 833 ctxnum = 0;
757 gru = next_gru(blade, gru); 834 gru = next_gru(blade, gru);
758 } 835 }
759 blade->bs_lru_gru = gru;
760 blade->bs_lru_ctxnum = ctxnum;
761 spin_unlock(&blade->bs_lock); 836 spin_unlock(&blade->bs_lock);
762 837
763 if (ngts) { 838 if (ngts) {
@@ -776,19 +851,34 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
776} 851}
777 852
778/* 853/*
854 * Assign a free gru context number.
855 */
856static int gru_assign_context_number(struct gru_state *gru)
857{
858 int ctxnum;
859
860 ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
861 __set_bit(ctxnum, &gru->gs_context_map);
862 return ctxnum;
863}
864
865/*
779 * Scan the GRUs on the local blade & assign a GRU context. 866 * Scan the GRUs on the local blade & assign a GRU context.
780 */ 867 */
781struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, 868struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
782 int blade)
783{ 869{
784 struct gru_state *gru, *grux; 870 struct gru_state *gru, *grux;
785 int i, max_active_contexts; 871 int i, max_active_contexts;
872 int blade_id = gts->ts_user_blade_id;
786 873
787 874 if (blade_id < 0)
875 blade_id = uv_numa_blade_id();
788again: 876again:
789 gru = NULL; 877 gru = NULL;
790 max_active_contexts = GRU_NUM_CCH; 878 max_active_contexts = GRU_NUM_CCH;
791 for_each_gru_on_blade(grux, blade, i) { 879 for_each_gru_on_blade(grux, blade_id, i) {
880 if (!gru_check_chiplet_assignment(grux, gts))
881 continue;
792 if (check_gru_resources(grux, gts->ts_cbr_au_count, 882 if (check_gru_resources(grux, gts->ts_cbr_au_count,
793 gts->ts_dsr_au_count, 883 gts->ts_dsr_au_count,
794 max_active_contexts)) { 884 max_active_contexts)) {
@@ -809,12 +899,9 @@ again:
809 reserve_gru_resources(gru, gts); 899 reserve_gru_resources(gru, gts);
810 gts->ts_gru = gru; 900 gts->ts_gru = gru;
811 gts->ts_blade = gru->gs_blade_id; 901 gts->ts_blade = gru->gs_blade_id;
812 gts->ts_ctxnum = 902 gts->ts_ctxnum = gru_assign_context_number(gru);
813 find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
814 BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
815 atomic_inc(&gts->ts_refcnt); 903 atomic_inc(&gts->ts_refcnt);
816 gru->gs_gts[gts->ts_ctxnum] = gts; 904 gru->gs_gts[gts->ts_ctxnum] = gts;
817 __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
818 spin_unlock(&gru->gs_lock); 905 spin_unlock(&gru->gs_lock);
819 906
820 STAT(assign_context); 907 STAT(assign_context);
@@ -842,7 +929,6 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
842{ 929{
843 struct gru_thread_state *gts; 930 struct gru_thread_state *gts;
844 unsigned long paddr, vaddr; 931 unsigned long paddr, vaddr;
845 int blade_id;
846 932
847 vaddr = (unsigned long)vmf->virtual_address; 933 vaddr = (unsigned long)vmf->virtual_address;
848 gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", 934 gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -857,28 +943,18 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
857again: 943again:
858 mutex_lock(&gts->ts_ctxlock); 944 mutex_lock(&gts->ts_ctxlock);
859 preempt_disable(); 945 preempt_disable();
860 blade_id = uv_numa_blade_id();
861 946
862 if (gts->ts_gru) { 947 gru_check_context_placement(gts);
863 if (gts->ts_gru->gs_blade_id != blade_id) {
864 STAT(migrated_nopfn_unload);
865 gru_unload_context(gts, 1);
866 } else {
867 if (gru_retarget_intr(gts))
868 STAT(migrated_nopfn_retarget);
869 }
870 }
871 948
872 if (!gts->ts_gru) { 949 if (!gts->ts_gru) {
873 STAT(load_user_context); 950 STAT(load_user_context);
874 if (!gru_assign_gru_context(gts, blade_id)) { 951 if (!gru_assign_gru_context(gts)) {
875 preempt_enable(); 952 preempt_enable();
876 mutex_unlock(&gts->ts_ctxlock); 953 mutex_unlock(&gts->ts_ctxlock);
877 set_current_state(TASK_INTERRUPTIBLE); 954 set_current_state(TASK_INTERRUPTIBLE);
878 schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ 955 schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
879 blade_id = uv_numa_blade_id();
880 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) 956 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
881 gru_steal_context(gts, blade_id); 957 gru_steal_context(gts);
882 goto again; 958 goto again;
883 } 959 }
884 gru_load_context(gts); 960 gru_load_context(gts);
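Two API conventions change in the grumain.c hunks above: (1) gru_alloc_gts() and gru_register_mmu_notifier() now report failure with ERR_PTR() rather than NULL, so callers test IS_ERR() as gru_alloc_thread_state() does; and (2) blade/chiplet selection moves inside the helpers, keyed off the new ts_user_blade_id/ts_user_chiplet_id fields with the local blade as the default. A condensed sketch of the resulting fault-path pattern (simplified from the hunks above; the real loop also drops its locks and sleeps before stealing, then retries):

	gru_check_context_placement(gts);	/* unload or retarget if misplaced */
	if (!gts->ts_gru && !gru_assign_gru_context(gts))
		gru_steal_context(gts);		/* victim picked per blade/chiplet */
	if (gts->ts_gru)
		gru_load_context(gts);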
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 3f2375c5ba5b..7768b87d995b 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -36,8 +36,7 @@ static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
36{ 36{
37 unsigned long val = atomic_long_read(v); 37 unsigned long val = atomic_long_read(v);
38 38
39 if (val) 39 seq_printf(s, "%16lu %s\n", val, id);
40 seq_printf(s, "%16lu %s\n", val, id);
41} 40}
42 41
43static int statistics_show(struct seq_file *s, void *p) 42static int statistics_show(struct seq_file *s, void *p)
@@ -46,7 +45,8 @@ static int statistics_show(struct seq_file *s, void *p)
46 printstat(s, vdata_free); 45 printstat(s, vdata_free);
47 printstat(s, gts_alloc); 46 printstat(s, gts_alloc);
48 printstat(s, gts_free); 47 printstat(s, gts_free);
49 printstat(s, vdata_double_alloc); 48 printstat(s, gms_alloc);
49 printstat(s, gms_free);
50 printstat(s, gts_double_allocate); 50 printstat(s, gts_double_allocate);
51 printstat(s, assign_context); 51 printstat(s, assign_context);
52 printstat(s, assign_context_failed); 52 printstat(s, assign_context_failed);
@@ -59,28 +59,25 @@ static int statistics_show(struct seq_file *s, void *p)
59 printstat(s, steal_kernel_context); 59 printstat(s, steal_kernel_context);
60 printstat(s, steal_context_failed); 60 printstat(s, steal_context_failed);
61 printstat(s, nopfn); 61 printstat(s, nopfn);
62 printstat(s, break_cow);
63 printstat(s, asid_new); 62 printstat(s, asid_new);
64 printstat(s, asid_next); 63 printstat(s, asid_next);
65 printstat(s, asid_wrap); 64 printstat(s, asid_wrap);
66 printstat(s, asid_reuse); 65 printstat(s, asid_reuse);
67 printstat(s, intr); 66 printstat(s, intr);
67 printstat(s, intr_cbr);
68 printstat(s, intr_tfh);
69 printstat(s, intr_spurious);
68 printstat(s, intr_mm_lock_failed); 70 printstat(s, intr_mm_lock_failed);
69 printstat(s, call_os); 71 printstat(s, call_os);
70 printstat(s, call_os_offnode_reference);
71 printstat(s, call_os_check_for_bug);
72 printstat(s, call_os_wait_queue); 72 printstat(s, call_os_wait_queue);
73 printstat(s, user_flush_tlb); 73 printstat(s, user_flush_tlb);
74 printstat(s, user_unload_context); 74 printstat(s, user_unload_context);
75 printstat(s, user_exception); 75 printstat(s, user_exception);
76 printstat(s, set_context_option); 76 printstat(s, set_context_option);
77 printstat(s, migrate_check); 77 printstat(s, check_context_retarget_intr);
78 printstat(s, migrated_retarget); 78 printstat(s, check_context_unload);
79 printstat(s, migrated_unload);
80 printstat(s, migrated_unload_delay);
81 printstat(s, migrated_nopfn_retarget);
82 printstat(s, migrated_nopfn_unload);
83 printstat(s, tlb_dropin); 79 printstat(s, tlb_dropin);
80 printstat(s, tlb_preload_page);
84 printstat(s, tlb_dropin_fail_no_asid); 81 printstat(s, tlb_dropin_fail_no_asid);
85 printstat(s, tlb_dropin_fail_upm); 82 printstat(s, tlb_dropin_fail_upm);
86 printstat(s, tlb_dropin_fail_invalid); 83 printstat(s, tlb_dropin_fail_invalid);
@@ -88,16 +85,15 @@ static int statistics_show(struct seq_file *s, void *p)
88 printstat(s, tlb_dropin_fail_idle); 85 printstat(s, tlb_dropin_fail_idle);
89 printstat(s, tlb_dropin_fail_fmm); 86 printstat(s, tlb_dropin_fail_fmm);
90 printstat(s, tlb_dropin_fail_no_exception); 87 printstat(s, tlb_dropin_fail_no_exception);
91 printstat(s, tlb_dropin_fail_no_exception_war);
92 printstat(s, tfh_stale_on_fault); 88 printstat(s, tfh_stale_on_fault);
93 printstat(s, mmu_invalidate_range); 89 printstat(s, mmu_invalidate_range);
94 printstat(s, mmu_invalidate_page); 90 printstat(s, mmu_invalidate_page);
95 printstat(s, mmu_clear_flush_young);
96 printstat(s, flush_tlb); 91 printstat(s, flush_tlb);
97 printstat(s, flush_tlb_gru); 92 printstat(s, flush_tlb_gru);
98 printstat(s, flush_tlb_gru_tgh); 93 printstat(s, flush_tlb_gru_tgh);
99 printstat(s, flush_tlb_gru_zero_asid); 94 printstat(s, flush_tlb_gru_zero_asid);
100 printstat(s, copy_gpa); 95 printstat(s, copy_gpa);
96 printstat(s, read_gpa);
101 printstat(s, mesq_receive); 97 printstat(s, mesq_receive);
102 printstat(s, mesq_receive_none); 98 printstat(s, mesq_receive_none);
103 printstat(s, mesq_send); 99 printstat(s, mesq_send);
@@ -108,7 +104,6 @@ static int statistics_show(struct seq_file *s, void *p)
108 printstat(s, mesq_send_qlimit_reached); 104 printstat(s, mesq_send_qlimit_reached);
109 printstat(s, mesq_send_amo_nacked); 105 printstat(s, mesq_send_amo_nacked);
110 printstat(s, mesq_send_put_nacked); 106 printstat(s, mesq_send_put_nacked);
111 printstat(s, mesq_qf_not_full);
112 printstat(s, mesq_qf_locked); 107 printstat(s, mesq_qf_locked);
113 printstat(s, mesq_qf_noop_not_full); 108 printstat(s, mesq_qf_noop_not_full);
114 printstat(s, mesq_qf_switch_head_failed); 109 printstat(s, mesq_qf_switch_head_failed);
@@ -118,6 +113,7 @@ static int statistics_show(struct seq_file *s, void *p)
118 printstat(s, mesq_noop_qlimit_reached); 113 printstat(s, mesq_noop_qlimit_reached);
119 printstat(s, mesq_noop_amo_nacked); 114 printstat(s, mesq_noop_amo_nacked);
120 printstat(s, mesq_noop_put_nacked); 115 printstat(s, mesq_noop_put_nacked);
116 printstat(s, mesq_noop_page_overflow);
121 return 0; 117 return 0;
122} 118}
123 119
@@ -133,8 +129,10 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
133 int op; 129 int op;
134 unsigned long total, count, max; 130 unsigned long total, count, max;
135 static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt", 131 static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
136 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"}; 132 "cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
133 "tfh_write_restart", "tgh_invalidate"};
137 134
135 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
138 for (op = 0; op < mcsop_last; op++) { 136 for (op = 0; op < mcsop_last; op++) {
139 count = atomic_long_read(&mcs_op_statistics[op].count); 137 count = atomic_long_read(&mcs_op_statistics[op].count);
140 total = atomic_long_read(&mcs_op_statistics[op].total); 138 total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -154,6 +152,7 @@ static ssize_t mcs_statistics_write(struct file *file,
154 152
155static int options_show(struct seq_file *s, void *p) 153static int options_show(struct seq_file *s, void *p)
156{ 154{
155 seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
157 seq_printf(s, "0x%lx\n", gru_options); 156 seq_printf(s, "0x%lx\n", gru_options);
158 return 0; 157 return 0;
159} 158}
@@ -183,16 +182,17 @@ static int cch_seq_show(struct seq_file *file, void *data)
183 const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" }; 182 const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
184 183
185 if (gid == 0) 184 if (gid == 0)
186 seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid", 185 seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
187 "ctx#", "pid", "cbrs", "dsbytes", "mode"); 186 "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
188 if (gru) 187 if (gru)
189 for (i = 0; i < GRU_NUM_CCH; i++) { 188 for (i = 0; i < GRU_NUM_CCH; i++) {
190 ts = gru->gs_gts[i]; 189 ts = gru->gs_gts[i];
191 if (!ts) 190 if (!ts)
192 continue; 191 continue;
193 seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n", 192 seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
194 gru->gs_gid, gru->gs_blade_id, i, 193 gru->gs_gid, gru->gs_blade_id, i,
195 ts->ts_tgid_owner, 194 is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
195 is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
196 ts->ts_cbr_au_count * GRU_CBR_AU_SIZE, 196 ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
197 ts->ts_cbr_au_count * GRU_DSR_AU_BYTES, 197 ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
198 mode[ts->ts_user_options & 198 mode[ts->ts_user_options &
@@ -355,7 +355,7 @@ static void delete_proc_files(void)
355 for (p = proc_files; p->name; p++) 355 for (p = proc_files; p->name; p++)
356 if (p->entry) 356 if (p->entry)
357 remove_proc_entry(p->name, proc_gru); 357 remove_proc_entry(p->name, proc_gru);
358 remove_proc_entry("gru", NULL); 358 remove_proc_entry("gru", proc_gru->parent);
359 } 359 }
360} 360}
361 361
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 46990bcfa536..02a77b8b8eef 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -161,7 +161,7 @@ extern unsigned int gru_max_gids;
161#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) 161#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
162 162
163#define GRU_DRIVER_ID_STR "SGI GRU Device Driver" 163#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
164#define GRU_DRIVER_VERSION_STR "0.80" 164#define GRU_DRIVER_VERSION_STR "0.85"
165 165
166/* 166/*
167 * GRU statistics. 167 * GRU statistics.
@@ -171,7 +171,8 @@ struct gru_stats_s {
171 atomic_long_t vdata_free; 171 atomic_long_t vdata_free;
172 atomic_long_t gts_alloc; 172 atomic_long_t gts_alloc;
173 atomic_long_t gts_free; 173 atomic_long_t gts_free;
174 atomic_long_t vdata_double_alloc; 174 atomic_long_t gms_alloc;
175 atomic_long_t gms_free;
175 atomic_long_t gts_double_allocate; 176 atomic_long_t gts_double_allocate;
176 atomic_long_t assign_context; 177 atomic_long_t assign_context;
177 atomic_long_t assign_context_failed; 178 atomic_long_t assign_context_failed;
@@ -184,28 +185,25 @@ struct gru_stats_s {
184 atomic_long_t steal_kernel_context; 185 atomic_long_t steal_kernel_context;
185 atomic_long_t steal_context_failed; 186 atomic_long_t steal_context_failed;
186 atomic_long_t nopfn; 187 atomic_long_t nopfn;
187 atomic_long_t break_cow;
188 atomic_long_t asid_new; 188 atomic_long_t asid_new;
189 atomic_long_t asid_next; 189 atomic_long_t asid_next;
190 atomic_long_t asid_wrap; 190 atomic_long_t asid_wrap;
191 atomic_long_t asid_reuse; 191 atomic_long_t asid_reuse;
192 atomic_long_t intr; 192 atomic_long_t intr;
193 atomic_long_t intr_cbr;
194 atomic_long_t intr_tfh;
195 atomic_long_t intr_spurious;
193 atomic_long_t intr_mm_lock_failed; 196 atomic_long_t intr_mm_lock_failed;
194 atomic_long_t call_os; 197 atomic_long_t call_os;
195 atomic_long_t call_os_offnode_reference;
196 atomic_long_t call_os_check_for_bug;
197 atomic_long_t call_os_wait_queue; 198 atomic_long_t call_os_wait_queue;
198 atomic_long_t user_flush_tlb; 199 atomic_long_t user_flush_tlb;
199 atomic_long_t user_unload_context; 200 atomic_long_t user_unload_context;
200 atomic_long_t user_exception; 201 atomic_long_t user_exception;
201 atomic_long_t set_context_option; 202 atomic_long_t set_context_option;
202 atomic_long_t migrate_check; 203 atomic_long_t check_context_retarget_intr;
203 atomic_long_t migrated_retarget; 204 atomic_long_t check_context_unload;
204 atomic_long_t migrated_unload;
205 atomic_long_t migrated_unload_delay;
206 atomic_long_t migrated_nopfn_retarget;
207 atomic_long_t migrated_nopfn_unload;
208 atomic_long_t tlb_dropin; 205 atomic_long_t tlb_dropin;
206 atomic_long_t tlb_preload_page;
209 atomic_long_t tlb_dropin_fail_no_asid; 207 atomic_long_t tlb_dropin_fail_no_asid;
210 atomic_long_t tlb_dropin_fail_upm; 208 atomic_long_t tlb_dropin_fail_upm;
211 atomic_long_t tlb_dropin_fail_invalid; 209 atomic_long_t tlb_dropin_fail_invalid;
@@ -213,17 +211,16 @@ struct gru_stats_s {
213 atomic_long_t tlb_dropin_fail_idle; 211 atomic_long_t tlb_dropin_fail_idle;
214 atomic_long_t tlb_dropin_fail_fmm; 212 atomic_long_t tlb_dropin_fail_fmm;
215 atomic_long_t tlb_dropin_fail_no_exception; 213 atomic_long_t tlb_dropin_fail_no_exception;
216 atomic_long_t tlb_dropin_fail_no_exception_war;
217 atomic_long_t tfh_stale_on_fault; 214 atomic_long_t tfh_stale_on_fault;
218 atomic_long_t mmu_invalidate_range; 215 atomic_long_t mmu_invalidate_range;
219 atomic_long_t mmu_invalidate_page; 216 atomic_long_t mmu_invalidate_page;
220 atomic_long_t mmu_clear_flush_young;
221 atomic_long_t flush_tlb; 217 atomic_long_t flush_tlb;
222 atomic_long_t flush_tlb_gru; 218 atomic_long_t flush_tlb_gru;
223 atomic_long_t flush_tlb_gru_tgh; 219 atomic_long_t flush_tlb_gru_tgh;
224 atomic_long_t flush_tlb_gru_zero_asid; 220 atomic_long_t flush_tlb_gru_zero_asid;
225 221
226 atomic_long_t copy_gpa; 222 atomic_long_t copy_gpa;
223 atomic_long_t read_gpa;
227 224
228 atomic_long_t mesq_receive; 225 atomic_long_t mesq_receive;
229 atomic_long_t mesq_receive_none; 226 atomic_long_t mesq_receive_none;
@@ -235,7 +232,7 @@ struct gru_stats_s {
235 atomic_long_t mesq_send_qlimit_reached; 232 atomic_long_t mesq_send_qlimit_reached;
236 atomic_long_t mesq_send_amo_nacked; 233 atomic_long_t mesq_send_amo_nacked;
237 atomic_long_t mesq_send_put_nacked; 234 atomic_long_t mesq_send_put_nacked;
238 atomic_long_t mesq_qf_not_full; 235 atomic_long_t mesq_page_overflow;
239 atomic_long_t mesq_qf_locked; 236 atomic_long_t mesq_qf_locked;
240 atomic_long_t mesq_qf_noop_not_full; 237 atomic_long_t mesq_qf_noop_not_full;
241 atomic_long_t mesq_qf_switch_head_failed; 238 atomic_long_t mesq_qf_switch_head_failed;
@@ -245,11 +242,13 @@ struct gru_stats_s {
245 atomic_long_t mesq_noop_qlimit_reached; 242 atomic_long_t mesq_noop_qlimit_reached;
246 atomic_long_t mesq_noop_amo_nacked; 243 atomic_long_t mesq_noop_amo_nacked;
247 atomic_long_t mesq_noop_put_nacked; 244 atomic_long_t mesq_noop_put_nacked;
245 atomic_long_t mesq_noop_page_overflow;
248 246
249}; 247};
250 248
251enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, 249enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
252 cchop_deallocate, tghop_invalidate, mcsop_last}; 250 cchop_deallocate, tfhop_write_only, tfhop_write_restart,
251 tghop_invalidate, mcsop_last};
253 252
254struct mcs_op_statistic { 253struct mcs_op_statistic {
255 atomic_long_t count; 254 atomic_long_t count;
@@ -259,8 +258,8 @@ struct mcs_op_statistic {
259 258
260extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; 259extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
261 260
262#define OPT_DPRINT 1 261#define OPT_DPRINT 1
263#define OPT_STATS 2 262#define OPT_STATS 2
264 263
265 264
266#define IRQ_GRU 110 /* Starting IRQ number for interrupts */ 265#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
@@ -283,7 +282,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
283#define gru_dbg(dev, fmt, x...) \ 282#define gru_dbg(dev, fmt, x...) \
284 do { \ 283 do { \
285 if (gru_options & OPT_DPRINT) \ 284 if (gru_options & OPT_DPRINT) \
286 dev_dbg(dev, "%s: " fmt, __func__, x); \ 285 printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
287 } while (0) 286 } while (0)
288#else 287#else
289#define gru_dbg(x...) 288#define gru_dbg(x...)
@@ -297,13 +296,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
297#define ASID_INC 8 /* number of regions */ 296#define ASID_INC 8 /* number of regions */
298 297
299/* Generate a GRU asid value from a GRU base asid & a virtual address. */ 298/* Generate a GRU asid value from a GRU base asid & a virtual address. */
300#if defined CONFIG_IA64
301#define VADDR_HI_BIT 64 299#define VADDR_HI_BIT 64
302#elif defined CONFIG_X86_64
303#define VADDR_HI_BIT 48
304#else
305#error "Unsupported architecture"
306#endif
307#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) 300#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
308#define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) 301#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
309 302
@@ -345,6 +338,7 @@ struct gru_vma_data {
345 long vd_user_options;/* misc user option flags */ 338 long vd_user_options;/* misc user option flags */
346 int vd_cbr_au_count; 339 int vd_cbr_au_count;
347 int vd_dsr_au_count; 340 int vd_dsr_au_count;
341 unsigned char vd_tlb_preload_count;
348}; 342};
349 343
350/* 344/*
@@ -360,6 +354,7 @@ struct gru_thread_state {
360 struct gru_state *ts_gru; /* GRU where the context is 354 struct gru_state *ts_gru; /* GRU where the context is
361 loaded */ 355 loaded */
362 struct gru_mm_struct *ts_gms; /* asid & ioproc struct */ 356 struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
357 unsigned char ts_tlb_preload_count; /* TLB preload pages */
363 unsigned long ts_cbr_map; /* map of allocated CBRs */ 358 unsigned long ts_cbr_map; /* map of allocated CBRs */
364 unsigned long ts_dsr_map; /* map of allocated DATA 359 unsigned long ts_dsr_map; /* map of allocated DATA
365 resources */ 360 resources */
@@ -368,6 +363,8 @@ struct gru_thread_state {
368 long ts_user_options;/* misc user option flags */ 363 long ts_user_options;/* misc user option flags */
369 pid_t ts_tgid_owner; /* task that is using the 364 pid_t ts_tgid_owner; /* task that is using the
370 context - for migration */ 365 context - for migration */
366 short ts_user_blade_id;/* user selected blade */
367 char ts_user_chiplet_id;/* user selected chiplet */
371 unsigned short ts_sizeavail; /* Pagesizes in use */ 368 unsigned short ts_sizeavail; /* Pagesizes in use */
372 int ts_tsid; /* thread that owns the 369 int ts_tsid; /* thread that owns the
373 structure */ 370 structure */
@@ -384,13 +381,11 @@ struct gru_thread_state {
384 char ts_blade; /* If >= 0, migrate context if 381 char ts_blade; /* If >= 0, migrate context if
 385 ref from different blade */ 382 ref from different blade */
386 char ts_force_cch_reload; 383 char ts_force_cch_reload;
387 char ts_force_unload;/* force context to be unloaded
388 after migration */
389 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each 384 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
390 allocated CB */ 385 allocated CB */
391 int ts_data_valid; /* Indicates if ts_gdata has 386 int ts_data_valid; /* Indicates if ts_gdata has
392 valid data */ 387 valid data */
393 struct gts_statistics ustats; /* User statistics */ 388 struct gru_gseg_statistics ustats; /* User statistics */
394 unsigned long ts_gdata[0]; /* save area for GRU data (CB, 389 unsigned long ts_gdata[0]; /* save area for GRU data (CB,
395 DS, CBE) */ 390 DS, CBE) */
396}; 391};
@@ -422,6 +417,7 @@ struct gru_state {
422 gru segments (64) */ 417 gru segments (64) */
423 unsigned short gs_gid; /* unique GRU number */ 418 unsigned short gs_gid; /* unique GRU number */
424 unsigned short gs_blade_id; /* blade of GRU */ 419 unsigned short gs_blade_id; /* blade of GRU */
420 unsigned char gs_chiplet_id; /* blade chiplet of GRU */
425 unsigned char gs_tgh_local_shift; /* used to pick TGH for 421 unsigned char gs_tgh_local_shift; /* used to pick TGH for
426 local flush */ 422 local flush */
427 unsigned char gs_tgh_first_remote; /* starting TGH# for 423 unsigned char gs_tgh_first_remote; /* starting TGH# for
@@ -453,6 +449,7 @@ struct gru_state {
453 in use */ 449 in use */
454 struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using 450 struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
455 the context */ 451 the context */
452 int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */
456}; 453};
457 454
458/* 455/*
@@ -619,6 +616,15 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
619 return !gts->ts_mm; 616 return !gts->ts_mm;
620} 617}
621 618
619/*
620 * The following are for Nehalem-EX. A more general scheme is needed for
621 * future processors.
622 */
623#define UV_MAX_INT_CORES 8
624#define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
625#define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
626#define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
627 ((cpu_physical_id(p) >> 1) & 3))
622/*----------------------------------------------------------------------------- 628/*-----------------------------------------------------------------------------
623 * Function prototypes & externs 629 * Function prototypes & externs
624 */ 630 */
@@ -633,24 +639,26 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
633 *vma, int tsid); 639 *vma, int tsid);
634extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct 640extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
635 *vma, int tsid); 641 *vma, int tsid);
636extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, 642extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
637 int blade);
638extern void gru_load_context(struct gru_thread_state *gts); 643extern void gru_load_context(struct gru_thread_state *gts);
639extern void gru_steal_context(struct gru_thread_state *gts, int blade_id); 644extern void gru_steal_context(struct gru_thread_state *gts);
640extern void gru_unload_context(struct gru_thread_state *gts, int savestate); 645extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
641extern int gru_update_cch(struct gru_thread_state *gts, int force_unload); 646extern int gru_update_cch(struct gru_thread_state *gts);
642extern void gts_drop(struct gru_thread_state *gts); 647extern void gts_drop(struct gru_thread_state *gts);
643extern void gru_tgh_flush_init(struct gru_state *gru); 648extern void gru_tgh_flush_init(struct gru_state *gru);
644extern int gru_kservices_init(void); 649extern int gru_kservices_init(void);
645extern void gru_kservices_exit(void); 650extern void gru_kservices_exit(void);
651extern irqreturn_t gru0_intr(int irq, void *dev_id);
652extern irqreturn_t gru1_intr(int irq, void *dev_id);
653extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
646extern int gru_dump_chiplet_request(unsigned long arg); 654extern int gru_dump_chiplet_request(unsigned long arg);
647extern long gru_get_gseg_statistics(unsigned long arg); 655extern long gru_get_gseg_statistics(unsigned long arg);
648extern irqreturn_t gru_intr(int irq, void *dev_id);
649extern int gru_handle_user_call_os(unsigned long address); 656extern int gru_handle_user_call_os(unsigned long address);
650extern int gru_user_flush_tlb(unsigned long arg); 657extern int gru_user_flush_tlb(unsigned long arg);
651extern int gru_user_unload_context(unsigned long arg); 658extern int gru_user_unload_context(unsigned long arg);
652extern int gru_get_exception_detail(unsigned long arg); 659extern int gru_get_exception_detail(unsigned long arg);
653extern int gru_set_context_option(unsigned long address); 660extern int gru_set_context_option(unsigned long address);
661extern void gru_check_context_placement(struct gru_thread_state *gts);
654extern int gru_cpu_fault_map_id(void); 662extern int gru_cpu_fault_map_id(void);
655extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); 663extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
656extern void gru_flush_all_tlb(struct gru_state *gru); 664extern void gru_flush_all_tlb(struct gru_state *gru);
@@ -658,7 +666,8 @@ extern int gru_proc_init(void);
658extern void gru_proc_exit(void); 666extern void gru_proc_exit(void);
659 667
660extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, 668extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
661 int cbr_au_count, int dsr_au_count, int options, int tsid); 669 int cbr_au_count, int dsr_au_count,
670 unsigned char tlb_preload_count, int options, int tsid);
662extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, 671extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
663 int cbr_au_count, char *cbmap); 672 int cbr_au_count, char *cbmap);
664extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, 673extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
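As a quick sanity check on the Nehalem-EX CPU-numbering macros added above, here is a worked example with a made-up APIC id (45 is purely illustrative, not taken from the patch):

	/*
	 * Hypothetical cpu p with cpu_physical_id(p) == 45 (0b101101):
	 *
	 *   uv_cpu_socket_number(p) = (45 >> 5) & 1    = 1
	 *   uv_cpu_ht_number(p)     =  45 & 1          = 1
	 *   uv_cpu_core_number(p)   = ((45 >> 2) & 4) |
	 *                             ((45 >> 1) & 3)  = 0 | 2 = 2
	 *
	 * so the non-IA64 gru_cpu_fault_map_id() above returns
	 *   core + UV_MAX_INT_CORES * socket = 2 + 8 * 1 = 10.
	 */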
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 1d125091f5e7..240a6d361665 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -184,8 +184,8 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
184 STAT(flush_tlb_gru_tgh); 184 STAT(flush_tlb_gru_tgh);
185 asid = GRUASID(asid, start); 185 asid = GRUASID(asid, start);
186 gru_dbg(grudev, 186 gru_dbg(grudev,
187 " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", 187 " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
188 gid, asid, num, asids->mt_ctxbitmap); 188 gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
189 tgh = get_lock_tgh_handle(gru); 189 tgh = get_lock_tgh_handle(gru);
190 tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0, 190 tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
191 num - 1, asids->mt_ctxbitmap); 191 num - 1, asids->mt_ctxbitmap);
@@ -299,6 +299,7 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
299{ 299{
300 struct gru_mm_struct *gms; 300 struct gru_mm_struct *gms;
301 struct mmu_notifier *mn; 301 struct mmu_notifier *mn;
302 int err;
302 303
303 mn = mmu_find_ops(current->mm, &gru_mmuops); 304 mn = mmu_find_ops(current->mm, &gru_mmuops);
304 if (mn) { 305 if (mn) {
@@ -307,16 +308,22 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
307 } else { 308 } else {
308 gms = kzalloc(sizeof(*gms), GFP_KERNEL); 309 gms = kzalloc(sizeof(*gms), GFP_KERNEL);
309 if (gms) { 310 if (gms) {
311 STAT(gms_alloc);
310 spin_lock_init(&gms->ms_asid_lock); 312 spin_lock_init(&gms->ms_asid_lock);
311 gms->ms_notifier.ops = &gru_mmuops; 313 gms->ms_notifier.ops = &gru_mmuops;
312 atomic_set(&gms->ms_refcnt, 1); 314 atomic_set(&gms->ms_refcnt, 1);
313 init_waitqueue_head(&gms->ms_wait_queue); 315 init_waitqueue_head(&gms->ms_wait_queue);
314 __mmu_notifier_register(&gms->ms_notifier, current->mm); 316 err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
317 if (err)
318 goto error;
315 } 319 }
316 } 320 }
317 gru_dbg(grudev, "gms %p, refcnt %d\n", gms, 321 gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
318 atomic_read(&gms->ms_refcnt)); 322 atomic_read(&gms->ms_refcnt));
319 return gms; 323 return gms;
324error:
325 kfree(gms);
326 return ERR_PTR(err);
320} 327}
321 328
322void gru_drop_mmu_notifier(struct gru_mm_struct *gms) 329void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
@@ -327,6 +334,7 @@ void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
327 if (!gms->ms_released) 334 if (!gms->ms_released)
328 mmu_notifier_unregister(&gms->ms_notifier, current->mm); 335 mmu_notifier_unregister(&gms->ms_notifier, current->mm);
329 kfree(gms); 336 kfree(gms);
337 STAT(gms_free);
330 } 338 }
331} 339}
332 340
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 2275126cb334..851b2f25ce0e 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -339,6 +339,7 @@ extern short xp_partition_id;
339extern u8 xp_region_size; 339extern u8 xp_region_size;
340 340
341extern unsigned long (*xp_pa) (void *); 341extern unsigned long (*xp_pa) (void *);
342extern unsigned long (*xp_socket_pa) (unsigned long);
342extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, 343extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
343 size_t); 344 size_t);
344extern int (*xp_cpu_to_nasid) (int); 345extern int (*xp_cpu_to_nasid) (int);
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 7896849b16dc..01be66d02ca8 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -44,6 +44,9 @@ EXPORT_SYMBOL_GPL(xp_region_size);
44unsigned long (*xp_pa) (void *addr); 44unsigned long (*xp_pa) (void *addr);
45EXPORT_SYMBOL_GPL(xp_pa); 45EXPORT_SYMBOL_GPL(xp_pa);
46 46
47unsigned long (*xp_socket_pa) (unsigned long gpa);
48EXPORT_SYMBOL_GPL(xp_socket_pa);
49
47enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, 50enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
48 const unsigned long src_gpa, size_t len); 51 const unsigned long src_gpa, size_t len);
49EXPORT_SYMBOL_GPL(xp_remote_memcpy); 52EXPORT_SYMBOL_GPL(xp_remote_memcpy);
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index fb3ec9d735a9..d8e463f87241 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -84,6 +84,15 @@ xp_pa_sn2(void *addr)
84} 84}
85 85
86/* 86/*
87 * Convert a global physical to a socket physical address.
88 */
89static unsigned long
90xp_socket_pa_sn2(unsigned long gpa)
91{
92 return gpa;
93}
94
95/*
87 * Wrapper for bte_copy(). 96 * Wrapper for bte_copy().
88 * 97 *
89 * dst_pa - physical address of the destination of the transfer. 98 * dst_pa - physical address of the destination of the transfer.
@@ -162,6 +171,7 @@ xp_init_sn2(void)
162 xp_region_size = sn_region_size; 171 xp_region_size = sn_region_size;
163 172
164 xp_pa = xp_pa_sn2; 173 xp_pa = xp_pa_sn2;
174 xp_socket_pa = xp_socket_pa_sn2;
165 xp_remote_memcpy = xp_remote_memcpy_sn2; 175 xp_remote_memcpy = xp_remote_memcpy_sn2;
166 xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; 176 xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
167 xp_expand_memprotect = xp_expand_memprotect_sn2; 177 xp_expand_memprotect = xp_expand_memprotect_sn2;
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d238576b26fa..a0d093274dc0 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -32,12 +32,44 @@ xp_pa_uv(void *addr)
32 return uv_gpa(addr); 32 return uv_gpa(addr);
33} 33}
34 34
35/*
36 * Convert a global physical to a socket physical address.
37 */
38static unsigned long
39xp_socket_pa_uv(unsigned long gpa)
40{
41 return uv_gpa_to_soc_phys_ram(gpa);
42}
43
44static enum xp_retval
45xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
46 size_t len)
47{
48 int ret;
49 unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
50
51 BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
52 BUG_ON(len != 8);
53
54 ret = gru_read_gpa(dst_va, src_gpa);
55 if (ret == 0)
56 return xpSuccess;
57
58 dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
59 "len=%ld\n", dst_gpa, src_gpa, len);
60 return xpGruCopyError;
61}
62
63
35static enum xp_retval 64static enum xp_retval
36xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, 65xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
37 size_t len) 66 size_t len)
38{ 67{
39 int ret; 68 int ret;
40 69
70 if (uv_gpa_in_mmr_space(src_gpa))
71 return xp_remote_mmr_read(dst_gpa, src_gpa, len);
72
41 ret = gru_copy_gpa(dst_gpa, src_gpa, len); 73 ret = gru_copy_gpa(dst_gpa, src_gpa, len);
42 if (ret == 0) 74 if (ret == 0)
43 return xpSuccess; 75 return xpSuccess;
@@ -123,6 +155,7 @@ xp_init_uv(void)
123 xp_region_size = sn_region_size; 155 xp_region_size = sn_region_size;
124 156
125 xp_pa = xp_pa_uv; 157 xp_pa = xp_pa_uv;
158 xp_socket_pa = xp_socket_pa_uv;
126 xp_remote_memcpy = xp_remote_memcpy_uv; 159 xp_remote_memcpy = xp_remote_memcpy_uv;
127 xp_cpu_to_nasid = xp_cpu_to_nasid_uv; 160 xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
128 xp_expand_memprotect = xp_expand_memprotect_uv; 161 xp_expand_memprotect = xp_expand_memprotect_uv;
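The reason for the new xp_socket_pa() hook shows up in the xpc_partition.c hunk below: on UV a global physical address (as handed back by SAL or xp_pa()) is not directly usable with __va() until it has been converted back to a socket physical address. The general pattern is roughly:

	void *va = __va(xp_socket_pa(gpa));	/* gpa: global physical address */

On sn2 the conversion is the identity, which is why xp_socket_pa_sn2() simply returns its argument.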
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 65877bc5edaa..9a6268c89fdd 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -18,6 +18,7 @@
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/hardirq.h> 19#include <linux/hardirq.h>
20#include "xpc.h" 20#include "xpc.h"
21#include <asm/uv/uv_hub.h>
21 22
22/* XPC is exiting flag */ 23/* XPC is exiting flag */
23int xpc_exiting; 24int xpc_exiting;
@@ -92,8 +93,12 @@ xpc_get_rsvd_page_pa(int nasid)
92 break; 93 break;
93 94
94 /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ 95 /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
95 if (L1_CACHE_ALIGN(len) > buf_len) { 96 if (is_shub())
96 kfree(buf_base); 97 len = L1_CACHE_ALIGN(len);
98
99 if (len > buf_len) {
100 if (buf_base != NULL)
101 kfree(buf_base);
97 buf_len = L1_CACHE_ALIGN(len); 102 buf_len = L1_CACHE_ALIGN(len);
98 buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL, 103 buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
99 &buf_base); 104 &buf_base);
@@ -105,7 +110,7 @@ xpc_get_rsvd_page_pa(int nasid)
105 } 110 }
106 } 111 }
107 112
108 ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len); 113 ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
109 if (ret != xpSuccess) { 114 if (ret != xpSuccess) {
110 dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); 115 dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
111 break; 116 break;
@@ -143,7 +148,7 @@ xpc_setup_rsvd_page(void)
143 dev_err(xpc_part, "SAL failed to locate the reserved page\n"); 148 dev_err(xpc_part, "SAL failed to locate the reserved page\n");
144 return -ESRCH; 149 return -ESRCH;
145 } 150 }
146 rp = (struct xpc_rsvd_page *)__va(rp_pa); 151 rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
147 152
148 if (rp->SAL_version < 3) { 153 if (rp->SAL_version < 3) {
149 /* SAL_versions < 3 had a SAL_partid defined as a u8 */ 154 /* SAL_versions < 3 had a SAL_partid defined as a u8 */
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b5bbe59f9c57..8725d5e8ab0c 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -157,22 +157,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
157{ 157{
158 int ret; 158 int ret;
159 159
160#if defined CONFIG_X86_64 160#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
161 ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), 161 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
162 mq->order, &mq->mmr_offset); 162
163 if (ret < 0) { 163 ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
164 dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
165 "ret=%d\n", ret);
166 return ret;
167 }
168#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
169 ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
170 mq->order, &mq->mmr_offset); 164 mq->order, &mq->mmr_offset);
171 if (ret < 0) { 165 if (ret < 0) {
172 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", 166 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
173 ret); 167 ret);
174 return -EBUSY; 168 return -EBUSY;
175 } 169 }
170#elif defined CONFIG_X86_64
171 ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
172 mq->order, &mq->mmr_offset);
173 if (ret < 0) {
174 dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
175 "ret=%d\n", ret);
176 return ret;
177 }
176#else 178#else
177 #error not a supported configuration 179 #error not a supported configuration
178#endif 180#endif
@@ -185,12 +187,13 @@ static void
185xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) 187xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
186{ 188{
187 int ret; 189 int ret;
190 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
188 191
189#if defined CONFIG_X86_64 192#if defined CONFIG_X86_64
190 ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 193 ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
191 BUG_ON(ret != BIOS_STATUS_SUCCESS); 194 BUG_ON(ret != BIOS_STATUS_SUCCESS);
192#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 195#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
193 ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 196 ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
194 BUG_ON(ret != SALRET_OK); 197 BUG_ON(ret != SALRET_OK);
195#else 198#else
196 #error not a supported configuration 199 #error not a supported configuration
@@ -204,6 +207,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
204 enum xp_retval xp_ret; 207 enum xp_retval xp_ret;
205 int ret; 208 int ret;
206 int nid; 209 int nid;
210 int nasid;
207 int pg_order; 211 int pg_order;
208 struct page *page; 212 struct page *page;
209 struct xpc_gru_mq_uv *mq; 213 struct xpc_gru_mq_uv *mq;
@@ -259,9 +263,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
259 goto out_5; 263 goto out_5;
260 } 264 }
261 265
266 nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
267
262 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; 268 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
263 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, 269 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
264 nid, mmr_value->vector, mmr_value->dest); 270 nasid, mmr_value->vector, mmr_value->dest);
265 if (ret != 0) { 271 if (ret != 0) {
266 dev_err(xpc_part, "gru_create_message_queue() returned " 272 dev_err(xpc_part, "gru_create_message_queue() returned "
267 "error=%d\n", ret); 273 "error=%d\n", ret);
@@ -946,11 +952,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
946 head->first = first->next; 952 head->first = first->next;
947 if (head->first == NULL) 953 if (head->first == NULL)
948 head->last = NULL; 954 head->last = NULL;
955
956 head->n_entries--;
957 BUG_ON(head->n_entries < 0);
958
959 first->next = NULL;
949 } 960 }
950 head->n_entries--;
951 BUG_ON(head->n_entries < 0);
952 spin_unlock_irqrestore(&head->lock, irq_flags); 961 spin_unlock_irqrestore(&head->lock, irq_flags);
953 first->next = NULL;
954 return first; 962 return first;
955} 963}
956 964
@@ -1019,7 +1027,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
1019 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 1027 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
1020 XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); 1028 XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
1021 1029
1022 while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) { 1030 while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
1031 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
1023 1032
1024 dev_dbg(xpc_part, "waiting to make first contact with " 1033 dev_dbg(xpc_part, "waiting to make first contact with "
1025 "partition %d\n", XPC_PARTID(part)); 1034 "partition %d\n", XPC_PARTID(part));
@@ -1422,7 +1431,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
1422 msg_slot = ch_uv->recv_msg_slots + 1431 msg_slot = ch_uv->recv_msg_slots +
1423 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; 1432 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
1424 1433
1425 BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
1426 BUG_ON(msg_slot->hdr.size != 0); 1434 BUG_ON(msg_slot->hdr.size != 0);
1427 1435
1428 memcpy(msg_slot, msg, msg->hdr.size); 1436 memcpy(msg_slot, msg, msg->hdr.size);
@@ -1646,8 +1654,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
1646 sizeof(struct xpc_notify_mq_msghdr_uv)); 1654 sizeof(struct xpc_notify_mq_msghdr_uv));
1647 if (ret != xpSuccess) 1655 if (ret != xpSuccess)
1648 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); 1656 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
1649
1650 msg->hdr.msg_slot_number += ch->remote_nentries;
1651} 1657}
1652 1658
1653static struct xpc_arch_operations xpc_arch_ops_uv = { 1659static struct xpc_arch_operations xpc_arch_ops_uv = {
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
new file mode 100644
index 000000000000..d3f229a3a77e
--- /dev/null
+++ b/drivers/misc/ti_dac7512.c
@@ -0,0 +1,101 @@
1/*
2 * dac7512.c - Linux kernel module for
3 * Texas Instruments DAC7512
4 *
5 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/spi/spi.h>
25
26#define DAC7512_DRV_NAME "dac7512"
27#define DRIVER_VERSION "1.0"
28
29static ssize_t dac7512_store_val(struct device *dev,
30 struct device_attribute *attr,
31 const char *buf, size_t count)
32{
33 struct spi_device *spi = to_spi_device(dev);
34 unsigned char tmp[2];
35 unsigned long val;
36
37 if (strict_strtoul(buf, 10, &val) < 0)
38 return -EINVAL;
39
40 tmp[0] = val >> 8;
41 tmp[1] = val & 0xff;
42 spi_write(spi, tmp, sizeof(tmp));
43 return count;
44}
45
46static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val);
47
48static struct attribute *dac7512_attributes[] = {
49 &dev_attr_value.attr,
50 NULL
51};
52
53static const struct attribute_group dac7512_attr_group = {
54 .attrs = dac7512_attributes,
55};
56
57static int __devinit dac7512_probe(struct spi_device *spi)
58{
59 int ret;
60
61 spi->bits_per_word = 8;
62 spi->mode = SPI_MODE_0;
63 ret = spi_setup(spi);
64 if (ret < 0)
65 return ret;
66
67 return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
68}
69
70static int __devexit dac7512_remove(struct spi_device *spi)
71{
72 sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
73 return 0;
74}
75
76static struct spi_driver dac7512_driver = {
77 .driver = {
78 .name = DAC7512_DRV_NAME,
79 .owner = THIS_MODULE,
80 },
81 .probe = dac7512_probe,
82 .remove = __devexit_p(dac7512_remove),
83};
84
85static int __init dac7512_init(void)
86{
87 return spi_register_driver(&dac7512_driver);
88}
89
90static void __exit dac7512_exit(void)
91{
92 spi_unregister_driver(&dac7512_driver);
93}
94
95MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
96MODULE_DESCRIPTION("DAC7512 16-bit DAC");
97MODULE_LICENSE("GPL v2");
98MODULE_VERSION(DRIVER_VERSION);
99
100module_init(dac7512_init);
101module_exit(dac7512_exit);
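A short usage note for the DAC7512 driver above (illustrative, not part of the patch): dac7512_store_val() takes a decimal value from the sysfs "value" attribute, uses only its low 16 bits and clocks them out MSB-first. Writing 32769 (0x8001), for instance, puts tmp[0] = 0x80 and tmp[1] = 0x01 on the SPI bus; from userspace this would typically look like

	echo 32769 > /sys/bus/spi/devices/<bus>.<cs>/value

where the exact device path depends on the board's SPI bus and chip-select numbering.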
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ab37a6d9d32a..bb22ffd76ef8 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config MMC_UNSAFE_RESUME 5config MMC_UNSAFE_RESUME
6 bool "Allow unsafe resume (DANGEROUS)" 6 bool "Assume MMC/SD cards are non-removable (DANGEROUS)"
7 help 7 help
8 If you say Y here, the MMC layer will assume that all cards 8 If you say Y here, the MMC layer will assume that all cards
9 stayed in their respective slots during the suspend. The 9 stayed in their respective slots during the suspend. The
@@ -14,3 +14,5 @@ config MMC_UNSAFE_RESUME
14 This option is usually just for embedded systems which use 14 This option is usually just for embedded systems which use
15 a MMC/SD card for rootfs. Most people should say N here. 15 a MMC/SD card for rootfs. Most people should say N here.
16 16
17 This option sets a default which can be overridden by the
18 module parameter "removable=0" or "removable=1".
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7dab2e5f4bc9..30acd5265821 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -48,6 +48,22 @@ int use_spi_crc = 1;
48module_param(use_spi_crc, bool, 0); 48module_param(use_spi_crc, bool, 0);
49 49
50/* 50/*
51 * We normally treat cards as removed during suspend if they are not
52 * known to be on a non-removable bus, to avoid the risk of writing
53 * back data to a different card after resume. Allow this to be
54 * overridden if necessary.
55 */
56#ifdef CONFIG_MMC_UNSAFE_RESUME
57int mmc_assume_removable;
58#else
59int mmc_assume_removable = 1;
60#endif
61module_param_named(removable, mmc_assume_removable, bool, 0644);
62MODULE_PARM_DESC(
63 removable,
64 "MMC/SD cards are removable and may be removed during suspend");
65
66/*
51 * Internal function. Schedule delayed work in the MMC work queue. 67 * Internal function. Schedule delayed work in the MMC work queue.
52 */ 68 */
53static int mmc_schedule_delayed_work(struct delayed_work *work, 69static int mmc_schedule_delayed_work(struct delayed_work *work,
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 67ae6abc4230..a811c52a1659 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -54,7 +54,9 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
54int mmc_attach_sd(struct mmc_host *host, u32 ocr); 54int mmc_attach_sd(struct mmc_host *host, u32 ocr);
55int mmc_attach_sdio(struct mmc_host *host, u32 ocr); 55int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
56 56
57/* Module parameters */
57extern int use_spi_crc; 58extern int use_spi_crc;
59extern int mmc_assume_removable;
58 60
59/* Debugfs information for hosts and cards */ 61/* Debugfs information for hosts and cards */
60void mmc_add_host_debugfs(struct mmc_host *host); 62void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bfefce365ae7..c11189446a1f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -602,25 +602,6 @@ static int mmc_awake(struct mmc_host *host)
602 return err; 602 return err;
603} 603}
604 604
605#ifdef CONFIG_MMC_UNSAFE_RESUME
606
607static const struct mmc_bus_ops mmc_ops = {
608 .awake = mmc_awake,
609 .sleep = mmc_sleep,
610 .remove = mmc_remove,
611 .detect = mmc_detect,
612 .suspend = mmc_suspend,
613 .resume = mmc_resume,
614 .power_restore = mmc_power_restore,
615};
616
617static void mmc_attach_bus_ops(struct mmc_host *host)
618{
619 mmc_attach_bus(host, &mmc_ops);
620}
621
622#else
623
624static const struct mmc_bus_ops mmc_ops = { 605static const struct mmc_bus_ops mmc_ops = {
625 .awake = mmc_awake, 606 .awake = mmc_awake,
626 .sleep = mmc_sleep, 607 .sleep = mmc_sleep,
@@ -645,15 +626,13 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
645{ 626{
646 const struct mmc_bus_ops *bus_ops; 627 const struct mmc_bus_ops *bus_ops;
647 628
648 if (host->caps & MMC_CAP_NONREMOVABLE) 629 if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
649 bus_ops = &mmc_ops_unsafe; 630 bus_ops = &mmc_ops_unsafe;
650 else 631 else
651 bus_ops = &mmc_ops; 632 bus_ops = &mmc_ops;
652 mmc_attach_bus(host, bus_ops); 633 mmc_attach_bus(host, bus_ops);
653} 634}
654 635
655#endif
656
657/* 636/*
658 * Starting point for MMC card init. 637 * Starting point for MMC card init.
659 */ 638 */
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 10b2a4d20f5a..fdd414eded09 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -606,23 +606,6 @@ static void mmc_sd_power_restore(struct mmc_host *host)
606 mmc_release_host(host); 606 mmc_release_host(host);
607} 607}
608 608
609#ifdef CONFIG_MMC_UNSAFE_RESUME
610
611static const struct mmc_bus_ops mmc_sd_ops = {
612 .remove = mmc_sd_remove,
613 .detect = mmc_sd_detect,
614 .suspend = mmc_sd_suspend,
615 .resume = mmc_sd_resume,
616 .power_restore = mmc_sd_power_restore,
617};
618
619static void mmc_sd_attach_bus_ops(struct mmc_host *host)
620{
621 mmc_attach_bus(host, &mmc_sd_ops);
622}
623
624#else
625
626static const struct mmc_bus_ops mmc_sd_ops = { 609static const struct mmc_bus_ops mmc_sd_ops = {
627 .remove = mmc_sd_remove, 610 .remove = mmc_sd_remove,
628 .detect = mmc_sd_detect, 611 .detect = mmc_sd_detect,
@@ -643,15 +626,13 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
643{ 626{
644 const struct mmc_bus_ops *bus_ops; 627 const struct mmc_bus_ops *bus_ops;
645 628
646 if (host->caps & MMC_CAP_NONREMOVABLE) 629 if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
647 bus_ops = &mmc_sd_ops_unsafe; 630 bus_ops = &mmc_sd_ops_unsafe;
648 else 631 else
649 bus_ops = &mmc_sd_ops; 632 bus_ops = &mmc_sd_ops;
650 mmc_attach_bus(host, bus_ops); 633 mmc_attach_bus(host, bus_ops);
651} 634}
652 635
653#endif
654
655/* 636/*
656 * Starting point for SD card init. 637 * Starting point for SD card init.
657 */ 638 */
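Taken together, the Kconfig default, the new "removable" module parameter and the host capability flag reduce the suspend policy in mmc.c and sd.c above to a single test. A hypothetical helper (not part of the patch) that makes the decision explicit:

static bool mmc_card_keeps_state_over_suspend(struct mmc_host *host)
{
	/*
	 * mmc_assume_removable defaults to 0 when CONFIG_MMC_UNSAFE_RESUME=y
	 * and to 1 otherwise; the "removable=0/1" module parameter overrides
	 * the default.  The "unsafe" bus ops keep the card bound across
	 * suspend/resume, the regular ops treat it as removed.
	 */
	return (host->caps & MMC_CAP_NONREMOVABLE) || !mmc_assume_removable;
}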
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f85dcd536508..9538389783c1 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -97,26 +97,56 @@ static const unsigned char speed_val[16] =
97static const unsigned int speed_unit[8] = 97static const unsigned int speed_unit[8] =
98 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; 98 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
99 99
100/* FUNCE tuples with these types get passed to SDIO drivers */ 100
101static const unsigned char funce_type_whitelist[] = { 101typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
102 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */ 102 const unsigned char *, unsigned);
103
104struct cis_tpl {
105 unsigned char code;
106 unsigned char min_size;
107 tpl_parse_t *parse;
103}; 108};
104 109
105static int cistpl_funce_whitelisted(unsigned char type) 110static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
111 const char *tpl_descr,
112 const struct cis_tpl *tpl, int tpl_count,
113 unsigned char code,
114 const unsigned char *buf, unsigned size)
106{ 115{
107 int i; 116 int i, ret;
108 117
109 for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) { 118 /* look for a matching code in the table */
110 if (funce_type_whitelist[i] == type) 119 for (i = 0; i < tpl_count; i++, tpl++) {
111 return 1; 120 if (tpl->code == code)
121 break;
112 } 122 }
113 return 0; 123 if (i < tpl_count) {
124 if (size >= tpl->min_size) {
125 if (tpl->parse)
126 ret = tpl->parse(card, func, buf, size);
127 else
128 ret = -EILSEQ; /* known tuple, not parsed */
129 } else {
130 /* invalid tuple */
131 ret = -EINVAL;
132 }
133 if (ret && ret != -EILSEQ && ret != -ENOENT) {
134 printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n",
135 mmc_hostname(card->host), tpl_descr, code, size);
136 }
137 } else {
138 /* unknown tuple */
139 ret = -ENOENT;
140 }
141
142 return ret;
114} 143}
115 144
116static int cistpl_funce_common(struct mmc_card *card, 145static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func,
117 const unsigned char *buf, unsigned size) 146 const unsigned char *buf, unsigned size)
118{ 147{
119 if (size < 0x04 || buf[0] != 0) 148 /* Only valid for the common CIS (function 0) */
149 if (func)
120 return -EINVAL; 150 return -EINVAL;
121 151
122 /* TPLFE_FN0_BLK_SIZE */ 152 /* TPLFE_FN0_BLK_SIZE */
@@ -129,20 +159,24 @@ static int cistpl_funce_common(struct mmc_card *card,
129 return 0; 159 return 0;
130} 160}
131 161
132static int cistpl_funce_func(struct sdio_func *func, 162static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
133 const unsigned char *buf, unsigned size) 163 const unsigned char *buf, unsigned size)
134{ 164{
135 unsigned vsn; 165 unsigned vsn;
136 unsigned min_size; 166 unsigned min_size;
137 167
138 /* let SDIO drivers take care of whitelisted FUNCE tuples */ 168 /* Only valid for the individual function's CIS (1-7) */
139 if (cistpl_funce_whitelisted(buf[0])) 169 if (!func)
140 return -EILSEQ; 170 return -EINVAL;
141 171
172 /*
173 * This tuple has a different length depending on the SDIO spec
174 * version.
175 */
142 vsn = func->card->cccr.sdio_vsn; 176 vsn = func->card->cccr.sdio_vsn;
143 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42; 177 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
144 178
145 if (size < min_size || buf[0] != 1) 179 if (size < min_size)
146 return -EINVAL; 180 return -EINVAL;
147 181
148 /* TPLFE_MAX_BLK_SIZE */ 182 /* TPLFE_MAX_BLK_SIZE */
@@ -157,39 +191,32 @@ static int cistpl_funce_func(struct sdio_func *func,
157 return 0; 191 return 0;
158} 192}
159 193
194/*
195 * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples.
196 *
197 * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending
198 * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID, since on SDIO
199 * TPLFID_FUNCTION is always hardcoded to 0x0C.
200 */
201static const struct cis_tpl cis_tpl_funce_list[] = {
202 { 0x00, 4, cistpl_funce_common },
203 { 0x01, 0, cistpl_funce_func },
204 { 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ },
205};
206
160static int cistpl_funce(struct mmc_card *card, struct sdio_func *func, 207static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
161 const unsigned char *buf, unsigned size) 208 const unsigned char *buf, unsigned size)
162{ 209{
163 int ret; 210 if (size < 1)
164 211 return -EINVAL;
165 /*
166 * There should be two versions of the CISTPL_FUNCE tuple,
167 * one for the common CIS (function 0) and a version used by
168 * the individual function's CIS (1-7). Yet, the later has a
169 * different length depending on the SDIO spec version.
170 */
171 if (func)
172 ret = cistpl_funce_func(func, buf, size);
173 else
174 ret = cistpl_funce_common(card, buf, size);
175
176 if (ret && ret != -EILSEQ) {
177 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
178 "type %u\n", mmc_hostname(card->host), size, buf[0]);
179 }
180 212
181 return ret; 213 return cis_tpl_parse(card, func, "CISTPL_FUNCE",
214 cis_tpl_funce_list,
215 ARRAY_SIZE(cis_tpl_funce_list),
216 buf[0], buf, size);
182} 217}
183 218
184typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, 219/* Known TPL_CODEs table for CIS tuples */
185 const unsigned char *, unsigned);
186
187struct cis_tpl {
188 unsigned char code;
189 unsigned char min_size;
190 tpl_parse_t *parse;
191};
192
193static const struct cis_tpl cis_tpl_list[] = { 220static const struct cis_tpl cis_tpl_list[] = {
194 { 0x15, 3, cistpl_vers_1 }, 221 { 0x15, 3, cistpl_vers_1 },
195 { 0x20, 4, cistpl_manfid }, 222 { 0x20, 4, cistpl_manfid },
@@ -268,46 +295,38 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
268 break; 295 break;
269 } 296 }
270 297
271 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) 298 /* Try to parse the CIS tuple */
272 if (cis_tpl_list[i].code == tpl_code) 299 ret = cis_tpl_parse(card, func, "CIS",
273 break; 300 cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
274 if (i < ARRAY_SIZE(cis_tpl_list)) { 301 tpl_code, this->data, tpl_link);
275 const struct cis_tpl *tpl = cis_tpl_list + i; 302 if (ret == -EILSEQ || ret == -ENOENT) {
276 if (tpl_link < tpl->min_size) {
277 printk(KERN_ERR
278 "%s: bad CIS tuple 0x%02x"
279 " (length = %u, expected >= %u)\n",
280 mmc_hostname(card->host),
281 tpl_code, tpl_link, tpl->min_size);
282 ret = -EINVAL;
283 } else if (tpl->parse) {
284 ret = tpl->parse(card, func,
285 this->data, tpl_link);
286 }
287 /* 303 /*
288 * We don't need the tuple anymore if it was 304 * The tuple is unknown or known but not parsed.
289 * successfully parsed by the SDIO core or if it is 305 * Queue the tuple for the function driver.
290 * not going to be parsed by SDIO drivers.
291 */ 306 */
292 if (!ret || ret != -EILSEQ)
293 kfree(this);
294 } else {
295 /* unknown tuple */
296 ret = -EILSEQ;
297 }
298
299 if (ret == -EILSEQ) {
300 /* this tuple is unknown to the core or whitelisted */
301 this->next = NULL; 307 this->next = NULL;
302 this->code = tpl_code; 308 this->code = tpl_code;
303 this->size = tpl_link; 309 this->size = tpl_link;
304 *prev = this; 310 *prev = this;
305 prev = &this->next; 311 prev = &this->next;
306 printk(KERN_DEBUG 312
307 "%s: queuing CIS tuple 0x%02x length %u\n", 313 if (ret == -ENOENT) {
308 mmc_hostname(card->host), tpl_code, tpl_link); 314 /* warn about unknown tuples */
315 printk(KERN_WARNING "%s: queuing unknown"
316 " CIS tuple 0x%02x (%u bytes)\n",
317 mmc_hostname(card->host),
318 tpl_code, tpl_link);
319 }
320
309 /* keep on analyzing tuples */ 321 /* keep on analyzing tuples */
310 ret = 0; 322 ret = 0;
323 } else {
324 /*
325 * We don't need the tuple anymore if it was
326 * successfully parsed by the SDIO core or if it is
327 * not going to be queued for a driver.
328 */
329 kfree(this);
311 } 330 }
312 331
313 ptr += tpl_link; 332 ptr += tpl_link;
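With the CIS handling now table-driven, teaching the core about an additional tuple only requires a handler with the tpl_parse_t signature plus one table entry. A hypothetical sketch (tuple code 0x91 and cistpl_example() are made up for illustration):

static int cistpl_example(struct mmc_card *card, struct sdio_func *func,
			  const unsigned char *buf, unsigned size)
{
	/*
	 * Interpret the tuple body here.  Return 0 on success, or
	 * -EILSEQ to leave the tuple queued for the function driver.
	 */
	return 0;
}

/* plus one extra entry in cis_tpl_list (or cis_tpl_funce_list):
 *	{ 0x91, 2, cistpl_example },
 * cis_tpl_parse() then provides the lookup, minimum-size check and
 * error reporting.
 */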
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e04b751680d0..9d405b181781 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -251,6 +251,14 @@ config MMC_MVSDIO
251 To compile this driver as a module, choose M here: the 251 To compile this driver as a module, choose M here: the
252 module will be called mvsdio. 252 module will be called mvsdio.
253 253
254config MMC_DAVINCI
255 tristate "TI DAVINCI Multimedia Card Interface support"
256 depends on ARCH_DAVINCI
257 help
258 This selects the TI DAVINCI Multimedia card Interface.
259 If you have an DAVINCI board with a Multimedia Card slot,
260 say Y or M here. If unsure, say N.
261
254config MMC_SPI 262config MMC_SPI
255 tristate "MMC/SD/SDIO over SPI" 263 tristate "MMC/SD/SDIO over SPI"
256 depends on SPI_MASTER && !HIGHMEM && HAS_DMA 264 depends on SPI_MASTER && !HIGHMEM && HAS_DMA
@@ -357,3 +365,22 @@ config MMC_VIA_SDMMC
357 If you have a controller with this interface, say Y or M here. 365 If you have a controller with this interface, say Y or M here.
358 366
359 If unsure, say N. 367 If unsure, say N.
368
369config SDH_BFIN
370 tristate "Blackfin Secure Digital Host support"
371 depends on MMC && ((BF54x && !BF544) || (BF51x && !BF512))
372 help
373 If you say yes here you will get support for the Blackfin on-chip
374 Secure Digital Host interface. This includes support for MMC and
375 SD cards.
376
377 To compile this driver as a module, choose M here: the
378 module will be called bfin_sdh.
379
380 If unsure, say N.
381
382config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
383 bool "Blackfin EZkit Missing SDH_CMD Pull Up Resistor Workaround"
384 depends on SDH_BFIN
385 help
386	  If you say yes here, SD cards may work on the EZkit.
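For reference, building the two host drivers added by this series as modules on their respective platforms amounts to the following .config fragment (illustrative):

	CONFIG_MMC_DAVINCI=m
	CONFIG_SDH_BFIN=m

which, per the Makefile change below, produces the davinci_mmc and bfin_sdh modules.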
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index abcb0400e06d..ded4d8cdd9d7 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
25obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o 25obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
26obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o 26obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o
27obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o 27obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
28obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
28obj-$(CONFIG_MMC_SPI) += mmc_spi.o 29obj-$(CONFIG_MMC_SPI) += mmc_spi.o
29ifeq ($(CONFIG_OF),y) 30ifeq ($(CONFIG_OF),y)
30obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o 31obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
34obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 35obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
35obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 36obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
36obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 37obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
38obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
37 39
38ifeq ($(CONFIG_CB710_DEBUG),y) 40ifeq ($(CONFIG_CB710_DEBUG),y)
39 CFLAGS-cb710-mmc += -DDEBUG 41 CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc25586b7ee1..8072128e933b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -25,6 +25,8 @@
25#include <linux/stat.h> 25#include <linux/stat.h>
26 26
27#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
28
29#include <mach/atmel-mci.h>
28#include <linux/atmel-mci.h> 30#include <linux/atmel-mci.h>
29 31
30#include <asm/io.h> 32#include <asm/io.h>
@@ -92,6 +94,7 @@ struct atmel_mci_dma {
92 * @need_clock_update: Update the clock rate before the next request. 94 * @need_clock_update: Update the clock rate before the next request.
93 * @need_reset: Reset controller before next request. 95 * @need_reset: Reset controller before next request.
94 * @mode_reg: Value of the MR register. 96 * @mode_reg: Value of the MR register.
97 * @cfg_reg: Value of the CFG register.
95 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus 98 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
96 * rate and timeout calculations. 99 * rate and timeout calculations.
97 * @mapbase: Physical address of the MMIO registers. 100 * @mapbase: Physical address of the MMIO registers.
@@ -155,6 +158,7 @@ struct atmel_mci {
155 bool need_clock_update; 158 bool need_clock_update;
156 bool need_reset; 159 bool need_reset;
157 u32 mode_reg; 160 u32 mode_reg;
161 u32 cfg_reg;
158 unsigned long bus_hz; 162 unsigned long bus_hz;
159 unsigned long mapbase; 163 unsigned long mapbase;
160 struct clk *mck; 164 struct clk *mck;
@@ -223,6 +227,19 @@ static bool mci_has_rwproof(void)
223} 227}
224 228
225/* 229/*
230 * The new MCI2 module isn't 100% compatible with the old MCI module,
231 * and it has a few nice features which we want to use...
232 */
233static inline bool atmci_is_mci2(void)
234{
235 if (cpu_is_at91sam9g45())
236 return true;
237
238 return false;
239}
240
241
242/*
226 * The debugfs stuff below is mostly optimized away when 243 * The debugfs stuff below is mostly optimized away when
227 * CONFIG_DEBUG_FS is not set. 244 * CONFIG_DEBUG_FS is not set.
228 */ 245 */
@@ -357,12 +374,33 @@ static int atmci_regs_show(struct seq_file *s, void *v)
357 buf[MCI_BLKR / 4], 374 buf[MCI_BLKR / 4],
358 buf[MCI_BLKR / 4] & 0xffff, 375 buf[MCI_BLKR / 4] & 0xffff,
359 (buf[MCI_BLKR / 4] >> 16) & 0xffff); 376 (buf[MCI_BLKR / 4] >> 16) & 0xffff);
377 if (atmci_is_mci2())
378 seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]);
360 379
361 /* Don't read RSPR and RDR; it will consume the data there */ 380 /* Don't read RSPR and RDR; it will consume the data there */
362 381
363 atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); 382 atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
364 atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); 383 atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
365 384
385 if (atmci_is_mci2()) {
386 u32 val;
387
388 val = buf[MCI_DMA / 4];
389 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
390 val, val & 3,
391 ((val >> 4) & 3) ?
392 1 << (((val >> 4) & 3) + 1) : 1,
393 val & MCI_DMAEN ? " DMAEN" : "");
394
395 val = buf[MCI_CFG / 4];
396 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
397 val,
398 val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
399 val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
400 val & MCI_CFG_HSMODE ? " HSMODE" : "",
401 val & MCI_CFG_LSYNC ? " LSYNC" : "");
402 }
403
366 kfree(buf); 404 kfree(buf);
367 405
368 return 0; 406 return 0;
@@ -557,6 +595,10 @@ static void atmci_dma_complete(void *arg)
557 595
558 dev_vdbg(&host->pdev->dev, "DMA complete\n"); 596 dev_vdbg(&host->pdev->dev, "DMA complete\n");
559 597
598 if (atmci_is_mci2())
599 /* Disable DMA hardware handshaking on MCI */
600 mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN);
601
560 atmci_dma_cleanup(host); 602 atmci_dma_cleanup(host);
561 603
562 /* 604 /*
@@ -592,7 +634,7 @@ static void atmci_dma_complete(void *arg)
592} 634}
593 635
594static int 636static int
595atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) 637atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
596{ 638{
597 struct dma_chan *chan; 639 struct dma_chan *chan;
598 struct dma_async_tx_descriptor *desc; 640 struct dma_async_tx_descriptor *desc;
@@ -624,6 +666,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
624 if (!chan) 666 if (!chan)
625 return -ENODEV; 667 return -ENODEV;
626 668
669 if (atmci_is_mci2())
670 mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN);
671
627 if (data->flags & MMC_DATA_READ) 672 if (data->flags & MMC_DATA_READ)
628 direction = DMA_FROM_DEVICE; 673 direction = DMA_FROM_DEVICE;
629 else 674 else
@@ -641,10 +686,6 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
641 host->dma.data_desc = desc; 686 host->dma.data_desc = desc;
642 desc->callback = atmci_dma_complete; 687 desc->callback = atmci_dma_complete;
643 desc->callback_param = host; 688 desc->callback_param = host;
644 desc->tx_submit(desc);
645
646 /* Go! */
647 chan->device->device_issue_pending(chan);
648 689
649 return 0; 690 return 0;
650unmap_exit: 691unmap_exit:
@@ -652,13 +693,26 @@ unmap_exit:
652 return -ENOMEM; 693 return -ENOMEM;
653} 694}
654 695
696static void atmci_submit_data(struct atmel_mci *host)
697{
698 struct dma_chan *chan = host->data_chan;
699 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
700
701 if (chan) {
702 desc->tx_submit(desc);
703 chan->device->device_issue_pending(chan);
704 }
705}
706
655#else /* CONFIG_MMC_ATMELMCI_DMA */ 707#else /* CONFIG_MMC_ATMELMCI_DMA */
656 708
657static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) 709static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
658{ 710{
659 return -ENOSYS; 711 return -ENOSYS;
660} 712}
661 713
714static void atmci_submit_data(struct atmel_mci *host) {}
715
662static void atmci_stop_dma(struct atmel_mci *host) 716static void atmci_stop_dma(struct atmel_mci *host)
663{ 717{
664 /* Data transfer was stopped by the interrupt handler */ 718 /* Data transfer was stopped by the interrupt handler */
@@ -672,7 +726,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
672 * Returns a mask of interrupt flags to be enabled after the whole 726 * Returns a mask of interrupt flags to be enabled after the whole
673 * request has been prepared. 727 * request has been prepared.
674 */ 728 */
675static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) 729static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
676{ 730{
677 u32 iflags; 731 u32 iflags;
678 732
@@ -683,7 +737,7 @@ static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
683 host->data = data; 737 host->data = data;
684 738
685 iflags = ATMCI_DATA_ERROR_FLAGS; 739 iflags = ATMCI_DATA_ERROR_FLAGS;
686 if (atmci_submit_data_dma(host, data)) { 740 if (atmci_prepare_data_dma(host, data)) {
687 host->data_chan = NULL; 741 host->data_chan = NULL;
688 742
689 /* 743 /*
@@ -729,6 +783,8 @@ static void atmci_start_request(struct atmel_mci *host,
729 mci_writel(host, CR, MCI_CR_SWRST); 783 mci_writel(host, CR, MCI_CR_SWRST);
730 mci_writel(host, CR, MCI_CR_MCIEN); 784 mci_writel(host, CR, MCI_CR_MCIEN);
731 mci_writel(host, MR, host->mode_reg); 785 mci_writel(host, MR, host->mode_reg);
786 if (atmci_is_mci2())
787 mci_writel(host, CFG, host->cfg_reg);
732 host->need_reset = false; 788 host->need_reset = false;
733 } 789 }
734 mci_writel(host, SDCR, slot->sdc_reg); 790 mci_writel(host, SDCR, slot->sdc_reg);
@@ -744,6 +800,7 @@ static void atmci_start_request(struct atmel_mci *host,
744 while (!(mci_readl(host, SR) & MCI_CMDRDY)) 800 while (!(mci_readl(host, SR) & MCI_CMDRDY))
745 cpu_relax(); 801 cpu_relax();
746 } 802 }
803 iflags = 0;
747 data = mrq->data; 804 data = mrq->data;
748 if (data) { 805 if (data) {
749 atmci_set_timeout(host, slot, data); 806 atmci_set_timeout(host, slot, data);
@@ -753,15 +810,17 @@ static void atmci_start_request(struct atmel_mci *host,
753 | MCI_BLKLEN(data->blksz)); 810 | MCI_BLKLEN(data->blksz));
754 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", 811 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
755 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); 812 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
813
814 iflags |= atmci_prepare_data(host, data);
756 } 815 }
757 816
758 iflags = MCI_CMDRDY; 817 iflags |= MCI_CMDRDY;
759 cmd = mrq->cmd; 818 cmd = mrq->cmd;
760 cmdflags = atmci_prepare_command(slot->mmc, cmd); 819 cmdflags = atmci_prepare_command(slot->mmc, cmd);
761 atmci_start_command(host, cmd, cmdflags); 820 atmci_start_command(host, cmd, cmdflags);
762 821
763 if (data) 822 if (data)
764 iflags |= atmci_submit_data(host, data); 823 atmci_submit_data(host);
765 824
766 if (mrq->stop) { 825 if (mrq->stop) {
767 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); 826 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
@@ -857,6 +916,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
857 clk_enable(host->mck); 916 clk_enable(host->mck);
858 mci_writel(host, CR, MCI_CR_SWRST); 917 mci_writel(host, CR, MCI_CR_SWRST);
859 mci_writel(host, CR, MCI_CR_MCIEN); 918 mci_writel(host, CR, MCI_CR_MCIEN);
919 if (atmci_is_mci2())
920 mci_writel(host, CFG, host->cfg_reg);
860 } 921 }
861 922
862 /* 923 /*
@@ -1095,6 +1156,8 @@ static void atmci_detect_change(unsigned long data)
1095 mci_writel(host, CR, MCI_CR_SWRST); 1156 mci_writel(host, CR, MCI_CR_SWRST);
1096 mci_writel(host, CR, MCI_CR_MCIEN); 1157 mci_writel(host, CR, MCI_CR_MCIEN);
1097 mci_writel(host, MR, host->mode_reg); 1158 mci_writel(host, MR, host->mode_reg);
1159 if (atmci_is_mci2())
1160 mci_writel(host, CFG, host->cfg_reg);
1098 1161
1099 host->data = NULL; 1162 host->data = NULL;
1100 host->cmd = NULL; 1163 host->cmd = NULL;
@@ -1584,14 +1647,47 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
1584#ifdef CONFIG_MMC_ATMELMCI_DMA 1647#ifdef CONFIG_MMC_ATMELMCI_DMA
1585static bool filter(struct dma_chan *chan, void *slave) 1648static bool filter(struct dma_chan *chan, void *slave)
1586{ 1649{
1587 struct dw_dma_slave *dws = slave; 1650 struct mci_dma_data *sl = slave;
1588 1651
1589 if (dws->dma_dev == chan->device->dev) { 1652 if (sl && find_slave_dev(sl) == chan->device->dev) {
1590 chan->private = dws; 1653 chan->private = slave_data_ptr(sl);
1591 return true; 1654 return true;
1592 } else 1655 } else {
1593 return false; 1656 return false;
1657 }
1594} 1658}
1659
1660static void atmci_configure_dma(struct atmel_mci *host)
1661{
1662 struct mci_platform_data *pdata;
1663
1664 if (host == NULL)
1665 return;
1666
1667 pdata = host->pdev->dev.platform_data;
1668
1669 if (pdata && find_slave_dev(pdata->dma_slave)) {
1670 dma_cap_mask_t mask;
1671
1672 setup_dma_addr(pdata->dma_slave,
1673 host->mapbase + MCI_TDR,
1674 host->mapbase + MCI_RDR);
1675
1676 /* Try to grab a DMA channel */
1677 dma_cap_zero(mask);
1678 dma_cap_set(DMA_SLAVE, mask);
1679 host->dma.chan =
1680 dma_request_channel(mask, filter, pdata->dma_slave);
1681 }
1682 if (!host->dma.chan)
1683 dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
1684 else
1685 dev_info(&host->pdev->dev,
1686 "Using %s for DMA transfers\n",
1687 dma_chan_name(host->dma.chan));
1688}
1689#else
1690static void atmci_configure_dma(struct atmel_mci *host) {}
1595#endif 1691#endif
1596 1692
1597static int __init atmci_probe(struct platform_device *pdev) 1693static int __init atmci_probe(struct platform_device *pdev)
@@ -1645,22 +1741,7 @@ static int __init atmci_probe(struct platform_device *pdev)
1645 if (ret) 1741 if (ret)
1646 goto err_request_irq; 1742 goto err_request_irq;
1647 1743
1648#ifdef CONFIG_MMC_ATMELMCI_DMA 1744 atmci_configure_dma(host);
1649 if (pdata->dma_slave.dma_dev) {
1650 struct dw_dma_slave *dws = &pdata->dma_slave;
1651 dma_cap_mask_t mask;
1652
1653 dws->tx_reg = regs->start + MCI_TDR;
1654 dws->rx_reg = regs->start + MCI_RDR;
1655
1656 /* Try to grab a DMA channel */
1657 dma_cap_zero(mask);
1658 dma_cap_set(DMA_SLAVE, mask);
1659 host->dma.chan = dma_request_channel(mask, filter, dws);
1660 }
1661 if (!host->dma.chan)
1662 dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1663#endif /* CONFIG_MMC_ATMELMCI_DMA */
1664 1745
1665 platform_set_drvdata(pdev, host); 1746 platform_set_drvdata(pdev, host);
1666 1747
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
new file mode 100644
index 000000000000..3343a57355cc
--- /dev/null
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -0,0 +1,639 @@
1/*
2 * bfin_sdh.c - Analog Devices Blackfin SDH Controller
3 *
4 * Copyright (C) 2007-2009 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#define DRIVER_NAME "bfin-sdh"
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/platform_device.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/dma-mapping.h>
18#include <linux/mmc/host.h>
19#include <linux/proc_fs.h>
20
21#include <asm/cacheflush.h>
22#include <asm/dma.h>
23#include <asm/portmux.h>
24#include <asm/bfin_sdh.h>
25
26#if defined(CONFIG_BF51x)
27#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
28#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
29#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
30#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
31#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
32#define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND
33#define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER
34#define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0
35#define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1
36#define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2
37#define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3
38#define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH
39#define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL
40#define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL
41#define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT
42#define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR
43#define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS
44#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
45#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
46#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
47#define bfin_read_SDH_CFG bfin_read_RSI_CFG
48#define bfin_write_SDH_CFG bfin_write_RSI_CFG
49#endif
50
51struct dma_desc_array {
52 unsigned long start_addr;
53 unsigned short cfg;
54 unsigned short x_count;
55 short x_modify;
56} __packed;
57
58struct sdh_host {
59 struct mmc_host *mmc;
60 spinlock_t lock;
61 struct resource *res;
62 void __iomem *base;
63 int irq;
64 int stat_irq;
65 int dma_ch;
66 int dma_dir;
67 struct dma_desc_array *sg_cpu;
68 dma_addr_t sg_dma;
69 int dma_len;
70
71 unsigned int imask;
72 unsigned int power_mode;
73 unsigned int clk_div;
74
75 struct mmc_request *mrq;
76 struct mmc_command *cmd;
77 struct mmc_data *data;
78};
79
80static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev)
81{
82 return pdev->dev.platform_data;
83}
84
85static void sdh_stop_clock(struct sdh_host *host)
86{
87 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E);
88 SSYNC();
89}
90
91static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask)
92{
93 unsigned long flags;
94
95 spin_lock_irqsave(&host->lock, flags);
96 host->imask |= mask;
97 bfin_write_SDH_MASK0(mask);
98 SSYNC();
99 spin_unlock_irqrestore(&host->lock, flags);
100}
101
102static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask)
103{
104 unsigned long flags;
105
106 spin_lock_irqsave(&host->lock, flags);
107 host->imask &= ~mask;
108 bfin_write_SDH_MASK0(host->imask);
109 SSYNC();
110 spin_unlock_irqrestore(&host->lock, flags);
111}
112
113static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
114{
115 unsigned int length;
116 unsigned int data_ctl;
117 unsigned int dma_cfg;
118 struct scatterlist *sg;
119
120 dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
121 host->data = data;
122 data_ctl = 0;
123 dma_cfg = 0;
124
125 length = data->blksz * data->blocks;
126 bfin_write_SDH_DATA_LGTH(length);
127
128 if (data->flags & MMC_DATA_STREAM)
129 data_ctl |= DTX_MODE;
130
131 if (data->flags & MMC_DATA_READ)
132 data_ctl |= DTX_DIR;
133 /* Only supports power-of-2 block size */
134 if (data->blksz & (data->blksz - 1))
135 return -EINVAL;
136 data_ctl |= ((ffs(data->blksz) - 1) << 4);
137
138 bfin_write_SDH_DATA_CTL(data_ctl);
139
140 bfin_write_SDH_DATA_TIMER(0xFFFF);
141 SSYNC();
142
143 if (data->flags & MMC_DATA_READ) {
144 host->dma_dir = DMA_FROM_DEVICE;
145 dma_cfg |= WNR;
146 } else
147 host->dma_dir = DMA_TO_DEVICE;
148
149 sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
150 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
151#if defined(CONFIG_BF54x)
152 dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN;
153 {
154 int i;
155 for_each_sg(data->sg, sg, host->dma_len, i) {
156 host->sg_cpu[i].start_addr = sg_dma_address(sg);
157 host->sg_cpu[i].cfg = dma_cfg;
158 host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
159 host->sg_cpu[i].x_modify = 4;
160 dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
161 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
162 i, host->sg_cpu[i].start_addr,
163 host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
164 host->sg_cpu[i].x_modify);
165 }
166 }
167 flush_dcache_range((unsigned int)host->sg_cpu,
168 (unsigned int)host->sg_cpu +
169 host->dma_len * sizeof(struct dma_desc_array));
170 /* Set the last descriptor to stop mode */
171 host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE);
172 host->sg_cpu[host->dma_len - 1].cfg |= DI_EN;
173
174 set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
175 set_dma_x_count(host->dma_ch, 0);
176 set_dma_x_modify(host->dma_ch, 0);
177 set_dma_config(host->dma_ch, dma_cfg);
178#elif defined(CONFIG_BF51x)
179 /* RSI DMA doesn't work in array mode */
180 dma_cfg |= WDSIZE_32 | DMAEN;
181 set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
182 set_dma_x_count(host->dma_ch, length / 4);
183 set_dma_x_modify(host->dma_ch, 4);
184 set_dma_config(host->dma_ch, dma_cfg);
185#endif
186 bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
187
188 SSYNC();
189
190 dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__);
191 return 0;
192}
193
194static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd)
195{
196 unsigned int sdh_cmd;
197 unsigned int stat_mask;
198
199 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd);
200 WARN_ON(host->cmd != NULL);
201 host->cmd = cmd;
202
203 sdh_cmd = 0;
204 stat_mask = 0;
205
206 sdh_cmd |= cmd->opcode;
207
208 if (cmd->flags & MMC_RSP_PRESENT) {
209 sdh_cmd |= CMD_RSP;
210 stat_mask |= CMD_RESP_END;
211 } else {
212 stat_mask |= CMD_SENT;
213 }
214
215 if (cmd->flags & MMC_RSP_136)
216 sdh_cmd |= CMD_L_RSP;
217
218 stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT;
219
220 sdh_enable_stat_irq(host, stat_mask);
221
222 bfin_write_SDH_ARGUMENT(cmd->arg);
223 bfin_write_SDH_COMMAND(sdh_cmd | CMD_E);
224 bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E);
225 SSYNC();
226}
227
228static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq)
229{
230 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
231 host->mrq = NULL;
232 host->cmd = NULL;
233 host->data = NULL;
234 mmc_request_done(host->mmc, mrq);
235}
236
237static int sdh_cmd_done(struct sdh_host *host, unsigned int stat)
238{
239 struct mmc_command *cmd = host->cmd;
240 int ret = 0;
241
242 dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd);
243 if (!cmd)
244 return 0;
245
246 host->cmd = NULL;
247
248 if (cmd->flags & MMC_RSP_PRESENT) {
249 cmd->resp[0] = bfin_read_SDH_RESPONSE0();
250 if (cmd->flags & MMC_RSP_136) {
251 cmd->resp[1] = bfin_read_SDH_RESPONSE1();
252 cmd->resp[2] = bfin_read_SDH_RESPONSE2();
253 cmd->resp[3] = bfin_read_SDH_RESPONSE3();
254 }
255 }
256 if (stat & CMD_TIME_OUT)
257 cmd->error = -ETIMEDOUT;
258 else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC)
259 cmd->error = -EILSEQ;
260
261 sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL));
262
263 if (host->data && !cmd->error) {
264 if (host->data->flags & MMC_DATA_WRITE) {
265 ret = sdh_setup_data(host, host->data);
266 if (ret)
267 return 0;
268 }
269
270 sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT);
271 } else
272 sdh_finish_request(host, host->mrq);
273
274 return 1;
275}
276
277static int sdh_data_done(struct sdh_host *host, unsigned int stat)
278{
279 struct mmc_data *data = host->data;
280
281 dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat);
282 if (!data)
283 return 0;
284
285 disable_dma(host->dma_ch);
286 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
287 host->dma_dir);
288
289 if (stat & DAT_TIME_OUT)
290 data->error = -ETIMEDOUT;
291 else if (stat & DAT_CRC_FAIL)
292 data->error = -EILSEQ;
293 else if (stat & (RX_OVERRUN | TX_UNDERRUN))
294 data->error = -EIO;
295
296 if (!data->error)
297 data->bytes_xfered = data->blocks * data->blksz;
298 else
299 data->bytes_xfered = 0;
300
301 sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);
302 bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
303 DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
304 bfin_write_SDH_DATA_CTL(0);
305 SSYNC();
306
307 host->data = NULL;
308 if (host->mrq->stop) {
309 sdh_stop_clock(host);
310 sdh_start_cmd(host, host->mrq->stop);
311 } else {
312 sdh_finish_request(host, host->mrq);
313 }
314
315 return 1;
316}
317
318static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
319{
320 struct sdh_host *host = mmc_priv(mmc);
321 int ret = 0;
322
323	dev_dbg(mmc_dev(host->mmc), "%s enter, mrq:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
324 WARN_ON(host->mrq != NULL);
325
326 host->mrq = mrq;
327 host->data = mrq->data;
328
329 if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
330 ret = sdh_setup_data(host, mrq->data);
331 if (ret)
332 return;
333 }
334
335 sdh_start_cmd(host, mrq->cmd);
336}
337
338static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
339{
340 struct sdh_host *host;
341 unsigned long flags;
342 u16 clk_ctl = 0;
343 u16 pwr_ctl = 0;
344 u16 cfg;
345 host = mmc_priv(mmc);
346
347 spin_lock_irqsave(&host->lock, flags);
348 if (ios->clock) {
349 unsigned long sys_clk, ios_clk;
350 unsigned char clk_div;
351 ios_clk = 2 * ios->clock;
352 sys_clk = get_sclk();
353 clk_div = sys_clk / ios_clk;
354 if (sys_clk % ios_clk == 0)
355 clk_div -= 1;
356 clk_div = min_t(unsigned char, clk_div, 0xFF);
357 clk_ctl |= clk_div;
358 clk_ctl |= CLK_E;
359 host->clk_div = clk_div;
360 } else
361 sdh_stop_clock(host);
362
363 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
364#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
365 pwr_ctl |= ROD_CTL;
366#else
367 pwr_ctl |= SD_CMD_OD | ROD_CTL;
368#endif
369
370 if (ios->bus_width == MMC_BUS_WIDTH_4) {
371 cfg = bfin_read_SDH_CFG();
372 cfg &= ~PD_SDDAT3;
373 cfg |= PUP_SDDAT3;
374 /* Enable 4 bit SDIO */
375 cfg |= (SD4E | MWE);
376 bfin_write_SDH_CFG(cfg);
377 clk_ctl |= WIDE_BUS;
378 } else {
379 cfg = bfin_read_SDH_CFG();
380 cfg |= MWE;
381 bfin_write_SDH_CFG(cfg);
382 }
383
384 bfin_write_SDH_CLK_CTL(clk_ctl);
385
386 host->power_mode = ios->power_mode;
387 if (ios->power_mode == MMC_POWER_ON)
388 pwr_ctl |= PWR_ON;
389
390 bfin_write_SDH_PWR_CTL(pwr_ctl);
391 SSYNC();
392
393 spin_unlock_irqrestore(&host->lock, flags);
394
395 dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
396 host->clk_div,
397 host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0,
398 ios->clock);
399}
400
401static const struct mmc_host_ops sdh_ops = {
402 .request = sdh_request,
403 .set_ios = sdh_set_ios,
404};
405
406static irqreturn_t sdh_dma_irq(int irq, void *devid)
407{
408 struct sdh_host *host = devid;
409
410 dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__,
411 get_dma_curr_irqstat(host->dma_ch));
412 clear_dma_irqstat(host->dma_ch);
413 SSYNC();
414
415 return IRQ_HANDLED;
416}
417
418static irqreturn_t sdh_stat_irq(int irq, void *devid)
419{
420 struct sdh_host *host = devid;
421 unsigned int status;
422 int handled = 0;
423
424 dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
425 status = bfin_read_SDH_E_STATUS();
426 if (status & SD_CARD_DET) {
427 mmc_detect_change(host->mmc, 0);
428 bfin_write_SDH_E_STATUS(SD_CARD_DET);
429 }
430 status = bfin_read_SDH_STATUS();
431 if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) {
432 handled |= sdh_cmd_done(host, status);
433 bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \
434 CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT);
435 SSYNC();
436 }
437
438 status = bfin_read_SDH_STATUS();
439 if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
440 handled |= sdh_data_done(host, status);
441
442 dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
443
444 return IRQ_RETVAL(handled);
445}
446
447static int __devinit sdh_probe(struct platform_device *pdev)
448{
449 struct mmc_host *mmc;
450 struct sdh_host *host;
451 struct bfin_sd_host *drv_data = get_sdh_data(pdev);
452 int ret;
453
454 if (!drv_data) {
455 dev_err(&pdev->dev, "missing platform driver data\n");
456 ret = -EINVAL;
457 goto out;
458 }
459
460 mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
461 if (!mmc) {
462 ret = -ENOMEM;
463 goto out;
464 }
465
466 mmc->ops = &sdh_ops;
467 mmc->max_phys_segs = 32;
468 mmc->max_seg_size = 1 << 16;
469 mmc->max_blk_size = 1 << 11;
470 mmc->max_blk_count = 1 << 11;
471 mmc->max_req_size = PAGE_SIZE;
472 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
473 mmc->f_max = get_sclk();
474 mmc->f_min = mmc->f_max >> 9;
475 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
476 host = mmc_priv(mmc);
477 host->mmc = mmc;
478
479 spin_lock_init(&host->lock);
480 host->irq = drv_data->irq_int0;
481 host->dma_ch = drv_data->dma_chan;
482
483 ret = request_dma(host->dma_ch, DRIVER_NAME "DMA");
484 if (ret) {
485 dev_err(&pdev->dev, "unable to request DMA channel\n");
486 goto out1;
487 }
488
489 ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host);
490 if (ret) {
491 dev_err(&pdev->dev, "unable to request DMA irq\n");
492 goto out2;
493 }
494
495 host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
496 if (host->sg_cpu == NULL) {
497 ret = -ENOMEM;
498 goto out2;
499 }
500
501 platform_set_drvdata(pdev, mmc);
502 mmc_add_host(mmc);
503
504 ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
505 if (ret) {
506 dev_err(&pdev->dev, "unable to request status irq\n");
507 goto out3;
508 }
509
510 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
511 if (ret) {
512 dev_err(&pdev->dev, "unable to request peripheral pins\n");
513 goto out4;
514 }
515#if defined(CONFIG_BF54x)
516	/* Secure Digital Host shares DMA with NAND controller */
517 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
518#endif
519
520 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
521 SSYNC();
522
523	/* Disable the card-insertion detection pin; set MMC_CAP_NEEDS_POLL so
524	 * the mmc stack will do the detection.
525 */
526 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
527 SSYNC();
528
529 return 0;
530
531out4:
532 free_irq(host->irq, host);
533out3:
534 mmc_remove_host(mmc);
535 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
536out2:
537 free_dma(host->dma_ch);
538out1:
539 mmc_free_host(mmc);
540 out:
541 return ret;
542}
543
544static int __devexit sdh_remove(struct platform_device *pdev)
545{
546 struct mmc_host *mmc = platform_get_drvdata(pdev);
547
548 platform_set_drvdata(pdev, NULL);
549
550 if (mmc) {
551 struct sdh_host *host = mmc_priv(mmc);
552
553 mmc_remove_host(mmc);
554
555 sdh_stop_clock(host);
556 free_irq(host->irq, host);
557 free_dma(host->dma_ch);
558 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
559
560 mmc_free_host(mmc);
561 }
562
563 return 0;
564}
565
566#ifdef CONFIG_PM
567static int sdh_suspend(struct platform_device *dev, pm_message_t state)
568{
569 struct mmc_host *mmc = platform_get_drvdata(dev);
570 struct bfin_sd_host *drv_data = get_sdh_data(dev);
571 int ret = 0;
572
573 if (mmc)
574 ret = mmc_suspend_host(mmc, state);
575
576 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
577 peripheral_free_list(drv_data->pin_req);
578
579 return ret;
580}
581
582static int sdh_resume(struct platform_device *dev)
583{
584 struct mmc_host *mmc = platform_get_drvdata(dev);
585 struct bfin_sd_host *drv_data = get_sdh_data(dev);
586 int ret = 0;
587
588 ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
589 if (ret) {
590 dev_err(&dev->dev, "unable to request peripheral pins\n");
591 return ret;
592 }
593
594 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON);
595#if defined(CONFIG_BF54x)
596	/* Secure Digital Host shares DMA with NAND controller */
597 bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
598#endif
599 bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
600 SSYNC();
601
602 bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
603 SSYNC();
604
605 if (mmc)
606 ret = mmc_resume_host(mmc);
607
608 return ret;
609}
610#else
611# define sdh_suspend NULL
612# define sdh_resume NULL
613#endif
614
615static struct platform_driver sdh_driver = {
616 .probe = sdh_probe,
617 .remove = __devexit_p(sdh_remove),
618 .suspend = sdh_suspend,
619 .resume = sdh_resume,
620 .driver = {
621 .name = DRIVER_NAME,
622 },
623};
624
625static int __init sdh_init(void)
626{
627 return platform_driver_register(&sdh_driver);
628}
629module_init(sdh_init);
630
631static void __exit sdh_exit(void)
632{
633 platform_driver_unregister(&sdh_driver);
634}
635module_exit(sdh_exit);
636
637MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
638MODULE_AUTHOR("Cliff Cai, Roy Huang");
639MODULE_LICENSE("GPL");
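A worked example of the divider arithmetic in sdh_set_ios() above, assuming an illustrative 133 MHz system clock: for a requested ios->clock of 25 MHz, ios_clk = 50 MHz and clk_div = 133 MHz / 50 MHz = 2; since the division is not exact no decrement is applied, and the resulting card clock is sclk / (2 * (clk_div + 1)) = 133 MHz / 6, roughly 22.2 MHz, i.e. the divider always rounds down to at most the requested rate. With a 100 MHz system clock the division is exact, the decrement gives clk_div = 1 and the card clock is exactly 25 MHz.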
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
new file mode 100644
index 000000000000..dd45e7c3517e
--- /dev/null
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -0,0 +1,1349 @@
1/*
2 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
3 *
4 * Copyright (C) 2006 Texas Instruments.
5 * Original author: Purushotam Kumar
6 * Copyright (C) 2009 David Brownell
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/ioport.h>
25#include <linux/platform_device.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/cpufreq.h>
29#include <linux/mmc/host.h>
30#include <linux/io.h>
31#include <linux/irq.h>
32#include <linux/delay.h>
33#include <linux/dma-mapping.h>
34#include <linux/mmc/mmc.h>
35
36#include <mach/mmc.h>
37#include <mach/edma.h>
38
39/*
40 * Register Definitions
41 */
42#define DAVINCI_MMCCTL 0x00 /* Control Register */
43#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */
44#define DAVINCI_MMCST0 0x08 /* Status Register 0 */
45#define DAVINCI_MMCST1 0x0C /* Status Register 1 */
46#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */
47#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */
48#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */
49#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */
50#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */
51#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */
52#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */
53#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */
54#define DAVINCI_MMCCMD 0x30 /* Command Register */
55#define DAVINCI_MMCARGHL 0x34 /* Argument Register */
56#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */
57#define DAVINCI_MMCRSP23 0x3C /* Response Register 2 and 3 */
58#define DAVINCI_MMCRSP45 0x40 /* Response Register 4 and 5 */
59#define DAVINCI_MMCRSP67 0x44 /* Response Register 6 and 7 */
60#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */
61#define DAVINCI_MMCETOK 0x4C
62#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */
63#define DAVINCI_MMCCKC 0x54
64#define DAVINCI_MMCTORC 0x58
65#define DAVINCI_MMCTODC 0x5C
66#define DAVINCI_MMCBLNC 0x60
67#define DAVINCI_SDIOCTL 0x64
68#define DAVINCI_SDIOST0 0x68
69#define DAVINCI_SDIOEN 0x6C
70#define DAVINCI_SDIOST 0x70
71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
72
73/* DAVINCI_MMCCTL definitions */
74#define MMCCTL_DATRST (1 << 0)
75#define MMCCTL_CMDRST (1 << 1)
76#define MMCCTL_WIDTH_4_BIT (1 << 2)
77#define MMCCTL_DATEG_DISABLED (0 << 6)
78#define MMCCTL_DATEG_RISING (1 << 6)
79#define MMCCTL_DATEG_FALLING (2 << 6)
80#define MMCCTL_DATEG_BOTH (3 << 6)
81#define MMCCTL_PERMDR_LE (0 << 9)
82#define MMCCTL_PERMDR_BE (1 << 9)
83#define MMCCTL_PERMDX_LE (0 << 10)
84#define MMCCTL_PERMDX_BE (1 << 10)
85
86/* DAVINCI_MMCCLK definitions */
87#define MMCCLK_CLKEN (1 << 8)
88#define MMCCLK_CLKRT_MASK (0xFF << 0)
89
90/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
91#define MMCST0_DATDNE BIT(0) /* data done */
92#define MMCST0_BSYDNE BIT(1) /* busy done */
93#define MMCST0_RSPDNE BIT(2) /* command done */
94#define MMCST0_TOUTRD BIT(3) /* data read timeout */
95#define MMCST0_TOUTRS BIT(4) /* command response timeout */
96#define MMCST0_CRCWR BIT(5) /* data write CRC error */
97#define MMCST0_CRCRD BIT(6) /* data read CRC error */
98#define MMCST0_CRCRS BIT(7) /* command response CRC error */
99#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */
100#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/
101#define MMCST0_DATED BIT(11) /* DAT3 edge detect */
102#define MMCST0_TRNDNE BIT(12) /* transfer done */
103
104/* DAVINCI_MMCST1 definitions */
105#define MMCST1_BUSY (1 << 0)
106
107/* DAVINCI_MMCCMD definitions */
108#define MMCCMD_CMD_MASK (0x3F << 0)
109#define MMCCMD_PPLEN (1 << 7)
110#define MMCCMD_BSYEXP (1 << 8)
111#define MMCCMD_RSPFMT_MASK (3 << 9)
112#define MMCCMD_RSPFMT_NONE (0 << 9)
113#define MMCCMD_RSPFMT_R1456 (1 << 9)
114#define MMCCMD_RSPFMT_R2 (2 << 9)
115#define MMCCMD_RSPFMT_R3 (3 << 9)
116#define MMCCMD_DTRW (1 << 11)
117#define MMCCMD_STRMTP (1 << 12)
118#define MMCCMD_WDATX (1 << 13)
119#define MMCCMD_INITCK (1 << 14)
120#define MMCCMD_DCLR (1 << 15)
121#define MMCCMD_DMATRIG (1 << 16)
122
123/* DAVINCI_MMCFIFOCTL definitions */
124#define MMCFIFOCTL_FIFORST (1 << 0)
125#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
126#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
127#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
128#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */
129#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */
130#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
131#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
132
133
134/* MMCSD Init clock in Hz in opendrain mode */
135#define MMCSD_INIT_CLOCK 200000
136
137/*
138 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
139 * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
140 * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
141 * than the page or two that's otherwise typical. NR_SG == 16 gives at
142 * least the same throughput boost, using EDMA transfer linkage instead
143 * of spending CPU time copying pages.
144 */
145#define MAX_CCNT ((1 << 16) - 1)
146
147#define NR_SG 16
148
149static unsigned rw_threshold = 32;
150module_param(rw_threshold, uint, S_IRUGO);
151MODULE_PARM_DESC(rw_threshold,
152 "Read/Write threshold. Default = 32");
153
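To put numbers on the comment above: with the default rw_threshold of 32 bytes, one EDMA segment covers at most MAX_CCNT * 32 = 65535 * 32 bytes, roughly 2 MB, and up to NR_SG = 16 such segments can be linked per request, i.e. on the order of 32 MB per DMA transfer (assuming each scatterlist entry fits within a single segment).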
154static unsigned __initdata use_dma = 1;
155module_param(use_dma, uint, 0);
156MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
157
158struct mmc_davinci_host {
159 struct mmc_command *cmd;
160 struct mmc_data *data;
161 struct mmc_host *mmc;
162 struct clk *clk;
163 unsigned int mmc_input_clk;
164 void __iomem *base;
165 struct resource *mem_res;
166 int irq;
167 unsigned char bus_mode;
168
169#define DAVINCI_MMC_DATADIR_NONE 0
170#define DAVINCI_MMC_DATADIR_READ 1
171#define DAVINCI_MMC_DATADIR_WRITE 2
172 unsigned char data_dir;
173
174 /* buffer is used during PIO of one scatterlist segment, and
175 * is updated along with buffer_bytes_left. bytes_left applies
176 * to all N blocks of the PIO transfer.
177 */
178 u8 *buffer;
179 u32 buffer_bytes_left;
180 u32 bytes_left;
181
182 u32 rxdma, txdma;
183 bool use_dma;
184 bool do_dma;
185
186 /* Scatterlist DMA uses one or more parameter RAM entries:
187 * the main one (associated with rxdma or txdma) plus zero or
188 * more links. The entries for a given transfer differ only
189 * by memory buffer (address, length) and link field.
190 */
191 struct edmacc_param tx_template;
192 struct edmacc_param rx_template;
193 unsigned n_link;
194 u32 links[NR_SG - 1];
195
196 /* For PIO we walk scatterlists one segment at a time. */
197 unsigned int sg_len;
198 struct scatterlist *sg;
199
200 /* Version of the MMC/SD controller */
201 u8 version;
202 /* for ns in one cycle calculation */
203 unsigned ns_in_one_cycle;
204#ifdef CONFIG_CPU_FREQ
205 struct notifier_block freq_transition;
206#endif
207};
208
209
210/* PIO only */
211static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
212{
213 host->buffer_bytes_left = sg_dma_len(host->sg);
214 host->buffer = sg_virt(host->sg);
215 if (host->buffer_bytes_left > host->bytes_left)
216 host->buffer_bytes_left = host->bytes_left;
217}
218
219static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
220 unsigned int n)
221{
222 u8 *p;
223 unsigned int i;
224
225 if (host->buffer_bytes_left == 0) {
226 host->sg = sg_next(host->data->sg);
227 mmc_davinci_sg_to_buf(host);
228 }
229
230 p = host->buffer;
231 if (n > host->buffer_bytes_left)
232 n = host->buffer_bytes_left;
233 host->buffer_bytes_left -= n;
234 host->bytes_left -= n;
235
236 /* NOTE: we never transfer more than rw_threshold bytes
237 * to/from the fifo here; there's no I/O overlap.
238	 * This also assumes that the access width (i.e. ACCWD) is 4 bytes.
239 */
240 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
241 for (i = 0; i < (n >> 2); i++) {
242 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
243 p = p + 4;
244 }
245 if (n & 3) {
246 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
247 p = p + (n & 3);
248 }
249 } else {
250 for (i = 0; i < (n >> 2); i++) {
251 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
252 p = p + 4;
253 }
254 if (n & 3) {
255 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
256 p = p + (n & 3);
257 }
258 }
259 host->buffer = p;
260}
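A small worked example of the word/byte split above (illustrative only, not driver code): for a residual transfer of n = 30 bytes, the loop issues the 32-bit accesses first and the leftover bytes go through the byte-repeat helpers.

/*
 *   n = 30:  n >> 2 = 7  -> seven 32-bit writel()/readl() accesses
 *            n & 3  = 2  -> two 8-bit accesses via iowrite8_rep()/ioread8_rep()
 */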
261
262static void mmc_davinci_start_command(struct mmc_davinci_host *host,
263 struct mmc_command *cmd)
264{
265 u32 cmd_reg = 0;
266 u32 im_val;
267
268 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
269 cmd->opcode, cmd->arg,
270 ({ char *s;
271 switch (mmc_resp_type(cmd)) {
272 case MMC_RSP_R1:
273 s = ", R1/R5/R6/R7 response";
274 break;
275 case MMC_RSP_R1B:
276 s = ", R1b response";
277 break;
278 case MMC_RSP_R2:
279 s = ", R2 response";
280 break;
281 case MMC_RSP_R3:
282 s = ", R3/R4 response";
283 break;
284 default:
285 s = ", (R? response)";
286 break;
287 }; s; }));
288 host->cmd = cmd;
289
290 switch (mmc_resp_type(cmd)) {
291 case MMC_RSP_R1B:
292 /* There's some spec confusion about when R1B is
293 * allowed, but if the card doesn't issue a BUSY
294 * then it's harmless for us to allow it.
295 */
296 cmd_reg |= MMCCMD_BSYEXP;
297 /* FALLTHROUGH */
298 case MMC_RSP_R1: /* 48 bits, CRC */
299 cmd_reg |= MMCCMD_RSPFMT_R1456;
300 break;
301 case MMC_RSP_R2: /* 136 bits, CRC */
302 cmd_reg |= MMCCMD_RSPFMT_R2;
303 break;
304 case MMC_RSP_R3: /* 48 bits, no CRC */
305 cmd_reg |= MMCCMD_RSPFMT_R3;
306 break;
307 default:
308 cmd_reg |= MMCCMD_RSPFMT_NONE;
309 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
310 mmc_resp_type(cmd));
311 break;
312 }
313
314 /* Set command index */
315 cmd_reg |= cmd->opcode;
316
317 /* Enable EDMA transfer triggers */
318 if (host->do_dma)
319 cmd_reg |= MMCCMD_DMATRIG;
320
321 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
322 host->data_dir == DAVINCI_MMC_DATADIR_READ)
323 cmd_reg |= MMCCMD_DMATRIG;
324
325 /* Setting whether command involves data transfer or not */
326 if (cmd->data)
327 cmd_reg |= MMCCMD_WDATX;
328
329 /* Setting whether stream or block transfer */
330 if (cmd->flags & MMC_DATA_STREAM)
331 cmd_reg |= MMCCMD_STRMTP;
332
333 /* Setting whether data read or write */
334 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
335 cmd_reg |= MMCCMD_DTRW;
336
337 if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
338 cmd_reg |= MMCCMD_PPLEN;
339
340 /* set Command timeout */
341 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
342
343 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */
344 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
345 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
346 im_val |= MMCST0_DATDNE | MMCST0_CRCWR;
347
348 if (!host->do_dma)
349 im_val |= MMCST0_DXRDY;
350 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
351 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;
352
353 if (!host->do_dma)
354 im_val |= MMCST0_DRRDY;
355 }
356
357 /*
358 * Before non-DMA WRITE commands the controller needs priming:
359 * the FIFO should be pre-filled with rw_threshold bytes, i.e. one full FIFO's worth
360 */
361 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
362 davinci_fifo_data_trans(host, rw_threshold);
363
364 writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
365 writel(cmd_reg, host->base + DAVINCI_MMCCMD);
366 writel(im_val, host->base + DAVINCI_MMCIM);
367}
368
369/*----------------------------------------------------------------------*/
370
371/* DMA infrastructure */
372
373static void davinci_abort_dma(struct mmc_davinci_host *host)
374{
375 int sync_dev;
376
377 if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
378 sync_dev = host->rxdma;
379 else
380 sync_dev = host->txdma;
381
382 edma_stop(sync_dev);
383 edma_clean_channel(sync_dev);
384}
385
386static void
387mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);
388
389static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
390{
391 if (DMA_COMPLETE != ch_status) {
392 struct mmc_davinci_host *host = data;
393
394 /* Currently means: DMA Event Missed, or "null" transfer
395 * request was seen. In the future, TC errors (like bad
396 * addresses) might be presented too.
397 */
398 dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
399 (host->data->flags & MMC_DATA_WRITE)
400 ? "write" : "read");
401 host->data->error = -EIO;
402 mmc_davinci_xfer_done(host, host->data);
403 }
404}
405
406/* Set up tx or rx template, to be modified and updated later */
407static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
408 bool tx, struct edmacc_param *template)
409{
410 unsigned sync_dev;
411 const u16 acnt = 4;
412 const u16 bcnt = rw_threshold >> 2;
413 const u16 ccnt = 0;
414 u32 src_port = 0;
415 u32 dst_port = 0;
416 s16 src_bidx, dst_bidx;
417 s16 src_cidx, dst_cidx;
418
419 /*
420 * A-B Sync transfer: each DMA request is for one "frame" of
421 * rw_threshold bytes, broken into "acnt"-size chunks repeated
422 * "bcnt" times. Each segment needs "ccnt" such frames; since
423 * we tell the block layer our mmc->max_seg_size limit, we can
424 * trust (later) that it's within bounds.
425 *
426 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
427 * EDMA will optimize memory operations to use larger bursts.
428 */
429 if (tx) {
430 sync_dev = host->txdma;
431
432 /* src_prt, ccnt, and link to be set up later */
433 src_bidx = acnt;
434 src_cidx = acnt * bcnt;
435
436 dst_port = host->mem_res->start + DAVINCI_MMCDXR;
437 dst_bidx = 0;
438 dst_cidx = 0;
439 } else {
440 sync_dev = host->rxdma;
441
442 src_port = host->mem_res->start + DAVINCI_MMCDRR;
443 src_bidx = 0;
444 src_cidx = 0;
445
446 /* dst_prt, ccnt, and link to be set up later */
447 dst_bidx = acnt;
448 dst_cidx = acnt * bcnt;
449 }
450
451 /*
452 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
453 * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT
454 * parameter is ignored.
455 */
456 edma_set_src(sync_dev, src_port, INCR, W8BIT);
457 edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
458
459 edma_set_src_index(sync_dev, src_bidx, src_cidx);
460 edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
461
462 edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
463
464 edma_read_slot(sync_dev, template);
465
466 /* don't bother with irqs or chaining */
467 template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
468}
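To make the A-B sync geometry above concrete, here is a sketch of the numbers for one 4 KiB scatterlist segment with the default rw_threshold of 32 (the per-segment ccnt itself is filled in later, in mmc_davinci_send_dma_request()):

/*
 *   acnt = 4                       one FIFO access moves 4 bytes
 *   bcnt = rw_threshold / 4 = 8    eight accesses per DMA event (one frame)
 *   ccnt = 4096 >> 5 = 128         frames needed for the 4 KiB segment
 *
 * i.e. every MMC DMA event drains or fills exactly one 32-byte FIFO's
 * worth of data.
 */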
469
470static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
471 struct mmc_data *data)
472{
473 struct edmacc_param *template;
474 int channel, slot;
475 unsigned link;
476 struct scatterlist *sg;
477 unsigned sg_len;
478 unsigned bytes_left = host->bytes_left;
479 const unsigned shift = ffs(rw_threshold) - 1;
480
481 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
482 template = &host->tx_template;
483 channel = host->txdma;
484 } else {
485 template = &host->rx_template;
486 channel = host->rxdma;
487 }
488
489 /* We know sg_len and ccnt will never be out of range because
490 * we told the mmc layer (which in turn tells the block layer)
491 * to hand us at most one scatterlist segment per EDMA PARAM
492 * entry. Update the PARAM entries needed for each segment of
493 * this scatterlist.
494 */
495 for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
496 sg_len-- != 0 && bytes_left;
497 sg = sg_next(sg), slot = host->links[link++]) {
498 u32 buf = sg_dma_address(sg);
499 unsigned count = sg_dma_len(sg);
500
501 template->link_bcntrld = sg_len
502 ? (EDMA_CHAN_SLOT(host->links[link]) << 5)
503 : 0xffff;
504
505 if (count > bytes_left)
506 count = bytes_left;
507 bytes_left -= count;
508
509 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
510 template->src = buf;
511 else
512 template->dst = buf;
513 template->ccnt = count >> shift;
514
515 edma_write_slot(slot, template);
516 }
517
518 if (host->version == MMC_CTLR_VERSION_2)
519 edma_clear_event(channel);
520
521 edma_start(channel);
522}
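For instance, with a hypothetical three-segment scatterlist (and assuming enough link slots were allocated at probe time), the loop above programs the slots roughly like this:

/*
 *   segment 0 -> the channel's own PARAM slot, linked to links[0]
 *   segment 1 -> links[0],                     linked to links[1]
 *   segment 2 -> links[1],                     link field = 0xffff (end)
 *
 * Only the buffer address, ccnt and link field differ between slots;
 * everything else comes from the tx/rx template.
 */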
523
524static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
525 struct mmc_data *data)
526{
527 int i;
528 int mask = rw_threshold - 1;
529
530 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
531 ((data->flags & MMC_DATA_WRITE)
532 ? DMA_TO_DEVICE
533 : DMA_FROM_DEVICE));
534
535 /* no individual DMA segment should need a partial FIFO */
536 for (i = 0; i < host->sg_len; i++) {
537 if (sg_dma_len(data->sg + i) & mask) {
538 dma_unmap_sg(mmc_dev(host->mmc),
539 data->sg, data->sg_len,
540 (data->flags & MMC_DATA_WRITE)
541 ? DMA_TO_DEVICE
542 : DMA_FROM_DEVICE);
543 return -1;
544 }
545 }
546
547 host->do_dma = 1;
548 mmc_davinci_send_dma_request(host, data);
549
550 return 0;
551}
552
553static void __init_or_module
554davinci_release_dma_channels(struct mmc_davinci_host *host)
555{
556 unsigned i;
557
558 if (!host->use_dma)
559 return;
560
561 for (i = 0; i < host->n_link; i++)
562 edma_free_slot(host->links[i]);
563
564 edma_free_channel(host->txdma);
565 edma_free_channel(host->rxdma);
566}
567
568static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
569{
570 int r, i;
571
572 /* Acquire master DMA write channel */
573 r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
574 EVENTQ_DEFAULT);
575 if (r < 0) {
576 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
577 "tx", r);
578 return r;
579 }
580 mmc_davinci_dma_setup(host, true, &host->tx_template);
581
582 /* Acquire master DMA read channel */
583 r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
584 EVENTQ_DEFAULT);
585 if (r < 0) {
586 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
587 "rx", r);
588 goto free_master_write;
589 }
590 mmc_davinci_dma_setup(host, false, &host->rx_template);
591
592 /* Allocate parameter RAM slots, which will later be bound to a
593 * channel as needed to handle a scatterlist.
594 */
595 for (i = 0; i < ARRAY_SIZE(host->links); i++) {
596 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
597 if (r < 0) {
598 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
599 r);
600 break;
601 }
602 host->links[i] = r;
603 }
604 host->n_link = i;
605
606 return 0;
607
608free_master_write:
609 edma_free_channel(host->txdma);
610
611 return r;
612}
613
614/*----------------------------------------------------------------------*/
615
616static void
617mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
618{
619 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
620 int timeout;
621 struct mmc_data *data = req->data;
622
623 if (host->version == MMC_CTLR_VERSION_2)
624 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
625
626 host->data = data;
627 if (data == NULL) {
628 host->data_dir = DAVINCI_MMC_DATADIR_NONE;
629 writel(0, host->base + DAVINCI_MMCBLEN);
630 writel(0, host->base + DAVINCI_MMCNBLK);
631 return;
632 }
633
634 dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
635 (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
636 (data->flags & MMC_DATA_WRITE) ? "write" : "read",
637 data->blocks, data->blksz);
638 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
639 data->timeout_clks, data->timeout_ns);
640 timeout = data->timeout_clks +
641 (data->timeout_ns / host->ns_in_one_cycle);
642 if (timeout > 0xffff)
643 timeout = 0xffff;
644
645 writel(timeout, host->base + DAVINCI_MMCTOD);
646 writel(data->blocks, host->base + DAVINCI_MMCNBLK);
647 writel(data->blksz, host->base + DAVINCI_MMCBLEN);
648
649 /* Configure the FIFO */
650 switch (data->flags & MMC_DATA_WRITE) {
651 case MMC_DATA_WRITE:
652 host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
653 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
654 host->base + DAVINCI_MMCFIFOCTL);
655 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
656 host->base + DAVINCI_MMCFIFOCTL);
657 break;
658
659 default:
660 host->data_dir = DAVINCI_MMC_DATADIR_READ;
661 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
662 host->base + DAVINCI_MMCFIFOCTL);
663 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
664 host->base + DAVINCI_MMCFIFOCTL);
665 break;
666 }
667
668 host->buffer = NULL;
669 host->bytes_left = data->blocks * data->blksz;
670
671 /* For now we try to use DMA whenever we won't need partial FIFO
672 * reads or writes, either for the whole transfer (as tested here)
673 * or for any individual scatterlist segment (tested when we call
674 * start_dma_transfer).
675 *
676 * While we *could* change that, unusual block sizes are rarely
677 * used. The occasional fallback to PIO shouldn't hurt.
678 */
679 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
680 && mmc_davinci_start_dma_transfer(host, data) == 0) {
681 /* zero this to ensure we take no PIO paths */
682 host->bytes_left = 0;
683 } else {
684 /* Revert to CPU Copy */
685 host->sg_len = data->sg_len;
686 host->sg = host->data->sg;
687 mmc_davinci_sg_to_buf(host);
688 }
689}
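A quick illustration of the DMA/PIO decision above (numbers purely illustrative): the test is simply whether the total transfer size is a multiple of rw_threshold.

/*
 *   8 blocks * 512 bytes = 4096  ->  4096 & 31 == 0  -> try the DMA path
 *   1 block  *  24 bytes =   24  ->    24 & 31 != 0  -> fall back to PIO
 */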
690
691static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
692{
693 struct mmc_davinci_host *host = mmc_priv(mmc);
694 unsigned long timeout = jiffies + msecs_to_jiffies(900);
695 u32 mmcst1 = 0;
696
697 /* Card may still be sending BUSY after a previous operation,
698 * typically some kind of write. If so, we can't proceed yet.
699 */
700 while (time_before(jiffies, timeout)) {
701 mmcst1 = readl(host->base + DAVINCI_MMCST1);
702 if (!(mmcst1 & MMCST1_BUSY))
703 break;
704 cpu_relax();
705 }
706 if (mmcst1 & MMCST1_BUSY) {
707 dev_err(mmc_dev(host->mmc), "still BUSY? bad ...\n");
708 req->cmd->error = -ETIMEDOUT;
709 mmc_request_done(mmc, req);
710 return;
711 }
712
713 host->do_dma = 0;
714 mmc_davinci_prepare_data(host, req);
715 mmc_davinci_start_command(host, req->cmd);
716}
717
718static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
719 unsigned int mmc_req_freq)
720{
721 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;
722
723 mmc_pclk = host->mmc_input_clk;
724 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
725 mmc_push_pull_divisor = ((unsigned int)mmc_pclk
726 / (2 * mmc_req_freq)) - 1;
727 else
728 mmc_push_pull_divisor = 0;
729
730 mmc_freq = (unsigned int)mmc_pclk
731 / (2 * (mmc_push_pull_divisor + 1));
732
733 if (mmc_freq > mmc_req_freq)
734 mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
735 /* Convert ns to clock cycles */
736 if (mmc_req_freq <= 400000)
737 host->ns_in_one_cycle = (1000000) / (((mmc_pclk
738 / (2 * (mmc_push_pull_divisor + 1)))/1000));
739 else
740 host->ns_in_one_cycle = (1000000) / (((mmc_pclk
741 / (2 * (mmc_push_pull_divisor + 1)))/1000000));
742
743 return mmc_push_pull_divisor;
744}
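Worked example of the divisor calculation above, using an illustrative 75 MHz controller input clock and a 25 MHz card request (the actual mmc_input_clk depends on the SoC clock tree):

/*
 *   75 MHz > 2 * 25 MHz            -> divisor = 75 / 50 - 1 = 0
 *   mmc_freq = 75 / (2 * 1)        = 37.5 MHz  (> 25 MHz requested)
 *   so the divisor is bumped to 1  -> bus clock = 75 / (2 * 2) = 18.75 MHz
 */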
745
746static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
747{
748 unsigned int open_drain_freq = 0, mmc_pclk = 0;
749 unsigned int mmc_push_pull_freq = 0;
750 struct mmc_davinci_host *host = mmc_priv(mmc);
751
752 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
753 u32 temp;
754
755 /* Ignore the init clock value that was passed in, to improve
756 * interoperability with different cards.
757 */
758 open_drain_freq = ((unsigned int)mmc_pclk
759 / (2 * MMCSD_INIT_CLOCK)) - 1;
760
761 if (open_drain_freq > 0xFF)
762 open_drain_freq = 0xFF;
763
764 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
765 temp |= open_drain_freq;
766 writel(temp, host->base + DAVINCI_MMCCLK);
767
768 /* Convert ns to clock cycles */
769 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
770 } else {
771 u32 temp;
772 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);
773
774 if (mmc_push_pull_freq > 0xFF)
775 mmc_push_pull_freq = 0xFF;
776
777 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
778 writel(temp, host->base + DAVINCI_MMCCLK);
779
780 udelay(10);
781
782 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
783 temp |= mmc_push_pull_freq;
784 writel(temp, host->base + DAVINCI_MMCCLK);
785
786 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
787
788 udelay(10);
789 }
790}
791
792static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
793{
794 unsigned int mmc_pclk = 0;
795 struct mmc_davinci_host *host = mmc_priv(mmc);
796
797 mmc_pclk = host->mmc_input_clk;
798 dev_dbg(mmc_dev(host->mmc),
799 "clock %dHz busmode %d powermode %d Vdd %04x\n",
800 ios->clock, ios->bus_mode, ios->power_mode,
801 ios->vdd);
802 if (ios->bus_width == MMC_BUS_WIDTH_4) {
803 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
804 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT,
805 host->base + DAVINCI_MMCCTL);
806 } else {
807 dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n");
808 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT,
809 host->base + DAVINCI_MMCCTL);
810 }
811
812 calculate_clk_divider(mmc, ios);
813
814 host->bus_mode = ios->bus_mode;
815 if (ios->power_mode == MMC_POWER_UP) {
816 unsigned long timeout = jiffies + msecs_to_jiffies(50);
817 bool lose = true;
818
819 /* Send clock cycles, poll completion */
820 writel(0, host->base + DAVINCI_MMCARGHL);
821 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
822 while (time_before(jiffies, timeout)) {
823 u32 tmp = readl(host->base + DAVINCI_MMCST0);
824
825 if (tmp & MMCST0_RSPDNE) {
826 lose = false;
827 break;
828 }
829 cpu_relax();
830 }
831 if (lose)
832 dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
833 }
834
835 /* FIXME on power OFF, reset things ... */
836}
837
838static void
839mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
840{
841 host->data = NULL;
842
843 if (host->do_dma) {
844 davinci_abort_dma(host);
845
846 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
847 (data->flags & MMC_DATA_WRITE)
848 ? DMA_TO_DEVICE
849 : DMA_FROM_DEVICE);
850 host->do_dma = false;
851 }
852 host->data_dir = DAVINCI_MMC_DATADIR_NONE;
853
854 if (!data->stop || (host->cmd && host->cmd->error)) {
855 mmc_request_done(host->mmc, data->mrq);
856 writel(0, host->base + DAVINCI_MMCIM);
857 } else
858 mmc_davinci_start_command(host, data->stop);
859}
860
861static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
862 struct mmc_command *cmd)
863{
864 host->cmd = NULL;
865
866 if (cmd->flags & MMC_RSP_PRESENT) {
867 if (cmd->flags & MMC_RSP_136) {
868 /* response type 2 */
869 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
870 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
871 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
872 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
873 } else {
874 /* response types 1, 1b, 3, 4, 5, 6 */
875 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
876 }
877 }
878
879 if (host->data == NULL || cmd->error) {
880 if (cmd->error == -ETIMEDOUT)
881 cmd->mrq->cmd->retries = 0;
882 mmc_request_done(host->mmc, cmd->mrq);
883 writel(0, host->base + DAVINCI_MMCIM);
884 }
885}
886
887static void
888davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
889{
890 u32 temp;
891
892 /* reset command and data state machines */
893 temp = readl(host->base + DAVINCI_MMCCTL);
894 writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
895 host->base + DAVINCI_MMCCTL);
896
897 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
898 udelay(10);
899 writel(temp, host->base + DAVINCI_MMCCTL);
900}
901
902static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
903{
904 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
905 unsigned int status, qstatus;
906 int end_command = 0;
907 int end_transfer = 0;
908 struct mmc_data *data = host->data;
909
910 if (host->cmd == NULL && host->data == NULL) {
911 status = readl(host->base + DAVINCI_MMCST0);
912 dev_dbg(mmc_dev(host->mmc),
913 "Spurious interrupt 0x%04x\n", status);
914 /* Disable the interrupt from mmcsd */
915 writel(0, host->base + DAVINCI_MMCIM);
916 return IRQ_NONE;
917 }
918
919 status = readl(host->base + DAVINCI_MMCST0);
920 qstatus = status;
921
922 /* Handle the FIFO first when using PIO for data.
923 * bytes_left decreases to zero as I/O progresses, and status reads
924 * back as zero on later iterations because this controller's status
925 * register (MMCST0) reports each event only once and it is cleared
926 * by the read. So the loop is bounded even in the case of
927 * non-DMA transfers.
928 */
929 while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
930 davinci_fifo_data_trans(host, rw_threshold);
931 status = readl(host->base + DAVINCI_MMCST0);
932 if (!status)
933 break;
934 qstatus |= status;
935 }
936
937 if (qstatus & MMCST0_DATDNE) {
938 /* All blocks sent/received, and CRC checks passed */
939 if (data != NULL) {
940 if ((host->do_dma == 0) && (host->bytes_left > 0)) {
941 /* if datasize < rw_threshold
942 * no RX ints are generated
943 */
944 davinci_fifo_data_trans(host, host->bytes_left);
945 }
946 end_transfer = 1;
947 data->bytes_xfered = data->blocks * data->blksz;
948 } else {
949 dev_err(mmc_dev(host->mmc),
950 "DATDNE with no host->data\n");
951 }
952 }
953
954 if (qstatus & MMCST0_TOUTRD) {
955 /* Read data timeout */
956 data->error = -ETIMEDOUT;
957 end_transfer = 1;
958
959 dev_dbg(mmc_dev(host->mmc),
960 "read data timeout, status %x\n",
961 qstatus);
962
963 davinci_abort_data(host, data);
964 }
965
966 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
967 /* Data CRC error */
968 data->error = -EILSEQ;
969 end_transfer = 1;
970
971 /* NOTE: this controller uses CRCWR to report both CRC
972 * errors and timeouts (on writes). MMCDRSP values are
973 * only weakly documented, but 0x9f was clearly a timeout
974 * case and the two three-bit patterns in various SD specs
975 * (101, 010) aren't part of it ...
976 */
977 if (qstatus & MMCST0_CRCWR) {
978 u32 temp = readb(host->base + DAVINCI_MMCDRSP);
979
980 if (temp == 0x9f)
981 data->error = -ETIMEDOUT;
982 }
983 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
984 (qstatus & MMCST0_CRCWR) ? "write" : "read",
985 (data->error == -ETIMEDOUT) ? "timeout" : "CRC");
986
987 davinci_abort_data(host, data);
988 }
989
990 if (qstatus & MMCST0_TOUTRS) {
991 /* Command timeout */
992 if (host->cmd) {
993 dev_dbg(mmc_dev(host->mmc),
994 "CMD%d timeout, status %x\n",
995 host->cmd->opcode, qstatus);
996 host->cmd->error = -ETIMEDOUT;
997 if (data) {
998 end_transfer = 1;
999 davinci_abort_data(host, data);
1000 } else
1001 end_command = 1;
1002 }
1003 }
1004
1005 if (qstatus & MMCST0_CRCRS) {
1006 /* Command CRC error */
1007 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
1008 if (host->cmd) {
1009 host->cmd->error = -EILSEQ;
1010 end_command = 1;
1011 }
1012 }
1013
1014 if (qstatus & MMCST0_RSPDNE) {
1015 /* End of command phase */
1016 end_command = (int) host->cmd;
1017 }
1018
1019 if (end_command)
1020 mmc_davinci_cmd_done(host, host->cmd);
1021 if (end_transfer)
1022 mmc_davinci_xfer_done(host, data);
1023 return IRQ_HANDLED;
1024}
1025
1026static int mmc_davinci_get_cd(struct mmc_host *mmc)
1027{
1028 struct platform_device *pdev = to_platform_device(mmc->parent);
1029 struct davinci_mmc_config *config = pdev->dev.platform_data;
1030
1031 if (!config || !config->get_cd)
1032 return -ENOSYS;
1033 return config->get_cd(pdev->id);
1034}
1035
1036static int mmc_davinci_get_ro(struct mmc_host *mmc)
1037{
1038 struct platform_device *pdev = to_platform_device(mmc->parent);
1039 struct davinci_mmc_config *config = pdev->dev.platform_data;
1040
1041 if (!config || !config->get_ro)
1042 return -ENOSYS;
1043 return config->get_ro(pdev->id);
1044}
1045
1046static struct mmc_host_ops mmc_davinci_ops = {
1047 .request = mmc_davinci_request,
1048 .set_ios = mmc_davinci_set_ios,
1049 .get_cd = mmc_davinci_get_cd,
1050 .get_ro = mmc_davinci_get_ro,
1051};
1052
1053/*----------------------------------------------------------------------*/
1054
1055#ifdef CONFIG_CPU_FREQ
1056static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
1057 unsigned long val, void *data)
1058{
1059 struct mmc_davinci_host *host;
1060 unsigned int mmc_pclk;
1061 struct mmc_host *mmc;
1062 unsigned long flags;
1063
1064 host = container_of(nb, struct mmc_davinci_host, freq_transition);
1065 mmc = host->mmc;
1066 mmc_pclk = clk_get_rate(host->clk);
1067
1068 if (val == CPUFREQ_POSTCHANGE) {
1069 spin_lock_irqsave(&mmc->lock, flags);
1070 host->mmc_input_clk = mmc_pclk;
1071 calculate_clk_divider(mmc, &mmc->ios);
1072 spin_unlock_irqrestore(&mmc->lock, flags);
1073 }
1074
1075 return 0;
1076}
1077
1078static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
1079{
1080 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;
1081
1082 return cpufreq_register_notifier(&host->freq_transition,
1083 CPUFREQ_TRANSITION_NOTIFIER);
1084}
1085
1086static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1087{
1088 cpufreq_unregister_notifier(&host->freq_transition,
1089 CPUFREQ_TRANSITION_NOTIFIER);
1090}
1091#else
1092static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
1093{
1094 return 0;
1095}
1096
1097static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1098{
1099}
1100#endif
1101static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1102{
1103 /* DAT line portion is disabled and in reset state */
1104 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
1105 host->base + DAVINCI_MMCCTL);
1106
1107 /* CMD line portion is disabled and in reset state */
1108 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
1109 host->base + DAVINCI_MMCCTL);
1110
1111 udelay(10);
1112
1113 writel(0, host->base + DAVINCI_MMCCLK);
1114 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
1115
1116 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
1117 writel(0xFFFF, host->base + DAVINCI_MMCTOD);
1118
1119 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
1120 host->base + DAVINCI_MMCCTL);
1121 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
1122 host->base + DAVINCI_MMCCTL);
1123
1124 udelay(10);
1125}
1126
1127static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1128{
1129 struct davinci_mmc_config *pdata = pdev->dev.platform_data;
1130 struct mmc_davinci_host *host = NULL;
1131 struct mmc_host *mmc = NULL;
1132 struct resource *r, *mem = NULL;
1133 int ret = 0, irq = 0;
1134 size_t mem_size;
1135
1136 /* REVISIT: when we're fully converted, fail if pdata is NULL */
1137
1138 ret = -ENODEV;
1139 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1140 irq = platform_get_irq(pdev, 0);
1141 if (!r || irq == NO_IRQ)
1142 goto out;
1143
1144 ret = -EBUSY;
1145 mem_size = resource_size(r);
1146 mem = request_mem_region(r->start, mem_size, pdev->name);
1147 if (!mem)
1148 goto out;
1149
1150 ret = -ENOMEM;
1151 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
1152 if (!mmc)
1153 goto out;
1154
1155 host = mmc_priv(mmc);
1156 host->mmc = mmc; /* Important */
1157
1158 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1159 if (!r)
1160 goto out;
1161 host->rxdma = r->start;
1162
1163 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1164 if (!r)
1165 goto out;
1166 host->txdma = r->start;
1167
1168 host->mem_res = mem;
1169 host->base = ioremap(mem->start, mem_size);
1170 if (!host->base)
1171 goto out;
1172
1173 ret = -ENXIO;
1174 host->clk = clk_get(&pdev->dev, "MMCSDCLK");
1175 if (IS_ERR(host->clk)) {
1176 ret = PTR_ERR(host->clk);
1177 goto out;
1178 }
1179 clk_enable(host->clk);
1180 host->mmc_input_clk = clk_get_rate(host->clk);
1181
1182 init_mmcsd_host(host);
1183
1184 host->use_dma = use_dma;
1185 host->irq = irq;
1186
1187 if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
1188 host->use_dma = 0;
1189
1190 /* REVISIT: someday, support IRQ-driven card detection. */
1191 mmc->caps |= MMC_CAP_NEEDS_POLL;
1192
1193 if (!pdata || pdata->wires == 4 || pdata->wires == 0)
1194 mmc->caps |= MMC_CAP_4_BIT_DATA;
1195
1196 host->version = pdata->version;
1197
1198 mmc->ops = &mmc_davinci_ops;
1199 mmc->f_min = 312500;
1200 mmc->f_max = 25000000;
1201 if (pdata && pdata->max_freq)
1202 mmc->f_max = pdata->max_freq;
1203 if (pdata && pdata->caps)
1204 mmc->caps |= pdata->caps;
1205 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1206
1207 /* With no IOMMU coalescing pages, each phys_seg is a hw_seg.
1208 * Each hw_seg uses one EDMA parameter RAM slot, always one
1209 * channel and then usually some linked slots.
1210 */
1211 mmc->max_hw_segs = 1 + host->n_link;
1212 mmc->max_phys_segs = mmc->max_hw_segs;
1213
1214 /* EDMA limit per hw segment (one or two MBytes) */
1215 mmc->max_seg_size = MAX_CCNT * rw_threshold;
1216
1217 /* MMC/SD controller limits for multiblock requests */
1218 mmc->max_blk_size = 4095; /* BLEN is 12 bits */
1219 mmc->max_blk_count = 65535; /* NBLK is 16 bits */
1220 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1221
1222 dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs);
1223 dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs);
1224 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
1225 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
1226 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
1227
1228 platform_set_drvdata(pdev, host);
1229
1230 ret = mmc_davinci_cpufreq_register(host);
1231 if (ret) {
1232 dev_err(&pdev->dev, "failed to register cpufreq\n");
1233 goto cpu_freq_fail;
1234 }
1235
1236 ret = mmc_add_host(mmc);
1237 if (ret < 0)
1238 goto out;
1239
1240 ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
1241 if (ret)
1242 goto out;
1243
1244 rename_region(mem, mmc_hostname(mmc));
1245
1246 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
1247 host->use_dma ? "DMA" : "PIO",
1248 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
1249
1250 return 0;
1251
1252out:
1253 mmc_davinci_cpufreq_deregister(host);
1254cpu_freq_fail:
1255 if (host) {
1256 davinci_release_dma_channels(host);
1257
1258 if (host->clk) {
1259 clk_disable(host->clk);
1260 clk_put(host->clk);
1261 }
1262
1263 if (host->base)
1264 iounmap(host->base);
1265 }
1266
1267 if (mmc)
1268 mmc_free_host(mmc);
1269
1270 if (mem)
1271 release_resource(mem);
1272
1273 dev_dbg(&pdev->dev, "probe err %d\n", ret);
1274
1275 return ret;
1276}
1277
1278static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1279{
1280 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1281
1282 platform_set_drvdata(pdev, NULL);
1283 if (host) {
1284 mmc_davinci_cpufreq_deregister(host);
1285
1286 mmc_remove_host(host->mmc);
1287 free_irq(host->irq, host);
1288
1289 davinci_release_dma_channels(host);
1290
1291 clk_disable(host->clk);
1292 clk_put(host->clk);
1293
1294 iounmap(host->base);
1295
1296 release_resource(host->mem_res);
1297
1298 mmc_free_host(host->mmc);
1299 }
1300
1301 return 0;
1302}
1303
1304#ifdef CONFIG_PM
1305static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
1306{
1307 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1308
1309 return mmc_suspend_host(host->mmc, msg);
1310}
1311
1312static int davinci_mmcsd_resume(struct platform_device *pdev)
1313{
1314 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1315
1316 return mmc_resume_host(host->mmc);
1317}
1318#else
1319#define davinci_mmcsd_suspend NULL
1320#define davinci_mmcsd_resume NULL
1321#endif
1322
1323static struct platform_driver davinci_mmcsd_driver = {
1324 .driver = {
1325 .name = "davinci_mmc",
1326 .owner = THIS_MODULE,
1327 },
1328 .remove = __exit_p(davinci_mmcsd_remove),
1329 .suspend = davinci_mmcsd_suspend,
1330 .resume = davinci_mmcsd_resume,
1331};
1332
1333static int __init davinci_mmcsd_init(void)
1334{
1335 return platform_driver_probe(&davinci_mmcsd_driver,
1336 davinci_mmcsd_probe);
1337}
1338module_init(davinci_mmcsd_init);
1339
1340static void __exit davinci_mmcsd_exit(void)
1341{
1342 platform_driver_unregister(&davinci_mmcsd_driver);
1343}
1344module_exit(davinci_mmcsd_exit);
1345
1346MODULE_AUTHOR("Texas Instruments India");
1347MODULE_LICENSE("GPL");
1348MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
1349
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 88671529c45d..60a2b69e54f5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -679,17 +679,17 @@ static int mxcmci_probe(struct platform_device *pdev)
679{ 679{
680 struct mmc_host *mmc; 680 struct mmc_host *mmc;
681 struct mxcmci_host *host = NULL; 681 struct mxcmci_host *host = NULL;
682 struct resource *r; 682 struct resource *iores, *r;
683 int ret = 0, irq; 683 int ret = 0, irq;
684 684
685 printk(KERN_INFO "i.MX SDHC driver\n"); 685 printk(KERN_INFO "i.MX SDHC driver\n");
686 686
687 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 687 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
688 irq = platform_get_irq(pdev, 0); 688 irq = platform_get_irq(pdev, 0);
689 if (!r || irq < 0) 689 if (!iores || irq < 0)
690 return -EINVAL; 690 return -EINVAL;
691 691
692 r = request_mem_region(r->start, resource_size(r), pdev->name); 692 r = request_mem_region(iores->start, resource_size(iores), pdev->name);
693 if (!r) 693 if (!r)
694 return -EBUSY; 694 return -EBUSY;
695 695
@@ -809,7 +809,7 @@ out_iounmap:
809out_free: 809out_free:
810 mmc_free_host(mmc); 810 mmc_free_host(mmc);
811out_release_mem: 811out_release_mem:
812 release_mem_region(host->res->start, resource_size(host->res)); 812 release_mem_region(iores->start, resource_size(iores));
813 return ret; 813 return ret;
814} 814}
815 815
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 5f970e253e50..c6d7e8ecadbf 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1459,8 +1459,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1459 goto err_ioremap; 1459 goto err_ioremap;
1460 1460
1461 host->iclk = clk_get(&pdev->dev, "ick"); 1461 host->iclk = clk_get(&pdev->dev, "ick");
1462 if (IS_ERR(host->iclk)) 1462 if (IS_ERR(host->iclk)) {
1463 ret = PTR_ERR(host->iclk);
1463 goto err_free_mmc_host; 1464 goto err_free_mmc_host;
1465 }
1464 clk_enable(host->iclk); 1466 clk_enable(host->iclk);
1465 1467
1466 host->fclk = clk_get(&pdev->dev, "fck"); 1468 host->fclk = clk_get(&pdev->dev, "fck");
@@ -1500,10 +1502,8 @@ err_free_irq:
1500err_free_fclk: 1502err_free_fclk:
1501 clk_put(host->fclk); 1503 clk_put(host->fclk);
1502err_free_iclk: 1504err_free_iclk:
1503 if (host->iclk != NULL) { 1505 clk_disable(host->iclk);
1504 clk_disable(host->iclk); 1506 clk_put(host->iclk);
1505 clk_put(host->iclk);
1506 }
1507err_free_mmc_host: 1507err_free_mmc_host:
1508 iounmap(host->virt_base); 1508 iounmap(host->virt_base);
1509err_ioremap: 1509err_ioremap:
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index bb47ff465c04..0d783f3e79ed 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -828,7 +828,7 @@ static int pxamci_resume(struct device *dev)
828 return ret; 828 return ret;
829} 829}
830 830
831static struct dev_pm_ops pxamci_pm_ops = { 831static const struct dev_pm_ops pxamci_pm_ops = {
832 .suspend = pxamci_suspend, 832 .suspend = pxamci_suspend,
833 .resume = pxamci_resume, 833 .resume = pxamci_resume,
834}; 834};
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 941a4d35ef8d..d96e1abf2d64 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -820,7 +820,7 @@ fail_request:
820static void finalize_request(struct s3cmci_host *host) 820static void finalize_request(struct s3cmci_host *host)
821{ 821{
822 struct mmc_request *mrq = host->mrq; 822 struct mmc_request *mrq = host->mrq;
823 struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; 823 struct mmc_command *cmd;
824 int debug_as_failure = 0; 824 int debug_as_failure = 0;
825 825
826 if (host->complete_what != COMPLETION_FINALIZE) 826 if (host->complete_what != COMPLETION_FINALIZE)
@@ -828,6 +828,7 @@ static void finalize_request(struct s3cmci_host *host)
828 828
829 if (!mrq) 829 if (!mrq)
830 return; 830 return;
831 cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
831 832
832 if (cmd->data && (cmd->error == 0) && 833 if (cmd->data && (cmd->error == 0) &&
833 (cmd->data->error == 0)) { 834 (cmd->data->error == 0)) {
@@ -1302,10 +1303,8 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1302 if (pdata->no_wprotect) 1303 if (pdata->no_wprotect)
1303 return 0; 1304 return 0;
1304 1305
1305 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); 1306 ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0;
1306 1307 ret ^= pdata->wprotect_invert;
1307 if (pdata->wprotect_invert)
1308 ret = !ret;
1309 1308
1310 return ret; 1309 return ret;
1311} 1310}
@@ -1654,7 +1653,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
1654 goto probe_free_irq; 1653 goto probe_free_irq;
1655 } 1654 }
1656 1655
1657 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); 1656 host->irq_cd = gpio_to_irq(host->pdata->gpio_detect);
1658 1657
1659 if (host->irq_cd >= 0) { 1658 if (host->irq_cd >= 0) {
1660 if (request_irq(host->irq_cd, s3cmci_irq_cd, 1659 if (request_irq(host->irq_cd, s3cmci_irq_cd,
@@ -1892,7 +1891,7 @@ static int s3cmci_resume(struct device *dev)
1892 return mmc_resume_host(mmc); 1891 return mmc_resume_host(mmc);
1893} 1892}
1894 1893
1895static struct dev_pm_ops s3cmci_pm = { 1894static const struct dev_pm_ops s3cmci_pm = {
1896 .suspend = s3cmci_suspend, 1895 .suspend = s3cmci_suspend,
1897 .resume = s3cmci_resume, 1896 .resume = s3cmci_resume,
1898}; 1897};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e0356644d1aa..5c3a1767770a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -285,6 +285,73 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
285 .resume = jmicron_resume, 285 .resume = jmicron_resume,
286}; 286};
287 287
288/* SysKonnect CardBus2SDIO extra registers */
289#define SYSKT_CTRL 0x200
290#define SYSKT_RDFIFO_STAT 0x204
291#define SYSKT_WRFIFO_STAT 0x208
292#define SYSKT_POWER_DATA 0x20c
293#define SYSKT_POWER_330 0xef
294#define SYSKT_POWER_300 0xf8
295#define SYSKT_POWER_184 0xcc
296#define SYSKT_POWER_CMD 0x20d
297#define SYSKT_POWER_START (1 << 7)
298#define SYSKT_POWER_STATUS 0x20e
299#define SYSKT_POWER_STATUS_OK (1 << 0)
300#define SYSKT_BOARD_REV 0x210
301#define SYSKT_CHIP_REV 0x211
302#define SYSKT_CONF_DATA 0x212
303#define SYSKT_CONF_DATA_1V8 (1 << 2)
304#define SYSKT_CONF_DATA_2V5 (1 << 1)
305#define SYSKT_CONF_DATA_3V3 (1 << 0)
306
307static int syskt_probe(struct sdhci_pci_chip *chip)
308{
309 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
310 chip->pdev->class &= ~0x0000FF;
311 chip->pdev->class |= PCI_SDHCI_IFDMA;
312 }
313 return 0;
314}
315
316static int syskt_probe_slot(struct sdhci_pci_slot *slot)
317{
318 int tm, ps;
319
320 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
321 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
322 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
323 "board rev %d.%d, chip rev %d.%d\n",
324 board_rev >> 4, board_rev & 0xf,
325 chip_rev >> 4, chip_rev & 0xf);
326 if (chip_rev >= 0x20)
327 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
328
329 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
330 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
331 udelay(50);
332 tm = 10; /* Wait max 1 ms */
333 do {
334 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
335 if (ps & SYSKT_POWER_STATUS_OK)
336 break;
337 udelay(100);
338 } while (--tm);
339 if (!tm) {
340 dev_err(&slot->chip->pdev->dev,
341 "power regulator never stabilized");
342 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
343 return -ENODEV;
344 }
345
346 return 0;
347}
348
349static const struct sdhci_pci_fixes sdhci_syskt = {
350 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
351 .probe = syskt_probe,
352 .probe_slot = syskt_probe_slot,
353};
354
288static int via_probe(struct sdhci_pci_chip *chip) 355static int via_probe(struct sdhci_pci_chip *chip)
289{ 356{
290 if (chip->pdev->revision == 0x10) 357 if (chip->pdev->revision == 0x10)
@@ -363,6 +430,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
363 }, 430 },
364 431
365 { 432 {
433 .vendor = PCI_VENDOR_ID_SYSKONNECT,
434 .device = 0x8000,
435 .subvendor = PCI_ANY_ID,
436 .subdevice = PCI_ANY_ID,
437 .driver_data = (kernel_ulong_t)&sdhci_syskt,
438 },
439
440 {
366 .vendor = PCI_VENDOR_ID_VIA, 441 .vendor = PCI_VENDOR_ID_VIA,
367 .device = 0x95d0, 442 .device = 0x95d0,
368 .subvendor = PCI_ANY_ID, 443 .subvendor = PCI_ANY_ID,
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 91991b460c45..7cccc8523747 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -591,7 +591,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
591 disable_mmc_irqs(host, TMIO_MASK_ALL); 591 disable_mmc_irqs(host, TMIO_MASK_ALL);
592 592
593 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | 593 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
594 IRQF_TRIGGER_FALLING, "tmio-mmc", host); 594 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
595 if (ret) 595 if (ret)
596 goto unmap_cnf; 596 goto unmap_cnf;
597 597
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e7563a9872d0..5fbf29e1e64f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -43,15 +43,17 @@
43// debugging, turns off buffer write mode if set to 1 43// debugging, turns off buffer write mode if set to 1
44#define FORCE_WORD_WRITE 0 44#define FORCE_WORD_WRITE 0
45 45
46#define MANUFACTURER_INTEL 0x0089 46/* Intel chips */
47#define I82802AB 0x00ad 47#define I82802AB 0x00ad
48#define I82802AC 0x00ac 48#define I82802AC 0x00ac
49#define PF38F4476 0x881c 49#define PF38F4476 0x881c
50#define MANUFACTURER_ST 0x0020 50/* STMicroelectronics chips */
51#define M50LPW080 0x002F 51#define M50LPW080 0x002F
52#define M50FLW080A 0x0080 52#define M50FLW080A 0x0080
53#define M50FLW080B 0x0081 53#define M50FLW080B 0x0081
54/* Atmel chips */
54#define AT49BV640D 0x02de 55#define AT49BV640D 0x02de
56#define AT49BV640DT 0x02db
55 57
56static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 58static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
57static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 59static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -199,6 +201,16 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
199 cfi->cfiq->BufWriteTimeoutMax = 0; 201 cfi->cfiq->BufWriteTimeoutMax = 0;
200} 202}
201 203
204static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
205{
206 struct map_info *map = mtd->priv;
207 struct cfi_private *cfi = map->fldrv_priv;
208 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
209
210 cfip->FeatureSupport |= (1 << 5);
211 mtd->flags |= MTD_POWERUP_LOCK;
212}
213
202#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 214#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
203/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 215/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
204static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) 216static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -283,6 +295,8 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
283 295
284static struct cfi_fixup cfi_fixup_table[] = { 296static struct cfi_fixup cfi_fixup_table[] = {
285 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 297 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
298 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
299 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
286#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 300#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
287 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 301 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
288#endif 302#endif
@@ -294,16 +308,16 @@ static struct cfi_fixup cfi_fixup_table[] = {
294#endif 308#endif
295 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 309 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
296 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 310 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
297 { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, 311 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
298 { 0, 0, NULL, NULL } 312 { 0, 0, NULL, NULL }
299}; 313};
300 314
301static struct cfi_fixup jedec_fixup_table[] = { 315static struct cfi_fixup jedec_fixup_table[] = {
302 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, 316 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
303 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, 317 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
304 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, 318 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
305 { MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, 319 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
306 { MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, 320 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
307 { 0, 0, NULL, NULL } 321 { 0, 0, NULL, NULL }
308}; 322};
309static struct cfi_fixup fixup_table[] = { 323static struct cfi_fixup fixup_table[] = {
@@ -319,7 +333,7 @@ static struct cfi_fixup fixup_table[] = {
319static void cfi_fixup_major_minor(struct cfi_private *cfi, 333static void cfi_fixup_major_minor(struct cfi_private *cfi,
320 struct cfi_pri_intelext *extp) 334 struct cfi_pri_intelext *extp)
321{ 335{
322 if (cfi->mfr == MANUFACTURER_INTEL && 336 if (cfi->mfr == CFI_MFR_INTEL &&
323 cfi->id == PF38F4476 && extp->MinorVersion == '3') 337 cfi->id == PF38F4476 && extp->MinorVersion == '3')
324 extp->MinorVersion = '1'; 338 extp->MinorVersion = '1';
325} 339}
@@ -2235,7 +2249,7 @@ static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2235 2249
2236 /* Some chips have OTP located in the _top_ partition only. 2250 /* Some chips have OTP located in the _top_ partition only.
2237 For example: Intel 28F256L18T (T means top-parameter device) */ 2251 For example: Intel 28F256L18T (T means top-parameter device) */
2238 if (cfi->mfr == MANUFACTURER_INTEL) { 2252 if (cfi->mfr == CFI_MFR_INTEL) {
2239 switch (cfi->id) { 2253 switch (cfi->id) {
2240 case 0x880b: 2254 case 0x880b:
2241 case 0x880c: 2255 case 0x880c:
@@ -2564,6 +2578,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
2564 if (!ret) { 2578 if (!ret) {
2565 map_write(map, CMD(0xff), chip->start); 2579 map_write(map, CMD(0xff), chip->start);
2566 chip->state = FL_SHUTDOWN; 2580 chip->state = FL_SHUTDOWN;
2581 put_chip(map, chip, chip->start);
2567 } 2582 }
2568 spin_unlock(chip->mutex); 2583 spin_unlock(chip->mutex);
2569 } 2584 }
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 94bb61e19047..f3600e8d5382 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -490,10 +490,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
490 } 490 }
491#endif 491#endif
492 492
493 /* FIXME: erase-suspend-program is broken. See
494 http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
495 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
496
497 __module_get(THIS_MODULE); 493 __module_get(THIS_MODULE);
498 return mtd; 494 return mtd;
499 495
@@ -573,7 +569,6 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
573 569
574 if (time_after(jiffies, timeo)) { 570 if (time_after(jiffies, timeo)) {
575 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 571 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
576 spin_unlock(chip->mutex);
577 return -EIO; 572 return -EIO;
578 } 573 }
579 spin_unlock(chip->mutex); 574 spin_unlock(chip->mutex);
@@ -589,15 +584,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
589 return 0; 584 return 0;
590 585
591 case FL_ERASING: 586 case FL_ERASING:
592 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */ 587 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
593 goto sleep; 588 !(mode == FL_READY || mode == FL_POINT ||
594 589 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
595 if (!( mode == FL_READY
596 || mode == FL_POINT
597 || !cfip
598 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
599 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
600 )))
601 goto sleep; 590 goto sleep;
602 591
603 /* We could check to see if we're trying to access the sector 592 /* We could check to see if we're trying to access the sector
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index c5a84fda5410..ca584d0380b4 100755
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -71,6 +71,13 @@ int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
71 cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); 71 cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
72 if (cfi_qry_present(map, base, cfi)) 72 if (cfi_qry_present(map, base, cfi))
73 return 1; 73 return 1;
74 /* some old SST chips, e.g. 39VF160x/39VF320x */
75 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
76 cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
77 cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
78 cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
79 if (cfi_qry_present(map, base, cfi))
80 return 1;
74 /* QRY not found */ 81 /* QRY not found */
75 return 0; 82 return 0;
76} 83}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 736a3be265f2..1bec5e1ce6ac 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -142,8 +142,8 @@
142 142
143/* ST - www.st.com */ 143/* ST - www.st.com */
144#define M29F800AB 0x0058 144#define M29F800AB 0x0058
145#define M29W800DT 0x00D7 145#define M29W800DT 0x22D7
146#define M29W800DB 0x005B 146#define M29W800DB 0x225B
147#define M29W400DT 0x00EE 147#define M29W400DT 0x00EE
148#define M29W400DB 0x00EF 148#define M29W400DB 0x00EF
149#define M29W160DT 0x22C4 149#define M29W160DT 0x22C4
@@ -1575,7 +1575,7 @@ static const struct amd_flash_info jedec_table[] = {
1575 .dev_id = M29W800DT, 1575 .dev_id = M29W800DT,
1576 .name = "ST M29W800DT", 1576 .name = "ST M29W800DT",
1577 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1577 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1578 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1578 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1579 .dev_size = SIZE_1MiB, 1579 .dev_size = SIZE_1MiB,
1580 .cmd_set = P_ID_AMD_STD, 1580 .cmd_set = P_ID_AMD_STD,
1581 .nr_regions = 4, 1581 .nr_regions = 4,
@@ -1590,7 +1590,7 @@ static const struct amd_flash_info jedec_table[] = {
1590 .dev_id = M29W800DB, 1590 .dev_id = M29W800DB,
1591 .name = "ST M29W800DB", 1591 .name = "ST M29W800DB",
1592 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1592 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1593 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1593 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1594 .dev_size = SIZE_1MiB, 1594 .dev_size = SIZE_1MiB,
1595 .cmd_set = P_ID_AMD_STD, 1595 .cmd_set = P_ID_AMD_STD,
1596 .nr_regions = 4, 1596 .nr_regions = 4,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4c19269de91a..f3f4768d6e18 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -22,6 +22,7 @@
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/math64.h> 23#include <linux/math64.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/mod_devicetable.h>
25 26
26#include <linux/mtd/mtd.h> 27#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
@@ -29,9 +30,6 @@
29#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
30#include <linux/spi/flash.h> 31#include <linux/spi/flash.h>
31 32
32
33#define FLASH_PAGESIZE 256
34
35/* Flash opcodes. */ 33/* Flash opcodes. */
36#define OPCODE_WREN 0x06 /* Write enable */ 34#define OPCODE_WREN 0x06 /* Write enable */
37#define OPCODE_RDSR 0x05 /* Read status register */ 35#define OPCODE_RDSR 0x05 /* Read status register */
@@ -61,7 +59,7 @@
61 59
62/* Define max times to check status register before we give up. */ 60/* Define max times to check status register before we give up. */
63#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ 61#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
64#define CMD_SIZE 4 62#define MAX_CMD_SIZE 4
65 63
66#ifdef CONFIG_M25PXX_USE_FAST_READ 64#ifdef CONFIG_M25PXX_USE_FAST_READ
67#define OPCODE_READ OPCODE_FAST_READ 65#define OPCODE_READ OPCODE_FAST_READ
@@ -78,8 +76,10 @@ struct m25p {
78 struct mutex lock; 76 struct mutex lock;
79 struct mtd_info mtd; 77 struct mtd_info mtd;
80 unsigned partitioned:1; 78 unsigned partitioned:1;
79 u16 page_size;
80 u16 addr_width;
81 u8 erase_opcode; 81 u8 erase_opcode;
82 u8 command[CMD_SIZE + FAST_READ_DUMMY_BYTE]; 82 u8 *command;
83}; 83};
84 84
85static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) 85static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -198,6 +198,19 @@ static int erase_chip(struct m25p *flash)
198 return 0; 198 return 0;
199} 199}
200 200
201static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
202{
203 /* opcode is in cmd[0] */
204 cmd[1] = addr >> (flash->addr_width * 8 - 8);
205 cmd[2] = addr >> (flash->addr_width * 8 - 16);
206 cmd[3] = addr >> (flash->addr_width * 8 - 24);
207}
208
209static int m25p_cmdsz(struct m25p *flash)
210{
211 return 1 + flash->addr_width;
212}
213
201/* 214/*
202 * Erase one sector of flash memory at offset ``offset'' which is any 215 * Erase one sector of flash memory at offset ``offset'' which is any
203 * address within the sector which should be erased. 216 * address within the sector which should be erased.
@@ -219,11 +232,9 @@ static int erase_sector(struct m25p *flash, u32 offset)
219 232
220 /* Set up command buffer. */ 233 /* Set up command buffer. */
221 flash->command[0] = flash->erase_opcode; 234 flash->command[0] = flash->erase_opcode;
222 flash->command[1] = offset >> 16; 235 m25p_addr2cmd(flash, offset, flash->command);
223 flash->command[2] = offset >> 8;
224 flash->command[3] = offset;
225 236
226 spi_write(flash->spi, flash->command, CMD_SIZE); 237 spi_write(flash->spi, flash->command, m25p_cmdsz(flash));
227 238
228 return 0; 239 return 0;
229} 240}
@@ -325,7 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
325 * Should add 1 byte DUMMY_BYTE. 336 * Should add 1 byte DUMMY_BYTE.
326 */ 337 */
327 t[0].tx_buf = flash->command; 338 t[0].tx_buf = flash->command;
328 t[0].len = CMD_SIZE + FAST_READ_DUMMY_BYTE; 339 t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
329 spi_message_add_tail(&t[0], &m); 340 spi_message_add_tail(&t[0], &m);
330 341
331 t[1].rx_buf = buf; 342 t[1].rx_buf = buf;
@@ -352,13 +363,11 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
352 363
353 /* Set up the write data buffer. */ 364 /* Set up the write data buffer. */
354 flash->command[0] = OPCODE_READ; 365 flash->command[0] = OPCODE_READ;
355 flash->command[1] = from >> 16; 366 m25p_addr2cmd(flash, from, flash->command);
356 flash->command[2] = from >> 8;
357 flash->command[3] = from;
358 367
359 spi_sync(flash->spi, &m); 368 spi_sync(flash->spi, &m);
360 369
361 *retlen = m.actual_length - CMD_SIZE - FAST_READ_DUMMY_BYTE; 370 *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
362 371
363 mutex_unlock(&flash->lock); 372 mutex_unlock(&flash->lock);
364 373
@@ -396,7 +405,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
396 memset(t, 0, (sizeof t)); 405 memset(t, 0, (sizeof t));
397 406
398 t[0].tx_buf = flash->command; 407 t[0].tx_buf = flash->command;
399 t[0].len = CMD_SIZE; 408 t[0].len = m25p_cmdsz(flash);
400 spi_message_add_tail(&t[0], &m); 409 spi_message_add_tail(&t[0], &m);
401 410
402 t[1].tx_buf = buf; 411 t[1].tx_buf = buf;
@@ -414,41 +423,36 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
414 423
415 /* Set up the opcode in the write buffer. */ 424 /* Set up the opcode in the write buffer. */
416 flash->command[0] = OPCODE_PP; 425 flash->command[0] = OPCODE_PP;
417 flash->command[1] = to >> 16; 426 m25p_addr2cmd(flash, to, flash->command);
418 flash->command[2] = to >> 8;
419 flash->command[3] = to;
420 427
421 /* what page do we start with? */ 428 page_offset = to & (flash->page_size - 1);
422 page_offset = to % FLASH_PAGESIZE;
423 429
424 /* do all the bytes fit onto one page? */ 430 /* do all the bytes fit onto one page? */
425 if (page_offset + len <= FLASH_PAGESIZE) { 431 if (page_offset + len <= flash->page_size) {
426 t[1].len = len; 432 t[1].len = len;
427 433
428 spi_sync(flash->spi, &m); 434 spi_sync(flash->spi, &m);
429 435
430 *retlen = m.actual_length - CMD_SIZE; 436 *retlen = m.actual_length - m25p_cmdsz(flash);
431 } else { 437 } else {
432 u32 i; 438 u32 i;
433 439
434 /* the size of data remaining on the first page */ 440 /* the size of data remaining on the first page */
435 page_size = FLASH_PAGESIZE - page_offset; 441 page_size = flash->page_size - page_offset;
436 442
437 t[1].len = page_size; 443 t[1].len = page_size;
438 spi_sync(flash->spi, &m); 444 spi_sync(flash->spi, &m);
439 445
440 *retlen = m.actual_length - CMD_SIZE; 446 *retlen = m.actual_length - m25p_cmdsz(flash);
441 447
442 /* write everything in PAGESIZE chunks */ 448 /* write everything in flash->page_size chunks */
443 for (i = page_size; i < len; i += page_size) { 449 for (i = page_size; i < len; i += page_size) {
444 page_size = len - i; 450 page_size = len - i;
445 if (page_size > FLASH_PAGESIZE) 451 if (page_size > flash->page_size)
446 page_size = FLASH_PAGESIZE; 452 page_size = flash->page_size;
447 453
448 /* write the next page to flash */ 454 /* write the next page to flash */
449 flash->command[1] = (to + i) >> 16; 455 m25p_addr2cmd(flash, to + i, flash->command);
450 flash->command[2] = (to + i) >> 8;
451 flash->command[3] = (to + i);
452 456
453 t[1].tx_buf = buf + i; 457 t[1].tx_buf = buf + i;
454 t[1].len = page_size; 458 t[1].len = page_size;
@@ -460,7 +464,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
460 spi_sync(flash->spi, &m); 464 spi_sync(flash->spi, &m);
461 465
462 if (retlen) 466 if (retlen)
463 *retlen += m.actual_length - CMD_SIZE; 467 *retlen += m.actual_length - m25p_cmdsz(flash);
464 } 468 }
465 } 469 }
466 470
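As a side note (not part of the patch), the page-chunking logic above can be followed with a small host-side example. The offset, length and 256-byte page size below are illustrative, assuming page_size is a power of two so the & mask is equivalent to the old modulo.

#include <stdio.h>

int main(void)
{
	const unsigned page_size = 256;			/* flash->page_size */
	unsigned to = 0x1f0, len = 600;			/* example write */
	unsigned page_offset = to & (page_size - 1);	/* == to % page_size for powers of two */
	unsigned first = page_size - page_offset;	/* bytes left on the first page */
	unsigned i, chunk = 0;

	if (first > len)
		first = len;
	printf("chunk: %u bytes at 0x%x\n", first, to);
	for (i = first; i < len; i += chunk) {
		chunk = len - i;
		if (chunk > page_size)
			chunk = page_size;
		printf("chunk: %u bytes at 0x%x\n", chunk, to + i);
	}
	return 0;
}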
@@ -492,7 +496,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
492 memset(t, 0, (sizeof t)); 496 memset(t, 0, (sizeof t));
493 497
494 t[0].tx_buf = flash->command; 498 t[0].tx_buf = flash->command;
495 t[0].len = CMD_SIZE; 499 t[0].len = m25p_cmdsz(flash);
496 spi_message_add_tail(&t[0], &m); 500 spi_message_add_tail(&t[0], &m);
497 501
498 t[1].tx_buf = buf; 502 t[1].tx_buf = buf;
@@ -511,9 +515,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
511 /* Start write from odd address. */ 515 /* Start write from odd address. */
512 if (actual) { 516 if (actual) {
513 flash->command[0] = OPCODE_BP; 517 flash->command[0] = OPCODE_BP;
514 flash->command[1] = to >> 16; 518 m25p_addr2cmd(flash, to, flash->command);
515 flash->command[2] = to >> 8;
516 flash->command[3] = to;
517 519
518 /* write one byte. */ 520 /* write one byte. */
519 t[1].len = 1; 521 t[1].len = 1;
@@ -521,17 +523,15 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
521 ret = wait_till_ready(flash); 523 ret = wait_till_ready(flash);
522 if (ret) 524 if (ret)
523 goto time_out; 525 goto time_out;
524 *retlen += m.actual_length - CMD_SIZE; 526 *retlen += m.actual_length - m25p_cmdsz(flash);
525 } 527 }
526 to += actual; 528 to += actual;
527 529
528 flash->command[0] = OPCODE_AAI_WP; 530 flash->command[0] = OPCODE_AAI_WP;
529 flash->command[1] = to >> 16; 531 m25p_addr2cmd(flash, to, flash->command);
530 flash->command[2] = to >> 8;
531 flash->command[3] = to;
532 532
533 /* Write out most of the data here. */ 533 /* Write out most of the data here. */
534 cmd_sz = CMD_SIZE; 534 cmd_sz = m25p_cmdsz(flash);
535 for (; actual < len - 1; actual += 2) { 535 for (; actual < len - 1; actual += 2) {
536 t[0].len = cmd_sz; 536 t[0].len = cmd_sz;
537 /* write two bytes. */ 537 /* write two bytes. */
@@ -555,10 +555,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
555 if (actual != len) { 555 if (actual != len) {
556 write_enable(flash); 556 write_enable(flash);
557 flash->command[0] = OPCODE_BP; 557 flash->command[0] = OPCODE_BP;
558 flash->command[1] = to >> 16; 558 m25p_addr2cmd(flash, to, flash->command);
559 flash->command[2] = to >> 8; 559 t[0].len = m25p_cmdsz(flash);
560 flash->command[3] = to;
561 t[0].len = CMD_SIZE;
562 t[1].len = 1; 560 t[1].len = 1;
563 t[1].tx_buf = buf + actual; 561 t[1].tx_buf = buf + actual;
564 562
@@ -566,7 +564,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
566 ret = wait_till_ready(flash); 564 ret = wait_till_ready(flash);
567 if (ret) 565 if (ret)
568 goto time_out; 566 goto time_out;
569 *retlen += m.actual_length - CMD_SIZE; 567 *retlen += m.actual_length - m25p_cmdsz(flash);
570 write_disable(flash); 568 write_disable(flash);
571 } 569 }
572 570
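A rough host-side sketch (not part of the patch) of how the sst_write() path above carves up a transfer: one byte-program to reach an even address when the start is odd (as the "Start write from odd address" branch suggests), then two bytes per AAI command, then an optional trailing byte. All numbers are example values.

#include <stdio.h>

int main(void)
{
	unsigned long to = 0x1001, len = 10;		/* example: odd start address */
	unsigned long head = to % 2;			/* single OPCODE_BP byte to align */
	unsigned long pairs = (len - head) / 2;		/* 2-byte OPCODE_AAI_WP commands */
	unsigned long tail = len - head - 2 * pairs;	/* trailing OPCODE_BP byte */

	/* 0x1001, 10 bytes -> 1 head byte, 4 AAI pairs, 1 trailing byte */
	printf("head: %lu, AAI pairs: %lu, tail: %lu\n", head, pairs, tail);
	return 0;
}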
@@ -582,8 +580,6 @@ time_out:
582 */ 580 */
583 581
584struct flash_info { 582struct flash_info {
585 char *name;
586
587 /* JEDEC id zero means "no ID" (most older chips); otherwise it has 583 /* JEDEC id zero means "no ID" (most older chips); otherwise it has
588 * a high byte of zero plus three data bytes: the manufacturer id, 584 * a high byte of zero plus three data bytes: the manufacturer id,
589 * then a two byte device id. 585 * then a two byte device id.
@@ -597,87 +593,119 @@ struct flash_info {
597 unsigned sector_size; 593 unsigned sector_size;
598 u16 n_sectors; 594 u16 n_sectors;
599 595
596 u16 page_size;
597 u16 addr_width;
598
600 u16 flags; 599 u16 flags;
601#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */ 600#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
601#define M25P_NO_ERASE 0x02 /* No erase command needed */
602}; 602};
603 603
604#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
605 ((kernel_ulong_t)&(struct flash_info) { \
606 .jedec_id = (_jedec_id), \
607 .ext_id = (_ext_id), \
608 .sector_size = (_sector_size), \
609 .n_sectors = (_n_sectors), \
610 .page_size = 256, \
611 .addr_width = 3, \
612 .flags = (_flags), \
613 })
614
615#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
616 ((kernel_ulong_t)&(struct flash_info) { \
617 .sector_size = (_sector_size), \
618 .n_sectors = (_n_sectors), \
619 .page_size = (_page_size), \
620 .addr_width = (_addr_width), \
621 .flags = M25P_NO_ERASE, \
622 })
604 623
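Purely as an aside (not part of the patch): the INFO()/CAT25_INFO() macros above rely on a compound literal whose address is stashed in the integer-sized driver_data field of the ID table and cast back in probe. A stripped-down host-side analogue, with made-up names and values:

#include <stdint.h>
#include <stdio.h>

struct fake_flash_info {
	unsigned sector_size;
	unsigned n_sectors;
};

struct fake_id {
	const char *name;
	uintptr_t driver_data;			/* stands in for kernel_ulong_t */
};

#define FAKE_INFO(_ss, _ns) \
	((uintptr_t)&(struct fake_flash_info){ .sector_size = (_ss), .n_sectors = (_ns) })

int main(void)
{
	/* table kept local so the sketch stays strictly standard C */
	const struct fake_id ids[] = {
		{ "m25p32", FAKE_INFO(64 * 1024, 64) },
		{ 0 },				/* terminator, as in m25p_ids[] */
	};
	const struct fake_flash_info *info =
		(const struct fake_flash_info *)ids[0].driver_data;

	printf("%s: %u sectors of %u bytes\n",
	       ids[0].name, info->n_sectors, info->sector_size);
	return 0;
}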
605/* NOTE: double check command sets and memory organization when you add 624/* NOTE: double check command sets and memory organization when you add
606 * more flash chips. This current list focusses on newer chips, which 625 * more flash chips. This current list focusses on newer chips, which
607 * have been converging on command sets which include JEDEC ID. 626 * have been converging on command sets which include JEDEC ID.
608 */ 627 */
609static struct flash_info __devinitdata m25p_data [] = { 628static const struct spi_device_id m25p_ids[] = {
610
611 /* Atmel -- some are (confusingly) marketed as "DataFlash" */ 629 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
612 { "at25fs010", 0x1f6601, 0, 32 * 1024, 4, SECT_4K, }, 630 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
613 { "at25fs040", 0x1f6604, 0, 64 * 1024, 8, SECT_4K, }, 631 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
614 632
615 { "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, }, 633 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
616 { "at25df641", 0x1f4800, 0, 64 * 1024, 128, SECT_4K, }, 634 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
617 635
618 { "at26f004", 0x1f0400, 0, 64 * 1024, 8, SECT_4K, }, 636 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
619 { "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, }, 637 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
620 { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, }, 638 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
621 { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, 639 { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
622 640
623 /* Macronix */ 641 /* Macronix */
624 { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, }, 642 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
625 { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, }, 643 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
626 { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, }, 644 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
627 { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, }, 645 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
646 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
628 647
629 /* Spansion -- single (large) sector size only, at least 648 /* Spansion -- single (large) sector size only, at least
630 * for the chips listed here (without boot sectors). 649 * for the chips listed here (without boot sectors).
631 */ 650 */
632 { "s25sl004a", 0x010212, 0, 64 * 1024, 8, }, 651 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
633 { "s25sl008a", 0x010213, 0, 64 * 1024, 16, }, 652 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
634 { "s25sl016a", 0x010214, 0, 64 * 1024, 32, }, 653 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
635 { "s25sl032a", 0x010215, 0, 64 * 1024, 64, }, 654 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
636 { "s25sl064a", 0x010216, 0, 64 * 1024, 128, }, 655 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
637 { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, 656 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
638 { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, }, 657 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
639 { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, }, 658 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
640 { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, }, 659 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
641 660
642 /* SST -- large erase sizes are "overlays", "sectors" are 4K */ 661 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
643 { "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, }, 662 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
644 { "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, }, 663 { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
645 { "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, }, 664 { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
646 { "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, }, 665 { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
647 { "sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K, }, 666 { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
648 { "sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K, }, 667 { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
649 { "sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K, }, 668 { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
650 { "sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K, }, 669 { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
651 670
652 /* ST Microelectronics -- newer production may have feature updates */ 671 /* ST Microelectronics -- newer production may have feature updates */
653 { "m25p05", 0x202010, 0, 32 * 1024, 2, }, 672 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
654 { "m25p10", 0x202011, 0, 32 * 1024, 4, }, 673 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
655 { "m25p20", 0x202012, 0, 64 * 1024, 4, }, 674 { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
656 { "m25p40", 0x202013, 0, 64 * 1024, 8, }, 675 { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
657 { "m25p80", 0, 0, 64 * 1024, 16, }, 676 { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
658 { "m25p16", 0x202015, 0, 64 * 1024, 32, }, 677 { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
659 { "m25p32", 0x202016, 0, 64 * 1024, 64, }, 678 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
660 { "m25p64", 0x202017, 0, 64 * 1024, 128, }, 679 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
661 { "m25p128", 0x202018, 0, 256 * 1024, 64, }, 680 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
662 681
663 { "m45pe10", 0x204011, 0, 64 * 1024, 2, }, 682 { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
664 { "m45pe80", 0x204014, 0, 64 * 1024, 16, }, 683 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
665 { "m45pe16", 0x204015, 0, 64 * 1024, 32, }, 684 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
666 685
667 { "m25pe80", 0x208014, 0, 64 * 1024, 16, }, 686 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
668 { "m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K, }, 687 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
669 688
670 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ 689 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
671 { "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, }, 690 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
672 { "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, }, 691 { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
673 { "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, }, 692 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
674 { "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, }, 693 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
675 { "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, }, 694 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
676 { "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, }, 695 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
677 { "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, }, 696 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
697
698 /* Catalyst / On Semiconductor -- non-JEDEC */
699 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
700 { "cat25c03", CAT25_INFO( 32, 8, 16, 2) },
701 { "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
702 { "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
703 { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
704 { },
678}; 705};
706MODULE_DEVICE_TABLE(spi, m25p_ids);
679 707
680static struct flash_info *__devinit jedec_probe(struct spi_device *spi) 708static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
681{ 709{
682 int tmp; 710 int tmp;
683 u8 code = OPCODE_RDID; 711 u8 code = OPCODE_RDID;
@@ -702,18 +730,24 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
702 jedec = jedec << 8; 730 jedec = jedec << 8;
703 jedec |= id[2]; 731 jedec |= id[2];
704 732
733 /*
734 * Some chips (like Numonyx M25P80) have JEDEC and non-JEDEC variants,
735 * which depend on technology process. Officially RDID command doesn't
736 * exist for non-JEDEC chips, but for compatibility they return ID 0.
737 */
738 if (jedec == 0)
739 return NULL;
740
705 ext_jedec = id[3] << 8 | id[4]; 741 ext_jedec = id[3] << 8 | id[4];
706 742
707 for (tmp = 0, info = m25p_data; 743 for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
708 tmp < ARRAY_SIZE(m25p_data); 744 info = (void *)m25p_ids[tmp].driver_data;
709 tmp++, info++) {
710 if (info->jedec_id == jedec) { 745 if (info->jedec_id == jedec) {
711 if (info->ext_id != 0 && info->ext_id != ext_jedec) 746 if (info->ext_id != 0 && info->ext_id != ext_jedec)
712 continue; 747 continue;
713 return info; 748 return &m25p_ids[tmp];
714 } 749 }
715 } 750 }
716 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
717 return NULL; 751 return NULL;
718} 752}
719 753
@@ -725,6 +759,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
725 */ 759 */
726static int __devinit m25p_probe(struct spi_device *spi) 760static int __devinit m25p_probe(struct spi_device *spi)
727{ 761{
762 const struct spi_device_id *id = spi_get_device_id(spi);
728 struct flash_platform_data *data; 763 struct flash_platform_data *data;
729 struct m25p *flash; 764 struct m25p *flash;
730 struct flash_info *info; 765 struct flash_info *info;
@@ -737,50 +772,65 @@ static int __devinit m25p_probe(struct spi_device *spi)
737 */ 772 */
738 data = spi->dev.platform_data; 773 data = spi->dev.platform_data;
739 if (data && data->type) { 774 if (data && data->type) {
740 for (i = 0, info = m25p_data; 775 const struct spi_device_id *plat_id;
741 i < ARRAY_SIZE(m25p_data);
742 i++, info++) {
743 if (strcmp(data->type, info->name) == 0)
744 break;
745 }
746 776
747 /* unrecognized chip? */ 777 for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
748 if (i == ARRAY_SIZE(m25p_data)) { 778 plat_id = &m25p_ids[i];
749 DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n", 779 if (strcmp(data->type, plat_id->name))
750 dev_name(&spi->dev), data->type); 780 continue;
751 info = NULL; 781 break;
752
753 /* recognized; is that chip really what's there? */
754 } else if (info->jedec_id) {
755 struct flash_info *chip = jedec_probe(spi);
756
757 if (!chip || chip != info) {
758 dev_warn(&spi->dev, "found %s, expected %s\n",
759 chip ? chip->name : "UNKNOWN",
760 info->name);
761 info = NULL;
762 }
763 } 782 }
764 } else
765 info = jedec_probe(spi);
766 783
767 if (!info) 784 if (plat_id)
768 return -ENODEV; 785 id = plat_id;
786 else
787 dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
788 }
789
790 info = (void *)id->driver_data;
791
792 if (info->jedec_id) {
793 const struct spi_device_id *jid;
794
795 jid = jedec_probe(spi);
796 if (!jid) {
797 dev_info(&spi->dev, "non-JEDEC variant of %s\n",
798 id->name);
799 } else if (jid != id) {
800 /*
801 * JEDEC knows better, so overwrite platform ID. We
802 * can't trust partitions any longer, but we'll let
803 * mtd apply them anyway, since some partitions may be
804 * marked read-only, and we don't want to lose that
805 * information, even if it's not 100% accurate.
806 */
807 dev_warn(&spi->dev, "found %s, expected %s\n",
808 jid->name, id->name);
809 id = jid;
810 info = (void *)jid->driver_data;
811 }
812 }
769 813
770 flash = kzalloc(sizeof *flash, GFP_KERNEL); 814 flash = kzalloc(sizeof *flash, GFP_KERNEL);
771 if (!flash) 815 if (!flash)
772 return -ENOMEM; 816 return -ENOMEM;
817 flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
818 if (!flash->command) {
819 kfree(flash);
820 return -ENOMEM;
821 }
773 822
774 flash->spi = spi; 823 flash->spi = spi;
775 mutex_init(&flash->lock); 824 mutex_init(&flash->lock);
776 dev_set_drvdata(&spi->dev, flash); 825 dev_set_drvdata(&spi->dev, flash);
777 826
778 /* 827 /*
779 * Atmel serial flash tend to power up 828 * Atmel and SST serial flash tend to power
780 * with the software protection bits set 829 * up with the software protection bits set
781 */ 830 */
782 831
783 if (info->jedec_id >> 16 == 0x1f) { 832 if (info->jedec_id >> 16 == 0x1f ||
833 info->jedec_id >> 16 == 0xbf) {
784 write_enable(flash); 834 write_enable(flash);
785 write_sr(flash, 0); 835 write_sr(flash, 0);
786 } 836 }
@@ -812,9 +862,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
812 flash->mtd.erasesize = info->sector_size; 862 flash->mtd.erasesize = info->sector_size;
813 } 863 }
814 864
865 if (info->flags & M25P_NO_ERASE)
866 flash->mtd.flags |= MTD_NO_ERASE;
867
815 flash->mtd.dev.parent = &spi->dev; 868 flash->mtd.dev.parent = &spi->dev;
869 flash->page_size = info->page_size;
870 flash->addr_width = info->addr_width;
816 871
817 dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name, 872 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
818 (long long)flash->mtd.size >> 10); 873 (long long)flash->mtd.size >> 10);
819 874
820 DEBUG(MTD_DEBUG_LEVEL2, 875 DEBUG(MTD_DEBUG_LEVEL2,
@@ -888,8 +943,10 @@ static int __devexit m25p_remove(struct spi_device *spi)
888 status = del_mtd_partitions(&flash->mtd); 943 status = del_mtd_partitions(&flash->mtd);
889 else 944 else
890 status = del_mtd_device(&flash->mtd); 945 status = del_mtd_device(&flash->mtd);
891 if (status == 0) 946 if (status == 0) {
947 kfree(flash->command);
892 kfree(flash); 948 kfree(flash);
949 }
893 return 0; 950 return 0;
894} 951}
895 952
@@ -900,6 +957,7 @@ static struct spi_driver m25p80_driver = {
900 .bus = &spi_bus_type, 957 .bus = &spi_bus_type,
901 .owner = THIS_MODULE, 958 .owner = THIS_MODULE,
902 }, 959 },
960 .id_table = m25p_ids,
903 .probe = m25p_probe, 961 .probe = m25p_probe,
904 .remove = __devexit_p(m25p_remove), 962 .remove = __devexit_p(m25p_remove),
905 963
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 93e3627be74c..19817404ce7d 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -636,6 +636,7 @@ add_dataflash_otp(struct spi_device *spi, char *name,
636 struct mtd_info *device; 636 struct mtd_info *device;
637 struct flash_platform_data *pdata = spi->dev.platform_data; 637 struct flash_platform_data *pdata = spi->dev.platform_data;
638 char *otp_tag = ""; 638 char *otp_tag = "";
639 int err = 0;
639 640
640 priv = kzalloc(sizeof *priv, GFP_KERNEL); 641 priv = kzalloc(sizeof *priv, GFP_KERNEL);
641 if (!priv) 642 if (!priv)
@@ -693,13 +694,23 @@ add_dataflash_otp(struct spi_device *spi, char *name,
693 694
694 if (nr_parts > 0) { 695 if (nr_parts > 0) {
695 priv->partitioned = 1; 696 priv->partitioned = 1;
696 return add_mtd_partitions(device, parts, nr_parts); 697 err = add_mtd_partitions(device, parts, nr_parts);
698 goto out;
697 } 699 }
698 } else if (pdata && pdata->nr_parts) 700 } else if (pdata && pdata->nr_parts)
699 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 701 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
700 pdata->nr_parts, device->name); 702 pdata->nr_parts, device->name);
701 703
702 return add_mtd_device(device) == 1 ? -ENODEV : 0; 704 if (add_mtd_device(device) == 1)
705 err = -ENODEV;
706
707out:
708 if (!err)
709 return 0;
710
711 dev_set_drvdata(&spi->dev, NULL);
712 kfree(priv);
713 return err;
703} 714}
704 715
705static inline int __devinit 716static inline int __devinit
@@ -932,8 +943,10 @@ static int __devexit dataflash_remove(struct spi_device *spi)
932 status = del_mtd_partitions(&flash->mtd); 943 status = del_mtd_partitions(&flash->mtd);
933 else 944 else
934 status = del_mtd_device(&flash->mtd); 945 status = del_mtd_device(&flash->mtd);
935 if (status == 0) 946 if (status == 0) {
947 dev_set_drvdata(&spi->dev, NULL);
936 kfree(flash); 948 kfree(flash);
949 }
937 return status; 950 return status;
938} 951}
939 952
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 847e214ade59..4c364d44ad59 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -359,12 +359,6 @@ config MTD_SA1100
359 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ. 359 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
360 If you have such a board, say 'Y'. 360 If you have such a board, say 'Y'.
361 361
362config MTD_IPAQ
363 tristate "CFI Flash device mapped on Compaq/HP iPAQ"
364 depends on IPAQ_HANDHELD && MTD_CFI
365 help
366 This provides a driver for the on-board flash of the iPAQ.
367
368config MTD_DC21285 362config MTD_DC21285
369 tristate "CFI Flash device mapped on DC21285 Footbridge" 363 tristate "CFI Flash device mapped on DC21285 Footbridge"
370 depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS 364 depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ae2f6dbe43c3..ce315214ff2b 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -24,12 +24,12 @@ obj-$(CONFIG_MTD_CEIVA) += ceiva.o
24obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o 24obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
25obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 25obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
26obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 26obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
27obj-$(CONFIG_MTD_PISMO) += pismo.o
27obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 28obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
28obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 29obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
29obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 30obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
30obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o 31obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
31obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o 32obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
32obj-$(CONFIG_MTD_IPAQ) += ipaq-flash.o
33obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o 33obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
34obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o 34obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
35obj-$(CONFIG_MTD_NETSC520) += netsc520.o 35obj-$(CONFIG_MTD_NETSC520) += netsc520.o
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
deleted file mode 100644
index 76708e796b70..000000000000
--- a/drivers/mtd/maps/ipaq-flash.c
+++ /dev/null
@@ -1,460 +0,0 @@
1/*
2 * Flash memory access on iPAQ Handhelds (either SA1100 or PXA250 based)
3 *
4 * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
5 * (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
6 * (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
7 */
8
9#include <linux/module.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/spinlock.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <asm/page.h>
16#include <asm/mach-types.h>
17#include <asm/system.h>
18#include <asm/errno.h>
19
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23#ifdef CONFIG_MTD_CONCAT
24#include <linux/mtd/concat.h>
25#endif
26
27#include <mach/hardware.h>
28#include <mach/h3600.h>
29#include <asm/io.h>
30
31
32#ifndef CONFIG_IPAQ_HANDHELD
33#error This is for iPAQ Handhelds only
34#endif
35#ifdef CONFIG_SA1100_JORNADA56X
36
37static void jornada56x_set_vpp(struct map_info *map, int vpp)
38{
39 if (vpp)
40 GPSR = GPIO_GPIO26;
41 else
42 GPCR = GPIO_GPIO26;
43 GPDR |= GPIO_GPIO26;
44}
45
46#endif
47
48#ifdef CONFIG_SA1100_JORNADA720
49
50static void jornada720_set_vpp(struct map_info *map, int vpp)
51{
52 if (vpp)
53 PPSR |= 0x80;
54 else
55 PPSR &= ~0x80;
56 PPDR |= 0x80;
57}
58
59#endif
60
61#define MAX_IPAQ_CS 2 /* Number of CS we are going to test */
62
63#define IPAQ_MAP_INIT(X) \
64 { \
65 name: "IPAQ flash " X, \
66 }
67
68
69static struct map_info ipaq_map[MAX_IPAQ_CS] = {
70 IPAQ_MAP_INIT("bank 1"),
71 IPAQ_MAP_INIT("bank 2")
72};
73
74static struct mtd_info *my_sub_mtd[MAX_IPAQ_CS] = {
75 NULL,
76 NULL
77};
78
79/*
80 * Here are partition information for all known IPAQ-based devices.
81 * See include/linux/mtd/partitions.h for definition of the mtd_partition
82 * structure.
83 *
84 * The *_max_flash_size is the maximum possible mapped flash size which
85 * is not necessarily the actual flash size. It must be no more than
86 * the value specified in the "struct map_desc *_io_desc" mapping
87 * definition for the corresponding machine.
88 *
89 * Please keep these in alphabetical order, and formatted as per existing
90 * entries. Thanks.
91 */
92
93#ifdef CONFIG_IPAQ_HANDHELD
94static unsigned long h3xxx_max_flash_size = 0x04000000;
95static struct mtd_partition h3xxx_partitions[] = {
96 {
97 name: "H3XXX boot firmware",
98#ifndef CONFIG_LAB
99 size: 0x00040000,
100#else
101 size: 0x00080000,
102#endif
103 offset: 0,
104#ifndef CONFIG_LAB
105 mask_flags: MTD_WRITEABLE, /* force read-only */
106#endif
107 },
108 {
109 name: "H3XXX root jffs2",
110#ifndef CONFIG_LAB
111 size: 0x2000000 - 2*0x40000, /* Warning, this is fixed later */
112 offset: 0x00040000,
113#else
114 size: 0x2000000 - 0x40000 - 0x80000, /* Warning, this is fixed later */
115 offset: 0x00080000,
116#endif
117 },
118 {
119 name: "asset",
120 size: 0x40000,
121 offset: 0x2000000 - 0x40000, /* Warning, this is fixed later */
122 mask_flags: MTD_WRITEABLE, /* force read-only */
123 }
124};
125
126#ifndef CONFIG_MTD_CONCAT
127static struct mtd_partition h3xxx_partitions_bank2[] = {
128 /* this is used only on 2 CS machines when concat is not present */
129 {
130 name: "second H3XXX root jffs2",
131 size: 0x1000000 - 0x40000, /* Warning, this is fixed later */
132 offset: 0x00000000,
133 },
134 {
135 name: "second asset",
136 size: 0x40000,
137 offset: 0x1000000 - 0x40000, /* Warning, this is fixed later */
138 mask_flags: MTD_WRITEABLE, /* force read-only */
139 }
140};
141#endif
142
143static DEFINE_SPINLOCK(ipaq_vpp_lock);
144
145static void h3xxx_set_vpp(struct map_info *map, int vpp)
146{
147 static int nest = 0;
148
149 spin_lock(&ipaq_vpp_lock);
150 if (vpp)
151 nest++;
152 else
153 nest--;
154 if (nest)
155 assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 1);
156 else
157 assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 0);
158 spin_unlock(&ipaq_vpp_lock);
159}
160
161#endif
162
163#if defined(CONFIG_SA1100_JORNADA56X) || defined(CONFIG_SA1100_JORNADA720)
164static unsigned long jornada_max_flash_size = 0x02000000;
165static struct mtd_partition jornada_partitions[] = {
166 {
167 name: "Jornada boot firmware",
168 size: 0x00040000,
169 offset: 0,
170 mask_flags: MTD_WRITEABLE, /* force read-only */
171 }, {
172 name: "Jornada root jffs2",
173 size: MTDPART_SIZ_FULL,
174 offset: 0x00040000,
175 }
176};
177#endif
178
179
180static struct mtd_partition *parsed_parts;
181static struct mtd_info *mymtd;
182
183static unsigned long cs_phys[] = {
184#ifdef CONFIG_ARCH_SA1100
185 SA1100_CS0_PHYS,
186 SA1100_CS1_PHYS,
187 SA1100_CS2_PHYS,
188 SA1100_CS3_PHYS,
189 SA1100_CS4_PHYS,
190 SA1100_CS5_PHYS,
191#else
192 PXA_CS0_PHYS,
193 PXA_CS1_PHYS,
194 PXA_CS2_PHYS,
195 PXA_CS3_PHYS,
196 PXA_CS4_PHYS,
197 PXA_CS5_PHYS,
198#endif
199};
200
201static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
202
203static int __init h1900_special_case(void);
204
205static int __init ipaq_mtd_init(void)
206{
207 struct mtd_partition *parts = NULL;
208 int nb_parts = 0;
209 int parsed_nr_parts = 0;
210 const char *part_type;
211 int i; /* used when we have >1 flash chips */
212 unsigned long tot_flashsize = 0; /* used when we have >1 flash chips */
213
214 /* Default flash bankwidth */
215 // ipaq_map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
216
217 if (machine_is_h1900())
218 {
219 /* For our intents, the h1900 is not a real iPAQ, so we special-case it. */
220 return h1900_special_case();
221 }
222
223 if (machine_is_h3100() || machine_is_h1900())
224 for(i=0; i<MAX_IPAQ_CS; i++)
225 ipaq_map[i].bankwidth = 2;
226 else
227 for(i=0; i<MAX_IPAQ_CS; i++)
228 ipaq_map[i].bankwidth = 4;
229
230 /*
231 * Static partition definition selection
232 */
233 part_type = "static";
234
235 simple_map_init(&ipaq_map[0]);
236 simple_map_init(&ipaq_map[1]);
237
238#ifdef CONFIG_IPAQ_HANDHELD
239 if (machine_is_ipaq()) {
240 parts = h3xxx_partitions;
241 nb_parts = ARRAY_SIZE(h3xxx_partitions);
242 for(i=0; i<MAX_IPAQ_CS; i++) {
243 ipaq_map[i].size = h3xxx_max_flash_size;
244 ipaq_map[i].set_vpp = h3xxx_set_vpp;
245 ipaq_map[i].phys = cs_phys[i];
246 ipaq_map[i].virt = ioremap(cs_phys[i], 0x04000000);
247 if (machine_is_h3100 () || machine_is_h1900())
248 ipaq_map[i].bankwidth = 2;
249 }
250 if (machine_is_h3600()) {
251 /* No asset partition here */
252 h3xxx_partitions[1].size += 0x40000;
253 nb_parts--;
254 }
255 }
256#endif
257#ifdef CONFIG_ARCH_H5400
258 if (machine_is_h5400()) {
259 ipaq_map[0].size = 0x02000000;
260 ipaq_map[1].size = 0x02000000;
261 ipaq_map[1].phys = 0x02000000;
262 ipaq_map[1].virt = ipaq_map[0].virt + 0x02000000;
263 }
264#endif
265#ifdef CONFIG_ARCH_H1900
266 if (machine_is_h1900()) {
267 ipaq_map[0].size = 0x00400000;
268 ipaq_map[1].size = 0x02000000;
269 ipaq_map[1].phys = 0x00080000;
270 ipaq_map[1].virt = ipaq_map[0].virt + 0x00080000;
271 }
272#endif
273
274#ifdef CONFIG_SA1100_JORNADA56X
275 if (machine_is_jornada56x()) {
276 parts = jornada_partitions;
277 nb_parts = ARRAY_SIZE(jornada_partitions);
278 ipaq_map[0].size = jornada_max_flash_size;
279 ipaq_map[0].set_vpp = jornada56x_set_vpp;
280 ipaq_map[0].virt = (__u32)ioremap(0x0, 0x04000000);
281 }
282#endif
283#ifdef CONFIG_SA1100_JORNADA720
284 if (machine_is_jornada720()) {
285 parts = jornada_partitions;
286 nb_parts = ARRAY_SIZE(jornada_partitions);
287 ipaq_map[0].size = jornada_max_flash_size;
288 ipaq_map[0].set_vpp = jornada720_set_vpp;
289 }
290#endif
291
292
293 if (machine_is_ipaq()) { /* for iPAQs only */
294 for(i=0; i<MAX_IPAQ_CS; i++) {
295 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with CFI.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
296 my_sub_mtd[i] = do_map_probe("cfi_probe", &ipaq_map[i]);
297 if (!my_sub_mtd[i]) {
298 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
299 my_sub_mtd[i] = do_map_probe("jedec_probe", &ipaq_map[i]);
300 }
301 if (!my_sub_mtd[i]) {
302 printk(KERN_NOTICE "iPAQ flash: failed to find flash.\n");
303 if (i)
304 break;
305 else
306 return -ENXIO;
307 } else
308 printk(KERN_NOTICE "iPAQ flash: found %d bytes\n", my_sub_mtd[i]->size);
309
310 /* do we really need this debugging? --joshua 20030703 */
311 // printk("my_sub_mtd[%d]=%p\n", i, my_sub_mtd[i]);
312 my_sub_mtd[i]->owner = THIS_MODULE;
313 tot_flashsize += my_sub_mtd[i]->size;
314 }
315#ifdef CONFIG_MTD_CONCAT
316 /* fix the asset location */
317# ifdef CONFIG_LAB
318 h3xxx_partitions[1].size = tot_flashsize - 0x40000 - 0x80000 /* extra big boot block */;
319# else
320 h3xxx_partitions[1].size = tot_flashsize - 2 * 0x40000;
321# endif
322 h3xxx_partitions[2].offset = tot_flashsize - 0x40000;
323 /* and concat the devices */
324 mymtd = mtd_concat_create(&my_sub_mtd[0], i,
325 "ipaq");
326 if (!mymtd) {
327 printk("Cannot create iPAQ concat device\n");
328 return -ENXIO;
329 }
330#else
331 mymtd = my_sub_mtd[0];
332
333 /*
334 *In the very near future, command line partition parsing
335 * will use the device name as 'mtd-id' instead of a value
336 * passed to the parse_cmdline_partitions() routine. Since
337 * the bootldr says 'ipaq', make sure it continues to work.
338 */
339 mymtd->name = "ipaq";
340
341 if ((machine_is_h3600())) {
342# ifdef CONFIG_LAB
343 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x80000;
344# else
345 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000;
346# endif
347 nb_parts = 2;
348 } else {
349# ifdef CONFIG_LAB
350 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000 - 0x80000; /* extra big boot block */
351# else
352 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 2*0x40000;
353# endif
354 h3xxx_partitions[2].offset = my_sub_mtd[0]->size - 0x40000;
355 }
356
357 if (my_sub_mtd[1]) {
358# ifdef CONFIG_LAB
359 h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x80000;
360# else
361 h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x40000;
362# endif
363 h3xxx_partitions_bank2[1].offset = my_sub_mtd[1]->size - 0x40000;
364 }
365#endif
366 }
367 else {
368 /*
369 * Now let's probe for the actual flash. Do it here since
370 * specific machine settings might have been set above.
371 */
372 printk(KERN_NOTICE "IPAQ flash: probing %d-bit flash bus, window=%lx\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
373 mymtd = do_map_probe("cfi_probe", &ipaq_map[0]);
374 if (!mymtd)
375 return -ENXIO;
376 mymtd->owner = THIS_MODULE;
377 }
378
379
380 /*
381 * Dynamic partition selection stuff (might override the static ones)
382 */
383
384 i = parse_mtd_partitions(mymtd, part_probes, &parsed_parts, 0);
385
386 if (i > 0) {
387 nb_parts = parsed_nr_parts = i;
388 parts = parsed_parts;
389 part_type = "dynamic";
390 }
391
392 if (!parts) {
393 printk(KERN_NOTICE "IPAQ flash: no partition info available, registering whole flash at once\n");
394 add_mtd_device(mymtd);
395#ifndef CONFIG_MTD_CONCAT
396 if (my_sub_mtd[1])
397 add_mtd_device(my_sub_mtd[1]);
398#endif
399 } else {
400 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
401 add_mtd_partitions(mymtd, parts, nb_parts);
402#ifndef CONFIG_MTD_CONCAT
403 if (my_sub_mtd[1])
404 add_mtd_partitions(my_sub_mtd[1], h3xxx_partitions_bank2, ARRAY_SIZE(h3xxx_partitions_bank2));
405#endif
406 }
407
408 return 0;
409}
410
411static void __exit ipaq_mtd_cleanup(void)
412{
413 int i;
414
415 if (mymtd) {
416 del_mtd_partitions(mymtd);
417#ifndef CONFIG_MTD_CONCAT
418 if (my_sub_mtd[1])
419 del_mtd_partitions(my_sub_mtd[1]);
420#endif
421 map_destroy(mymtd);
422#ifdef CONFIG_MTD_CONCAT
423 for(i=0; i<MAX_IPAQ_CS; i++)
424#else
425 for(i=1; i<MAX_IPAQ_CS; i++)
426#endif
427 {
428 if (my_sub_mtd[i])
429 map_destroy(my_sub_mtd[i]);
430 }
431 kfree(parsed_parts);
432 }
433}
434
435static int __init h1900_special_case(void)
436{
437 /* The iPAQ h1900 is a special case - it has weird ROM. */
438 simple_map_init(&ipaq_map[0]);
439 ipaq_map[0].size = 0x80000;
440 ipaq_map[0].set_vpp = h3xxx_set_vpp;
441 ipaq_map[0].phys = 0x0;
442 ipaq_map[0].virt = ioremap(0x0, 0x04000000);
443 ipaq_map[0].bankwidth = 2;
444
445 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
446 mymtd = do_map_probe("jedec_probe", &ipaq_map[0]);
447 if (!mymtd)
448 return -ENODEV;
449 add_mtd_device(mymtd);
450 printk(KERN_NOTICE "iPAQ flash: registered h1910 flash\n");
451
452 return 0;
453}
454
455module_init(ipaq_mtd_init);
456module_exit(ipaq_mtd_cleanup);
457
458MODULE_AUTHOR("Jamey Hicks");
459MODULE_DESCRIPTION("IPAQ CFI map driver");
460MODULE_LICENSE("MIT");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7214b876feba..7b0515297411 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -210,7 +210,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
210 * not attempt to do a direct access on us. 210 * not attempt to do a direct access on us.
211 */ 211 */
212 info->map.phys = NO_XIP; 212 info->map.phys = NO_XIP;
213 info->map.size = dev->resource->end - dev->resource->start + 1; 213 info->map.size = resource_size(dev->resource);
214 214
215 /* 215 /*
216 * We only support 16-bit accesses for now. If and when 216 * We only support 16-bit accesses for now. If and when
@@ -224,7 +224,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
224 info->map.copy_from = ixp4xx_copy_from, 224 info->map.copy_from = ixp4xx_copy_from,
225 225
226 info->res = request_mem_region(dev->resource->start, 226 info->res = request_mem_region(dev->resource->start,
227 dev->resource->end - dev->resource->start + 1, 227 resource_size(dev->resource),
228 "IXP4XXFlash"); 228 "IXP4XXFlash");
229 if (!info->res) { 229 if (!info->res) {
230 printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n"); 230 printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
@@ -233,7 +233,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
233 } 233 }
234 234
235 info->map.virt = ioremap(dev->resource->start, 235 info->map.virt = ioremap(dev->resource->start,
236 dev->resource->end - dev->resource->start + 1); 236 resource_size(dev->resource));
237 if (!info->map.virt) { 237 if (!info->map.virt) {
238 printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n"); 238 printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
239 err = -EIO; 239 err = -EIO;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 380648e9051a..d9603f7f9652 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -48,23 +48,22 @@ static int physmap_flash_remove(struct platform_device *dev)
48 48
49 if (info->cmtd) { 49 if (info->cmtd) {
50#ifdef CONFIG_MTD_PARTITIONS 50#ifdef CONFIG_MTD_PARTITIONS
51 if (info->nr_parts || physmap_data->nr_parts) 51 if (info->nr_parts || physmap_data->nr_parts) {
52 del_mtd_partitions(info->cmtd); 52 del_mtd_partitions(info->cmtd);
53 else 53
54 if (info->nr_parts)
55 kfree(info->parts);
56 } else {
54 del_mtd_device(info->cmtd); 57 del_mtd_device(info->cmtd);
58 }
55#else 59#else
56 del_mtd_device(info->cmtd); 60 del_mtd_device(info->cmtd);
57#endif 61#endif
58 }
59#ifdef CONFIG_MTD_PARTITIONS
60 if (info->nr_parts)
61 kfree(info->parts);
62#endif
63
64#ifdef CONFIG_MTD_CONCAT 62#ifdef CONFIG_MTD_CONCAT
65 if (info->cmtd != info->mtd[0]) 63 if (info->cmtd != info->mtd[0])
66 mtd_concat_destroy(info->cmtd); 64 mtd_concat_destroy(info->cmtd);
67#endif 65#endif
66 }
68 67
69 for (i = 0; i < MAX_RESOURCES; i++) { 68 for (i = 0; i < MAX_RESOURCES; i++) {
70 if (info->mtd[i] != NULL) 69 if (info->mtd[i] != NULL)
@@ -130,7 +129,7 @@ static int physmap_flash_probe(struct platform_device *dev)
130 info->map[i].size); 129 info->map[i].size);
131 if (info->map[i].virt == NULL) { 130 if (info->map[i].virt == NULL) {
132 dev_err(&dev->dev, "Failed to ioremap flash region\n"); 131 dev_err(&dev->dev, "Failed to ioremap flash region\n");
133 err = EIO; 132 err = -EIO;
134 goto err_out; 133 goto err_out;
135 } 134 }
136 135
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index d7a47574d21e..f3af87e08ecd 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -248,7 +248,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
248 plat->exit(); 248 plat->exit();
249} 249}
250 250
251static struct sa_info *__init 251static struct sa_info *__devinit
252sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) 252sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
253{ 253{
254 struct sa_info *info; 254 struct sa_info *info;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 1f73297e7776..82afad0ddd72 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -612,16 +612,15 @@ static int __devinit vmu_connect(struct maple_device *mdev)
612 612
613 test_flash_data = be32_to_cpu(mdev->devinfo.function); 613 test_flash_data = be32_to_cpu(mdev->devinfo.function);
614 /* Need to count how many bits are set - to find out which 614 /* Need to count how many bits are set - to find out which
615 * function_data element has details of the memory card: 615 * function_data element has details of the memory card
616 * using Brian Kernighan's/Peter Wegner's method */ 616 */
617 for (c = 0; test_flash_data; c++) 617 c = hweight_long(test_flash_data);
618 test_flash_data &= test_flash_data - 1;
619 618
620 basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]); 619 basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
621 620
622 card = kmalloc(sizeof(struct memcard), GFP_KERNEL); 621 card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
623 if (!card) { 622 if (!card) {
624 error = ENOMEM; 623 error = -ENOMEM;
625 goto fail_nomem; 624 goto fail_nomem;
626 } 625 }
627 626
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 64e2b379a350..c82e09bbc5fd 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -84,9 +84,6 @@ static int mtd_blktrans_thread(void *arg)
84 struct request_queue *rq = tr->blkcore_priv->rq; 84 struct request_queue *rq = tr->blkcore_priv->rq;
85 struct request *req = NULL; 85 struct request *req = NULL;
86 86
87 /* we might get involved when memory gets low, so use PF_MEMALLOC */
88 current->flags |= PF_MEMALLOC;
89
90 spin_lock_irq(rq->queue_lock); 87 spin_lock_irq(rq->queue_lock);
91 88
92 while (!kthread_should_stop()) { 89 while (!kthread_should_stop()) {
@@ -381,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
381 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr, 378 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
382 "%sd", tr->name); 379 "%sd", tr->name);
383 if (IS_ERR(tr->blkcore_priv->thread)) { 380 if (IS_ERR(tr->blkcore_priv->thread)) {
384 int ret = PTR_ERR(tr->blkcore_priv->thread); 381 ret = PTR_ERR(tr->blkcore_priv->thread);
385 blk_cleanup_queue(tr->blkcore_priv->rq); 382 blk_cleanup_queue(tr->blkcore_priv->rq);
386 unregister_blkdev(tr->major, tr->name); 383 unregister_blkdev(tr->major, tr->name);
387 kfree(tr->blkcore_priv); 384 kfree(tr->blkcore_priv);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 467a4f177bfb..c356c0a30c3e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -447,7 +447,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
447 for (i=0; i< MAX_MTD_DEVICES; i++) 447 for (i=0; i< MAX_MTD_DEVICES; i++)
448 if (mtd_table[i] == mtd) 448 if (mtd_table[i] == mtd)
449 ret = mtd_table[i]; 449 ret = mtd_table[i];
450 } else if (num < MAX_MTD_DEVICES) { 450 } else if (num >= 0 && num < MAX_MTD_DEVICES) {
451 ret = mtd_table[num]; 451 ret = mtd_table[num];
452 if (mtd && mtd != ret) 452 if (mtd && mtd != ret)
453 ret = NULL; 453 ret = NULL;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1060337c06df..a714ec482761 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -29,14 +29,34 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/spinlock.h>
33#include <linux/interrupt.h> 32#include <linux/interrupt.h>
34#include <linux/mtd/mtd.h> 33#include <linux/mtd/mtd.h>
34#include <linux/kmsg_dump.h>
35
36/* Maximum MTD partition size */
37#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
35 38
36#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 39#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
37#define OOPS_PAGE_SIZE 4096 40#define MTDOOPS_HEADER_SIZE 8
41
42static unsigned long record_size = 4096;
43module_param(record_size, ulong, 0400);
44MODULE_PARM_DESC(record_size,
45 "record size for MTD OOPS pages in bytes (default 4096)");
46
47static char mtddev[80];
48module_param_string(mtddev, mtddev, 80, 0400);
49MODULE_PARM_DESC(mtddev,
50 "name or index number of the MTD device to use");
51
52static int dump_oops = 1;
53module_param(dump_oops, int, 0600);
54MODULE_PARM_DESC(dump_oops,
55 "set to 1 to dump oopses, 0 to only dump panics (default 1)");
38 56
39static struct mtdoops_context { 57static struct mtdoops_context {
58 struct kmsg_dumper dump;
59
40 int mtd_index; 60 int mtd_index;
41 struct work_struct work_erase; 61 struct work_struct work_erase;
42 struct work_struct work_write; 62 struct work_struct work_write;
@@ -44,28 +64,43 @@ static struct mtdoops_context {
44 int oops_pages; 64 int oops_pages;
45 int nextpage; 65 int nextpage;
46 int nextcount; 66 int nextcount;
47 char *name; 67 unsigned long *oops_page_used;
48 68
49 void *oops_buf; 69 void *oops_buf;
50
51 /* writecount and disabling ready are spin lock protected */
52 spinlock_t writecount_lock;
53 int ready;
54 int writecount;
55} oops_cxt; 70} oops_cxt;
56 71
72static void mark_page_used(struct mtdoops_context *cxt, int page)
73{
74 set_bit(page, cxt->oops_page_used);
75}
76
77static void mark_page_unused(struct mtdoops_context *cxt, int page)
78{
79 clear_bit(page, cxt->oops_page_used);
80}
81
82static int page_is_used(struct mtdoops_context *cxt, int page)
83{
84 return test_bit(page, cxt->oops_page_used);
85}
86
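For context only (not part of the patch): the helpers above wrap the kernel's set_bit/clear_bit/test_bit on the oops_page_used array, one bit per record. A plain-C host-side sketch of the same bookkeeping, with illustrative sizes:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

int main(void)
{
	unsigned long oops_pages = 2048;	/* e.g. 8 MiB partition / 4 KiB records */
	size_t words = (oops_pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *used = calloc(words, sizeof(*used));
	unsigned long page = 70;

	if (!used)
		return 1;

	used[page / BITS_PER_LONG] |= 1UL << (page % BITS_PER_LONG);	/* mark_page_used() */
	printf("page %lu used: %d\n", page,
	       !!(used[page / BITS_PER_LONG] & (1UL << (page % BITS_PER_LONG))));

	used[page / BITS_PER_LONG] &= ~(1UL << (page % BITS_PER_LONG));	/* mark_page_unused() */
	printf("page %lu used: %d\n", page,
	       !!(used[page / BITS_PER_LONG] & (1UL << (page % BITS_PER_LONG))));

	free(used);
	return 0;
}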
57static void mtdoops_erase_callback(struct erase_info *done) 87static void mtdoops_erase_callback(struct erase_info *done)
58{ 88{
59 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; 89 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
60 wake_up(wait_q); 90 wake_up(wait_q);
61} 91}
62 92
63static int mtdoops_erase_block(struct mtd_info *mtd, int offset) 93static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
64{ 94{
95 struct mtd_info *mtd = cxt->mtd;
96 u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
97 u32 start_page = start_page_offset / record_size;
98 u32 erase_pages = mtd->erasesize / record_size;
65 struct erase_info erase; 99 struct erase_info erase;
66 DECLARE_WAITQUEUE(wait, current); 100 DECLARE_WAITQUEUE(wait, current);
67 wait_queue_head_t wait_q; 101 wait_queue_head_t wait_q;
68 int ret; 102 int ret;
103 int page;
69 104
70 init_waitqueue_head(&wait_q); 105 init_waitqueue_head(&wait_q);
71 erase.mtd = mtd; 106 erase.mtd = mtd;
@@ -81,25 +116,24 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
81 if (ret) { 116 if (ret) {
82 set_current_state(TASK_RUNNING); 117 set_current_state(TASK_RUNNING);
83 remove_wait_queue(&wait_q, &wait); 118 remove_wait_queue(&wait_q, &wait);
84 printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] " 119 printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
85 "on \"%s\" failed\n", 120 (unsigned long long)erase.addr,
86 (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name); 121 (unsigned long long)erase.len, mtddev);
87 return ret; 122 return ret;
88 } 123 }
89 124
90 schedule(); /* Wait for erase to finish. */ 125 schedule(); /* Wait for erase to finish. */
91 remove_wait_queue(&wait_q, &wait); 126 remove_wait_queue(&wait_q, &wait);
92 127
128 /* Mark pages as unused */
129 for (page = start_page; page < start_page + erase_pages; page++)
130 mark_page_unused(cxt, page);
131
93 return 0; 132 return 0;
94} 133}
95 134
96static void mtdoops_inc_counter(struct mtdoops_context *cxt) 135static void mtdoops_inc_counter(struct mtdoops_context *cxt)
97{ 136{
98 struct mtd_info *mtd = cxt->mtd;
99 size_t retlen;
100 u32 count;
101 int ret;
102
103 cxt->nextpage++; 137 cxt->nextpage++;
104 if (cxt->nextpage >= cxt->oops_pages) 138 if (cxt->nextpage >= cxt->oops_pages)
105 cxt->nextpage = 0; 139 cxt->nextpage = 0;
@@ -107,25 +141,13 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
107 if (cxt->nextcount == 0xffffffff) 141 if (cxt->nextcount == 0xffffffff)
108 cxt->nextcount = 0; 142 cxt->nextcount = 0;
109 143
110 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, 144 if (page_is_used(cxt, cxt->nextpage)) {
111 &retlen, (u_char *) &count);
112 if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
113 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
114 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
115 retlen, ret);
116 schedule_work(&cxt->work_erase); 145 schedule_work(&cxt->work_erase);
117 return; 146 return;
118 } 147 }
119 148
120 /* See if we need to erase the next block */ 149 printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
121 if (count != 0xffffffff) { 150 cxt->nextpage, cxt->nextcount);
122 schedule_work(&cxt->work_erase);
123 return;
124 }
125
126 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
127 cxt->nextpage, cxt->nextcount);
128 cxt->ready = 1;
129} 151}
130 152
131/* Scheduled work - when we can't proceed without erasing a block */ 153/* Scheduled work - when we can't proceed without erasing a block */
@@ -140,47 +162,47 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
140 if (!mtd) 162 if (!mtd)
141 return; 163 return;
142 164
143 mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize; 165 mod = (cxt->nextpage * record_size) % mtd->erasesize;
144 if (mod != 0) { 166 if (mod != 0) {
145 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE); 167 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
146 if (cxt->nextpage >= cxt->oops_pages) 168 if (cxt->nextpage >= cxt->oops_pages)
147 cxt->nextpage = 0; 169 cxt->nextpage = 0;
148 } 170 }
149 171
150 while (mtd->block_isbad) { 172 while (mtd->block_isbad) {
151 ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 173 ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
152 if (!ret) 174 if (!ret)
153 break; 175 break;
154 if (ret < 0) { 176 if (ret < 0) {
155 printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n"); 177 printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
156 return; 178 return;
157 } 179 }
158badblock: 180badblock:
159 printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 181 printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
160 cxt->nextpage * OOPS_PAGE_SIZE); 182 cxt->nextpage * record_size);
161 i++; 183 i++;
162 cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE); 184 cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
163 if (cxt->nextpage >= cxt->oops_pages) 185 if (cxt->nextpage >= cxt->oops_pages)
164 cxt->nextpage = 0; 186 cxt->nextpage = 0;
165 if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) { 187 if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
166 printk(KERN_ERR "mtdoops: All blocks bad!\n"); 188 printk(KERN_ERR "mtdoops: all blocks bad!\n");
167 return; 189 return;
168 } 190 }
169 } 191 }
170 192
171 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 193 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
172 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 194 ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
173 195
174 if (ret >= 0) { 196 if (ret >= 0) {
175 printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 197 printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
176 cxt->ready = 1; 198 cxt->nextpage, cxt->nextcount);
177 return; 199 return;
178 } 200 }
179 201
180 if (mtd->block_markbad && (ret == -EIO)) { 202 if (mtd->block_markbad && ret == -EIO) {
181 ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 203 ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
182 if (ret < 0) { 204 if (ret < 0) {
183 printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n"); 205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
184 return; 206 return;
185 } 207 }
186 } 208 }
@@ -191,36 +213,37 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
191{ 213{
192 struct mtd_info *mtd = cxt->mtd; 214 struct mtd_info *mtd = cxt->mtd;
193 size_t retlen; 215 size_t retlen;
216 u32 *hdr;
194 int ret; 217 int ret;
195 218
196 if (cxt->writecount < OOPS_PAGE_SIZE) 219 /* Add mtdoops header to the buffer */
197 memset(cxt->oops_buf + cxt->writecount, 0xff, 220 hdr = cxt->oops_buf;
198 OOPS_PAGE_SIZE - cxt->writecount); 221 hdr[0] = cxt->nextcount;
222 hdr[1] = MTDOOPS_KERNMSG_MAGIC;
199 223
200 if (panic) 224 if (panic)
201 ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 225 ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
202 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 226 record_size, &retlen, cxt->oops_buf);
203 else 227 else
204 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 228 ret = mtd->write(mtd, cxt->nextpage * record_size,
205 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 229 record_size, &retlen, cxt->oops_buf);
206
207 cxt->writecount = 0;
208 230
209 if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) 231 if (retlen != record_size || ret < 0)
210 printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", 232 printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
211 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); 233 cxt->nextpage * record_size, retlen, record_size, ret);
234 mark_page_used(cxt, cxt->nextpage);
235 memset(cxt->oops_buf, 0xff, record_size);
212 236
213 mtdoops_inc_counter(cxt); 237 mtdoops_inc_counter(cxt);
214} 238}
215 239
216
217static void mtdoops_workfunc_write(struct work_struct *work) 240static void mtdoops_workfunc_write(struct work_struct *work)
218{ 241{
219 struct mtdoops_context *cxt = 242 struct mtdoops_context *cxt =
220 container_of(work, struct mtdoops_context, work_write); 243 container_of(work, struct mtdoops_context, work_write);
221 244
222 mtdoops_write(cxt, 0); 245 mtdoops_write(cxt, 0);
223} 246}
224 247
225static void find_next_position(struct mtdoops_context *cxt) 248static void find_next_position(struct mtdoops_context *cxt)
226{ 249{
@@ -230,28 +253,33 @@ static void find_next_position(struct mtdoops_context *cxt)
230 size_t retlen; 253 size_t retlen;
231 254
232 for (page = 0; page < cxt->oops_pages; page++) { 255 for (page = 0; page < cxt->oops_pages; page++) {
233 ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); 256 /* Assume the page is used */
234 if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) { 257 mark_page_used(cxt, page);
235 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)" 258 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
236 ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); 259 &retlen, (u_char *) &count[0]);
260 if (retlen != MTDOOPS_HEADER_SIZE ||
261 (ret < 0 && ret != -EUCLEAN)) {
262 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
263 page * record_size, retlen,
264 MTDOOPS_HEADER_SIZE, ret);
237 continue; 265 continue;
238 } 266 }
239 267
240 if (count[1] != MTDOOPS_KERNMSG_MAGIC) 268 if (count[0] == 0xffffffff && count[1] == 0xffffffff)
241 continue; 269 mark_page_unused(cxt, page);
242 if (count[0] == 0xffffffff) 270 if (count[0] == 0xffffffff)
243 continue; 271 continue;
244 if (maxcount == 0xffffffff) { 272 if (maxcount == 0xffffffff) {
245 maxcount = count[0]; 273 maxcount = count[0];
246 maxpos = page; 274 maxpos = page;
247 } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) { 275 } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
248 maxcount = count[0]; 276 maxcount = count[0];
249 maxpos = page; 277 maxpos = page;
250 } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) { 278 } else if (count[0] > maxcount && count[0] < 0xc0000000) {
251 maxcount = count[0]; 279 maxcount = count[0];
252 maxpos = page; 280 maxpos = page;
253 } else if ((count[0] > maxcount) && (count[0] > 0xc0000000) 281 } else if (count[0] > maxcount && count[0] > 0xc0000000
254 && (maxcount > 0x80000000)) { 282 && maxcount > 0x80000000) {
255 maxcount = count[0]; 283 maxcount = count[0];
256 maxpos = page; 284 maxpos = page;
257 } 285 }
@@ -269,187 +297,170 @@ static void find_next_position(struct mtdoops_context *cxt)
269 mtdoops_inc_counter(cxt); 297 mtdoops_inc_counter(cxt);
270} 298}
271 299
272 300static void mtdoops_do_dump(struct kmsg_dumper *dumper,
273static void mtdoops_notify_add(struct mtd_info *mtd) 301 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
302 const char *s2, unsigned long l2)
274{ 303{
275 struct mtdoops_context *cxt = &oops_cxt; 304 struct mtdoops_context *cxt = container_of(dumper,
305 struct mtdoops_context, dump);
306 unsigned long s1_start, s2_start;
307 unsigned long l1_cpy, l2_cpy;
308 char *dst;
309
310 /* Only dump oopses if dump_oops is set */
311 if (reason == KMSG_DUMP_OOPS && !dump_oops)
312 return;
276 313
277 if (cxt->name && !strcmp(mtd->name, cxt->name)) 314 dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
278 cxt->mtd_index = mtd->index; 315 l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
316 l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
279 317
280 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 318 s2_start = l2 - l2_cpy;
281 return; 319 s1_start = l1 - l1_cpy;
282 320
283 if (mtd->size < (mtd->erasesize * 2)) { 321 memcpy(dst, s1 + s1_start, l1_cpy);
284 printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n", 322 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
285 mtd->index);
286 return;
287 }
288 323
289 if (mtd->erasesize < OOPS_PAGE_SIZE) { 324 /* Panics must be written immediately */
290 printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n", 325 if (reason == KMSG_DUMP_PANIC) {
291 mtd->index); 326 if (!cxt->mtd->panic_write)
327 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
328 else
329 mtdoops_write(cxt, 1);
292 return; 330 return;
293 } 331 }
294 332
295 cxt->mtd = mtd; 333 /* For other cases, schedule work to write it "nicely" */
296 if (mtd->size > INT_MAX) 334 schedule_work(&cxt->work_write);
297 cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
298 else
299 cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
300
301 find_next_position(cxt);
302
303 printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
304} 335}
305 336
306static void mtdoops_notify_remove(struct mtd_info *mtd) 337static void mtdoops_notify_add(struct mtd_info *mtd)
307{ 338{
308 struct mtdoops_context *cxt = &oops_cxt; 339 struct mtdoops_context *cxt = &oops_cxt;
340 u64 mtdoops_pages = div_u64(mtd->size, record_size);
341 int err;
309 342
310 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 343 if (!strcmp(mtd->name, mtddev))
311 return; 344 cxt->mtd_index = mtd->index;
312
313 cxt->mtd = NULL;
314 flush_scheduled_work();
315}
316
317static void mtdoops_console_sync(void)
318{
319 struct mtdoops_context *cxt = &oops_cxt;
320 struct mtd_info *mtd = cxt->mtd;
321 unsigned long flags;
322 345
323 if (!cxt->ready || !mtd || cxt->writecount == 0) 346 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
324 return; 347 return;
325 348
326 /* 349 if (mtd->size < mtd->erasesize * 2) {
327 * Once ready is 0 and we've held the lock no further writes to the 350 printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
328 * buffer will happen 351 mtd->index);
329 */
330 spin_lock_irqsave(&cxt->writecount_lock, flags);
331 if (!cxt->ready) {
332 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
333 return; 352 return;
334 } 353 }
335 cxt->ready = 0; 354 if (mtd->erasesize < record_size) {
336 spin_unlock_irqrestore(&cxt->writecount_lock, flags); 355 printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
337 356 mtd->index);
338 if (mtd->panic_write && in_interrupt())
339 /* Interrupt context, we're going to panic so try and log */
340 mtdoops_write(cxt, 1);
341 else
342 schedule_work(&cxt->work_write);
343}
344
345static void
346mtdoops_console_write(struct console *co, const char *s, unsigned int count)
347{
348 struct mtdoops_context *cxt = co->data;
349 struct mtd_info *mtd = cxt->mtd;
350 unsigned long flags;
351
352 if (!oops_in_progress) {
353 mtdoops_console_sync();
354 return; 357 return;
355 } 358 }
356 359 if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
357 if (!cxt->ready || !mtd) 360 printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
361 mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
358 return; 362 return;
363 }
359 364
360 /* Locking on writecount ensures sequential writes to the buffer */ 365 /* oops_page_used is a bit field */
361 spin_lock_irqsave(&cxt->writecount_lock, flags); 366 cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
362 367 BITS_PER_LONG));
363 /* Check ready status didn't change whilst waiting for the lock */ 368 if (!cxt->oops_page_used) {
364 if (!cxt->ready) { 369 printk(KERN_ERR "mtdoops: could not allocate page array\n");
365 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
366 return; 370 return;
367 } 371 }
368 372
369 if (cxt->writecount == 0) { 373 cxt->dump.dump = mtdoops_do_dump;
370 u32 *stamp = cxt->oops_buf; 374 err = kmsg_dump_register(&cxt->dump);
371 *stamp++ = cxt->nextcount; 375 if (err) {
372 *stamp = MTDOOPS_KERNMSG_MAGIC; 376 printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
373 cxt->writecount = 8; 377 vfree(cxt->oops_page_used);
378 cxt->oops_page_used = NULL;
379 return;
374 } 380 }
375 381
376 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 382 cxt->mtd = mtd;
377 count = OOPS_PAGE_SIZE - cxt->writecount; 383 cxt->oops_pages = (int)mtd->size / record_size;
378 384 find_next_position(cxt);
379 memcpy(cxt->oops_buf + cxt->writecount, s, count); 385 printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
380 cxt->writecount += count;
381
382 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
383
384 if (cxt->writecount == OOPS_PAGE_SIZE)
385 mtdoops_console_sync();
386} 386}
387 387
388static int __init mtdoops_console_setup(struct console *co, char *options) 388static void mtdoops_notify_remove(struct mtd_info *mtd)
389{ 389{
390 struct mtdoops_context *cxt = co->data; 390 struct mtdoops_context *cxt = &oops_cxt;
391 391
392 if (cxt->mtd_index != -1 || cxt->name) 392 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
393 return -EBUSY; 393 return;
394 if (options) {
395 cxt->name = kstrdup(options, GFP_KERNEL);
396 return 0;
397 }
398 if (co->index == -1)
399 return -EINVAL;
400 394
401 cxt->mtd_index = co->index; 395 if (kmsg_dump_unregister(&cxt->dump) < 0)
402 return 0; 396 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
397
398 cxt->mtd = NULL;
399 flush_scheduled_work();
403} 400}
404 401
402
405static struct mtd_notifier mtdoops_notifier = { 403static struct mtd_notifier mtdoops_notifier = {
406 .add = mtdoops_notify_add, 404 .add = mtdoops_notify_add,
407 .remove = mtdoops_notify_remove, 405 .remove = mtdoops_notify_remove,
408}; 406};
409 407
410static struct console mtdoops_console = { 408static int __init mtdoops_init(void)
411 .name = "ttyMTD",
412 .write = mtdoops_console_write,
413 .setup = mtdoops_console_setup,
414 .unblank = mtdoops_console_sync,
415 .index = -1,
416 .data = &oops_cxt,
417};
418
419static int __init mtdoops_console_init(void)
420{ 409{
421 struct mtdoops_context *cxt = &oops_cxt; 410 struct mtdoops_context *cxt = &oops_cxt;
411 int mtd_index;
412 char *endp;
422 413
414 if (strlen(mtddev) == 0) {
415 printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
416 return -EINVAL;
417 }
418 if ((record_size & 4095) != 0) {
419 printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
420 return -EINVAL;
421 }
422 if (record_size < 4096) {
423 printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
424 return -EINVAL;
425 }
426
427 /* Setup the MTD device to use */
423 cxt->mtd_index = -1; 428 cxt->mtd_index = -1;
424 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); 429 mtd_index = simple_strtoul(mtddev, &endp, 0);
425 spin_lock_init(&cxt->writecount_lock); 430 if (*endp == '\0')
431 cxt->mtd_index = mtd_index;
432 if (cxt->mtd_index > MAX_MTD_DEVICES) {
433 printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
434 mtd_index);
435 return -EINVAL;
436 }
426 437
438 cxt->oops_buf = vmalloc(record_size);
427 if (!cxt->oops_buf) { 439 if (!cxt->oops_buf) {
428 printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); 440 printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
429 return -ENOMEM; 441 return -ENOMEM;
430 } 442 }
443 memset(cxt->oops_buf, 0xff, record_size);
431 444
432 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); 445 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
433 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); 446 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
434 447
435 register_console(&mtdoops_console);
436 register_mtd_user(&mtdoops_notifier); 448 register_mtd_user(&mtdoops_notifier);
437 return 0; 449 return 0;
438} 450}
439 451
440static void __exit mtdoops_console_exit(void) 452static void __exit mtdoops_exit(void)
441{ 453{
442 struct mtdoops_context *cxt = &oops_cxt; 454 struct mtdoops_context *cxt = &oops_cxt;
443 455
444 unregister_mtd_user(&mtdoops_notifier); 456 unregister_mtd_user(&mtdoops_notifier);
445 unregister_console(&mtdoops_console);
446 kfree(cxt->name);
447 vfree(cxt->oops_buf); 457 vfree(cxt->oops_buf);
458 vfree(cxt->oops_page_used);
448} 459}
449 460
450 461
451subsys_initcall(mtdoops_console_init); 462module_init(mtdoops_init);
452module_exit(mtdoops_console_exit); 463module_exit(mtdoops_exit);
453 464
454MODULE_LICENSE("GPL"); 465MODULE_LICENSE("GPL");
455MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); 466MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
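The find_next_position() comparisons above tolerate the 32-bit record counter wrapping around: a counter that is still 0xffffffff marks an unused page, a small value (< 0x40000000) beats one that has almost wrapped (> 0xc0000000), and otherwise the larger value wins. A minimal user-space sketch of that selection logic (not the driver code itself; the sample counter values are made up):

#include <stdio.h>
#include <stdint.h>

static int newer(uint32_t cand, uint32_t best)
{
	if (best == 0xffffffff)				/* nothing found yet */
		return 1;
	if (cand < 0x40000000 && best > 0xc0000000)	/* cand wrapped past best */
		return 1;
	if (cand > best && cand < 0xc0000000)		/* plain ordering */
		return 1;
	if (cand > best && cand > 0xc0000000 && best > 0x80000000)
		return 1;				/* both near the wrap point */
	return 0;
}

int main(void)
{
	uint32_t count[] = { 0xfffffffe, 0x00000001, 0xffffffff, 0xfffffffd };
	uint32_t maxcount = 0xffffffff;
	int page, maxpos = -1;

	for (page = 0; page < 4; page++) {
		if (count[page] == 0xffffffff)		/* erased page, skip */
			continue;
		if (newer(count[page], maxcount)) {
			maxcount = count[page];
			maxpos = page;
		}
	}
	printf("newest record: page %d (count 0x%08x)\n", maxpos, maxcount);
	return 0;
}

With these values the record written after the wrap (count 0x00000001 on page 1) is correctly chosen as the newest one.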
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 0e35e1aefd22..7678538344f4 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -201,6 +201,22 @@ config MTD_NAND_S3C2410_CLKSTOP
201 when the NAND chip is selected or released, but will save 201 when the NAND chip is selected or released, but will save
202 approximately 5mA of power when there is nothing happening. 202 approximately 5mA of power when there is nothing happening.
203 203
204config MTD_NAND_BCM_UMI
205 tristate "NAND Flash support for BCM Reference Boards"
206 depends on ARCH_BCMRING && MTD_NAND
207 help
208 This enables the NAND flash controller on the BCM UMI block.
209
210 No board specific support is done by this driver; each board
211 must advertise a platform_device for the driver to attach.
212
213config MTD_NAND_BCM_UMI_HWCS
214 bool "BCM UMI NAND Hardware CS"
215 depends on MTD_NAND_BCM_UMI
216 help
217 Enable the use of the BCM UMI block's internal CS using NAND.
218 This should only be used if you know the external NAND CS can toggle.
219
204config MTD_NAND_DISKONCHIP 220config MTD_NAND_DISKONCHIP
205 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" 221 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
206 depends on EXPERIMENTAL 222 depends on EXPERIMENTAL
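As the new Kconfig help text notes, the BCM UMI driver does no board-specific setup; each board must advertise a matching platform_device. A hypothetical sketch of what that board-file registration could look like (the register base and size are placeholders; only the "bcm-nand" name is taken from the driver added later in this series):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource bcm_umi_nand_resource[] = {
	{
		.start = 0xc0000000,		/* placeholder register base */
		.end   = 0xc0000fff,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device bcm_umi_nand_device = {
	.name		= "bcm-nand",		/* must match the driver name */
	.id		= -1,
	.resource	= bcm_umi_nand_resource,
	.num_resources	= ARRAY_SIZE(bcm_umi_nand_resource),
};

/* called from the board's machine init hook */
static void __init board_add_nand(void)
{
	platform_device_register(&bcm_umi_nand_device);
}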
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6950d3dabf10..460a1f39a8d1 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
42obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 42obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
43obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o 43obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
44obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o 44obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
45obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
45 46
46nand-objs := nand_base.o nand_bbt.o 47nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 6d9649159a18..2d6773281fd9 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -372,15 +372,6 @@ static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
372 return __alauda_read_page(mtd, from, ignore_buf, oob); 372 return __alauda_read_page(mtd, from, ignore_buf, oob);
373} 373}
374 374
375static int popcount8(u8 c)
376{
377 int ret = 0;
378
379 for ( ; c; c>>=1)
380 ret += c & 1;
381 return ret;
382}
383
384static int alauda_isbad(struct mtd_info *mtd, loff_t ofs) 375static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
385{ 376{
386 u8 oob[16]; 377 u8 oob[16];
@@ -391,7 +382,7 @@ static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
391 return err; 382 return err;
392 383
393 /* A block is marked bad if two or more bits are zero */ 384 /* A block is marked bad if two or more bits are zero */
394 return popcount8(oob[5]) >= 7 ? 0 : 1; 385 return hweight8(oob[5]) >= 7 ? 0 : 1;
395} 386}
396 387
397static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len, 388static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
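The alauda change above replaces the open-coded popcount8() with the kernel's hweight8(); the bad-block rule itself is unchanged: the block is treated as bad when two or more bits of the marker byte are zero, i.e. when fewer than 7 bits are set. A small user-space illustration, using __builtin_popcount in place of hweight8:

#include <stdio.h>
#include <stdint.h>

static int block_is_bad(uint8_t marker)
{
	/* good only if at most one bit of the marker byte is cleared */
	return __builtin_popcount(marker) >= 7 ? 0 : 1;
}

int main(void)
{
	printf("0xff -> %d (good)\n", block_is_bad(0xff));	/* 8 bits set */
	printf("0xfe -> %d (good)\n", block_is_bad(0xfe));	/* 7 bits set */
	printf("0xfc -> %d (bad)\n",  block_is_bad(0xfc));	/* 6 bits set */
	printf("0x00 -> %d (bad)\n",  block_is_bad(0x00));
	return 0;
}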
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index f8e9975c86e5..524e6c9e0672 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -192,7 +192,6 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
192{ 192{
193 struct nand_chip *nand_chip = mtd->priv; 193 struct nand_chip *nand_chip = mtd->priv;
194 struct atmel_nand_host *host = nand_chip->priv; 194 struct atmel_nand_host *host = nand_chip->priv;
195 uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
196 unsigned int ecc_value; 195 unsigned int ecc_value;
197 196
198 /* get the first 2 ECC bytes */ 197 /* get the first 2 ECC bytes */
@@ -464,7 +463,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
464 if (host->board->det_pin) { 463 if (host->board->det_pin) {
465 if (gpio_get_value(host->board->det_pin)) { 464 if (gpio_get_value(host->board->det_pin)) {
466 printk(KERN_INFO "No SmartMedia card inserted.\n"); 465 printk(KERN_INFO "No SmartMedia card inserted.\n");
467 res = ENXIO; 466 res = -ENXIO;
468 goto err_no_card; 467 goto err_no_card;
469 } 468 }
470 } 469 }
@@ -535,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
535 534
536 if ((!partitions) || (num_partitions == 0)) { 535 if ((!partitions) || (num_partitions == 0)) {
537 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n"); 536 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
538 res = ENXIO; 537 res = -ENXIO;
539 goto err_no_partitions; 538 goto err_no_partitions;
540 } 539 }
541 540
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
new file mode 100644
index 000000000000..a930666d0687
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -0,0 +1,213 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include "nand_bcm_umi.h"
17
18/* ---- External Variable Declarations ----------------------------------- */
19/* ---- External Function Prototypes ------------------------------------- */
20/* ---- Public Variables ------------------------------------------------- */
21/* ---- Private Constants and Types -------------------------------------- */
22
23/* ---- Private Function Prototypes -------------------------------------- */
24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
25 struct nand_chip *chip, uint8_t *buf, int page);
26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
27 struct nand_chip *chip, const uint8_t *buf);
28
29/* ---- Private Variables ------------------------------------------------ */
30
31/*
32** nand_hw_eccoob
33** New oob placement block for use with hardware ecc generation.
34*/
35static struct nand_ecclayout nand_hw_eccoob_512 = {
36 /* Reserve 5 for BI indicator */
37 .oobfree = {
38#if (NAND_ECC_NUM_BYTES > 3)
39 {.offset = 0, .length = 2}
40#else
41 {.offset = 0, .length = 5},
42 {.offset = 6, .length = 7}
43#endif
44 }
45};
46
47/*
48** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
49** except the BI is at byte 0.
50*/
51static struct nand_ecclayout nand_hw_eccoob_2048 = {
52 /* Reserve 0 as BI indicator */
53 .oobfree = {
54#if (NAND_ECC_NUM_BYTES > 10)
55 {.offset = 1, .length = 2},
56#elif (NAND_ECC_NUM_BYTES > 7)
57 {.offset = 1, .length = 5},
58 {.offset = 16, .length = 6},
59 {.offset = 32, .length = 6},
60 {.offset = 48, .length = 6}
61#else
62 {.offset = 1, .length = 8},
63 {.offset = 16, .length = 9},
64 {.offset = 32, .length = 9},
65 {.offset = 48, .length = 9}
66#endif
67 }
68};
69
70/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
71 * except the BI is at byte 0. */
72static struct nand_ecclayout nand_hw_eccoob_4096 = {
73 /* Reserve 0 as BI indicator */
74 .oobfree = {
75#if (NAND_ECC_NUM_BYTES > 10)
76 {.offset = 1, .length = 2},
77 {.offset = 16, .length = 3},
78 {.offset = 32, .length = 3},
79 {.offset = 48, .length = 3},
80 {.offset = 64, .length = 3},
81 {.offset = 80, .length = 3},
82 {.offset = 96, .length = 3},
83 {.offset = 112, .length = 3}
84#else
85 {.offset = 1, .length = 5},
86 {.offset = 16, .length = 6},
87 {.offset = 32, .length = 6},
88 {.offset = 48, .length = 6},
89 {.offset = 64, .length = 6},
90 {.offset = 80, .length = 6},
91 {.offset = 96, .length = 6},
92 {.offset = 112, .length = 6}
93#endif
94 }
95};
96
97/* ---- Private Functions ------------------------------------------------ */
98/* ==== Public Functions ================================================= */
99
100/****************************************************************************
101*
102* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
103* @mtd: mtd info structure
104* @chip: nand chip info structure
105* @buf: buffer to store read data
106*
107***************************************************************************/
108static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
109 struct nand_chip *chip, uint8_t * buf,
110 int page)
111{
112 int sectorIdx = 0;
113 int eccsize = chip->ecc.size;
114 int eccsteps = chip->ecc.steps;
115 uint8_t *datap = buf;
116 uint8_t eccCalc[NAND_ECC_NUM_BYTES];
117 int sectorOobSize = mtd->oobsize / eccsteps;
118 int stat;
119
120 for (sectorIdx = 0; sectorIdx < eccsteps;
121 sectorIdx++, datap += eccsize) {
122 if (sectorIdx > 0) {
123 /* Seek to page location within sector */
124 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
125 -1);
126 }
127
128 /* Enable hardware ECC before reading the buf */
129 nand_bcm_umi_bch_enable_read_hwecc();
130
131 /* Read in data */
132 bcm_umi_nand_read_buf(mtd, datap, eccsize);
133
134 /* Pause hardware ECC after reading the buf */
135 nand_bcm_umi_bch_pause_read_ecc_calc();
136
137 /* Read the OOB ECC */
138 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
139 mtd->writesize + sectorIdx * sectorOobSize, -1);
140 nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
141 NAND_ECC_NUM_BYTES,
142 chip->oob_poi +
143 sectorIdx * sectorOobSize);
144
145 /* Correct any ECC detected errors */
146 stat =
147 nand_bcm_umi_bch_correct_page(datap, eccCalc,
148 NAND_ECC_NUM_BYTES);
149
150 /* Update Stats */
151 if (stat < 0) {
152#if defined(NAND_BCM_UMI_DEBUG)
153 printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
154 __func__, sectorIdx);
155 printk(KERN_WARNING
156 "%s data %02x %02x %02x %02x "
157 "%02x %02x %02x %02x\n",
158 __func__, datap[0], datap[1], datap[2], datap[3],
159 datap[4], datap[5], datap[6], datap[7]);
160 printk(KERN_WARNING
161 "%s ecc %02x %02x %02x %02x "
162 "%02x %02x %02x %02x %02x %02x "
163 "%02x %02x %02x\n",
164 __func__, eccCalc[0], eccCalc[1], eccCalc[2],
165 eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
166 eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
167 eccCalc[11], eccCalc[12]);
168 BUG();
169#endif
170 mtd->ecc_stats.failed++;
171 } else {
172#if defined(NAND_BCM_UMI_DEBUG)
173 if (stat > 0) {
174 printk(KERN_INFO
175 "%s %d correctable_errors detected\n",
176 __func__, stat);
177 }
178#endif
179 mtd->ecc_stats.corrected += stat;
180 }
181 }
182 return 0;
183}
184
185/****************************************************************************
186*
187* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
188* @mtd: mtd info structure
189* @chip: nand chip info structure
190* @buf: data buffer
191*
192***************************************************************************/
193static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
194 struct nand_chip *chip, const uint8_t *buf)
195{
196 int sectorIdx = 0;
197 int eccsize = chip->ecc.size;
198 int eccsteps = chip->ecc.steps;
199 const uint8_t *datap = buf;
200 uint8_t *oobp = chip->oob_poi;
201 int sectorOobSize = mtd->oobsize / eccsteps;
202
203 for (sectorIdx = 0; sectorIdx < eccsteps;
204 sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
205 /* Enable hardware ECC before writing the buf */
206 nand_bcm_umi_bch_enable_write_hwecc();
207 bcm_umi_nand_write_buf(mtd, datap, eccsize);
208 nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
209 NAND_ECC_NUM_BYTES);
210 }
211
212 bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
213}
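The layouts above split a large page's OOB area into per-512-byte-sector chunks, with everything outside the oobfree ranges reserved for the BCH ECC bytes and the bad-block indicator at byte 0. A user-space sketch of the column offsets the read path seeks to with NAND_CMD_RNDOUT, assuming a 2048+64 byte page geometry:

#include <stdio.h>

int main(void)
{
	int writesize = 2048, oobsize = 64, eccsize = 512;
	int eccsteps = writesize / eccsize;	/* 4 ECC sectors per page */
	int sector_oob = oobsize / eccsteps;	/* 16 OOB bytes per sector */
	int sector;

	for (sector = 0; sector < eccsteps; sector++)
		printf("sector %d: data at column %4d, OOB chunk at column %d\n",
		       sector, sector * eccsize,
		       writesize + sector * sector_oob);
	return 0;
}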
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
new file mode 100644
index 000000000000..087bcd745bb7
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -0,0 +1,581 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <linux/version.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/ioport.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/err.h>
26#include <linux/io.h>
27#include <linux/platform_device.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/nand_ecc.h>
31#include <linux/mtd/partitions.h>
32
33#include <asm/mach-types.h>
34#include <asm/system.h>
35
36#include <mach/reg_nand.h>
37#include <mach/reg_umi.h>
38
39#include "nand_bcm_umi.h"
40
41#include <mach/memory_settings.h>
42
43#define USE_DMA 1
44#include <mach/dma.h>
45#include <linux/dma-mapping.h>
46#include <linux/completion.h>
47
48/* ---- External Variable Declarations ----------------------------------- */
49/* ---- External Function Prototypes ------------------------------------- */
50/* ---- Public Variables ------------------------------------------------- */
51/* ---- Private Constants and Types -------------------------------------- */
52static const __devinitconst char gBanner[] = KERN_INFO \
53 "BCM UMI MTD NAND Driver: 1.00\n";
54
55#ifdef CONFIG_MTD_PARTITIONS
56const char *part_probes[] = { "cmdlinepart", NULL };
57#endif
58
59#if NAND_ECC_BCH
60static uint8_t scan_ff_pattern[] = { 0xff };
61
62static struct nand_bbt_descr largepage_bbt = {
63 .options = 0,
64 .offs = 0,
65 .len = 1,
66 .pattern = scan_ff_pattern
67};
68#endif
69
70/*
71** Preallocate a buffer to avoid having to do this every dma operation.
72** This is the size of the preallocated coherent DMA buffer.
73*/
74#if USE_DMA
75#define DMA_MIN_BUFLEN 512
76#define DMA_MAX_BUFLEN PAGE_SIZE
77#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
78 ((len) > DMA_MAX_BUFLEN))
79
80/*
81 * The current NAND data space goes from 0x80001900 to 0x80001FFF,
82 * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
83 * size NAND flash. Need to break the DMA down to multiple 1Ks.
84 *
85 * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
86 */
87#define DMA_MAX_LEN 1024
88
89#else /* !USE_DMA */
90#define DMA_MIN_BUFLEN 0
91#define DMA_MAX_BUFLEN 0
92#define USE_DIRECT_IO(len) 1
93#endif
94/* ---- Private Function Prototypes -------------------------------------- */
95static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
96static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
97 int len);
98
99/* ---- Private Variables ------------------------------------------------ */
100static struct mtd_info *board_mtd;
101static void __iomem *bcm_umi_io_base;
102static void *virtPtr;
103static dma_addr_t physPtr;
104static struct completion nand_comp;
105
106/* ---- Private Functions ------------------------------------------------ */
107#if NAND_ECC_BCH
108#include "bcm_umi_bch.c"
109#else
110#include "bcm_umi_hamming.c"
111#endif
112
113#if USE_DMA
114
115/* Handler called when the DMA finishes. */
116static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
117{
118 complete(&nand_comp);
119}
120
121static int nand_dma_init(void)
122{
123 int rc;
124
125 rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
126 nand_dma_handler, NULL);
127 if (rc != 0) {
128 printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
129 return rc;
130 }
131
132 virtPtr =
133 dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
134 if (virtPtr == NULL) {
135 printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
136 return -ENOMEM;
137 }
138
139 return 0;
140}
141
142static void nand_dma_term(void)
143{
144 if (virtPtr != NULL)
145 dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
146}
147
148static void nand_dma_read(void *buf, int len)
149{
150 int offset = 0;
151 int tmp_len = 0;
152 int len_left = len;
153 DMA_Handle_t hndl;
154
155 if (virtPtr == NULL)
156 panic("nand_dma_read: virtPtr == NULL\n");
157
158 if ((void *)physPtr == NULL)
159 panic("nand_dma_read: physPtr == NULL\n");
160
161 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
162 if (hndl < 0) {
163 printk(KERN_ERR
164 "nand_dma_read: unable to allocate dma channel: %d\n",
165 (int)hndl);
166 panic("\n");
167 }
168
169 while (len_left > 0) {
170 if (len_left > DMA_MAX_LEN) {
171 tmp_len = DMA_MAX_LEN;
172 len_left -= DMA_MAX_LEN;
173 } else {
174 tmp_len = len_left;
175 len_left = 0;
176 }
177
178 init_completion(&nand_comp);
179 dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
180 physPtr + offset, tmp_len);
181 wait_for_completion(&nand_comp);
182
183 offset += tmp_len;
184 }
185
186 dma_free_channel(hndl);
187
188 if (buf != NULL)
189 memcpy(buf, virtPtr, len);
190}
191
192static void nand_dma_write(const void *buf, int len)
193{
194 int offset = 0;
195 int tmp_len = 0;
196 int len_left = len;
197 DMA_Handle_t hndl;
198
199 if (buf == NULL)
200 panic("nand_dma_write: buf == NULL\n");
201
202 if (virtPtr == NULL)
203 panic("nand_dma_write: virtPtr == NULL\n");
204
205 if ((void *)physPtr == NULL)
206 panic("nand_dma_write: physPtr == NULL\n");
207
208 memcpy(virtPtr, buf, len);
209
210
211 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
212 if (hndl < 0) {
213 printk(KERN_ERR
214 "nand_dma_write: unable to allocate dma channel: %d\n",
215 (int)hndl);
216 panic("\n");
217 }
218
219 while (len_left > 0) {
220 if (len_left > DMA_MAX_LEN) {
221 tmp_len = DMA_MAX_LEN;
222 len_left -= DMA_MAX_LEN;
223 } else {
224 tmp_len = len_left;
225 len_left = 0;
226 }
227
228 init_completion(&nand_comp);
229 dma_transfer_mem_to_mem(hndl, physPtr + offset,
230 REG_NAND_DATA_PADDR, tmp_len);
231 wait_for_completion(&nand_comp);
232
233 offset += tmp_len;
234 }
235
236 dma_free_channel(hndl);
237}
238
239#endif
240
241static int nand_dev_ready(struct mtd_info *mtd)
242{
243 return nand_bcm_umi_dev_ready();
244}
245
246/****************************************************************************
247*
248* bcm_umi_nand_inithw
249*
250* This routine does the necessary hardware (board-specific)
251* initializations. This includes setting up the timings, etc.
252*
253***************************************************************************/
254int bcm_umi_nand_inithw(void)
255{
256 /* Configure nand timing parameters */
257 REG_UMI_NAND_TCR &= ~0x7ffff;
258 REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
259
260#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
261 /* enable software control of CS */
262 REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
263#endif
264
265 /* keep NAND chip select asserted */
266 REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
267
268 REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
269 /* enable writes to flash */
270 REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
271
272 writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
273 nand_bcm_umi_wait_till_ready();
274
275#if NAND_ECC_BCH
276 nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
277#endif
278
279 return 0;
280}
281
282/* Used to latch the proper register for access. */
283static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
284 unsigned int ctrl)
285{
286 /* send command to hardware */
287 struct nand_chip *chip = mtd->priv;
288 if (ctrl & NAND_CTRL_CHANGE) {
289 if (ctrl & NAND_CLE) {
290 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
291 goto CMD;
292 }
293 if (ctrl & NAND_ALE) {
294 chip->IO_ADDR_W =
295 bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
296 goto CMD;
297 }
298 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
299 }
300
301CMD:
302 /* Send command to chip directly */
303 if (cmd != NAND_CMD_NONE)
304 writeb(cmd, chip->IO_ADDR_W);
305}
306
307static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
308 int len)
309{
310 if (USE_DIRECT_IO(len)) {
311 /* Do it the old way if the buffer is small or too large.
312 * Probably quicker than starting and checking dma. */
313 int i;
314 struct nand_chip *this = mtd->priv;
315
316 for (i = 0; i < len; i++)
317 writeb(buf[i], this->IO_ADDR_W);
318 }
319#if USE_DMA
320 else
321 nand_dma_write(buf, len);
322#endif
323}
324
325static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
326{
327 if (USE_DIRECT_IO(len)) {
328 int i;
329 struct nand_chip *this = mtd->priv;
330
331 for (i = 0; i < len; i++)
332 buf[i] = readb(this->IO_ADDR_R);
333 }
334#if USE_DMA
335 else
336 nand_dma_read(buf, len);
337#endif
338}
339
340static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
341static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
342 int len)
343{
344 /*
345 * Try to readback page with ECC correction. This is necessary
346 * for MLC parts which may have permanently stuck bits.
347 */
348 struct nand_chip *chip = mtd->priv;
349 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
350 if (ret < 0)
351 return -EFAULT;
352 else {
353 if (memcmp(readbackbuf, buf, len) == 0)
354 return 0;
355
356 return -EFAULT;
357 }
358 return 0;
359}
360
361static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
362{
363 struct nand_chip *this;
364 struct resource *r;
365 int err = 0;
366
367 printk(gBanner);
368
369 /* Allocate memory for MTD device structure and private data */
370 board_mtd =
371 kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
372 GFP_KERNEL);
373 if (!board_mtd) {
374 printk(KERN_WARNING
375 "Unable to allocate NAND MTD device structure.\n");
376 return -ENOMEM;
377 }
378
379 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
380
381 if (!r)
382 return -ENXIO;
383
384 /* map physical address */
385 bcm_umi_io_base = ioremap(r->start, r->end - r->start + 1);
386
387 if (!bcm_umi_io_base) {
388 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
389 kfree(board_mtd);
390 return -EIO;
391 }
392
393 /* Get pointer to private data */
394 this = (struct nand_chip *)(&board_mtd[1]);
395
396 /* Initialize structures */
397 memset((char *)board_mtd, 0, sizeof(struct mtd_info));
398 memset((char *)this, 0, sizeof(struct nand_chip));
399
400 /* Link the private data with the MTD structure */
401 board_mtd->priv = this;
402
403 /* Initialize the NAND hardware. */
404 if (bcm_umi_nand_inithw() < 0) {
405 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
406 iounmap(bcm_umi_io_base);
407 kfree(board_mtd);
408 return -EIO;
409 }
410
411 /* Set address of NAND IO lines */
412 this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
413 this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
414
415 /* Set command delay time, see datasheet for correct value */
416 this->chip_delay = 0;
417 /* Assign the device ready function, if available */
418 this->dev_ready = nand_dev_ready;
419 this->options = 0;
420
421 this->write_buf = bcm_umi_nand_write_buf;
422 this->read_buf = bcm_umi_nand_read_buf;
423 this->verify_buf = bcm_umi_nand_verify_buf;
424
425 this->cmd_ctrl = bcm_umi_nand_hwcontrol;
426 this->ecc.mode = NAND_ECC_HW;
427 this->ecc.size = 512;
428 this->ecc.bytes = NAND_ECC_NUM_BYTES;
429#if NAND_ECC_BCH
430 this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
431 this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
432#else
433 this->ecc.correct = nand_correct_data512;
434 this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
435 this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
436#endif
437
438#if USE_DMA
439 err = nand_dma_init();
440 if (err != 0)
441 return err;
442#endif
443
444 /* Figure out the size of the device that we have.
445 * We need to do this to figure out which ECC
446 * layout we'll be using.
447 */
448
449 err = nand_scan_ident(board_mtd, 1);
450 if (err) {
451 printk(KERN_ERR "nand_scan failed: %d\n", err);
452 iounmap(bcm_umi_io_base);
453 kfree(board_mtd);
454 return err;
455 }
456
457 /* Now that we know the nand size, we can setup the ECC layout */
458
459 switch (board_mtd->writesize) { /* writesize is the pagesize */
460 case 4096:
461 this->ecc.layout = &nand_hw_eccoob_4096;
462 break;
463 case 2048:
464 this->ecc.layout = &nand_hw_eccoob_2048;
465 break;
466 case 512:
467 this->ecc.layout = &nand_hw_eccoob_512;
468 break;
469 default:
470 {
471 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
472 board_mtd->writesize);
473 return -EINVAL;
474 }
475 }
476
477#if NAND_ECC_BCH
478 if (board_mtd->writesize > 512) {
479 if (this->options & NAND_USE_FLASH_BBT)
480 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
481 this->badblock_pattern = &largepage_bbt;
482 }
483#endif
484
485 /* Now finish off the scan, now that ecc.layout has been initialized. */
486
487 err = nand_scan_tail(board_mtd);
488 if (err) {
489 printk(KERN_ERR "nand_scan failed: %d\n", err);
490 iounmap(bcm_umi_io_base);
491 kfree(board_mtd);
492 return err;
493 }
494
495 /* Register the partitions */
496 {
497 int nr_partitions;
498 struct mtd_partition *partition_info;
499
500 board_mtd->name = "bcm_umi-nand";
501 nr_partitions =
502 parse_mtd_partitions(board_mtd, part_probes,
503 &partition_info, 0);
504
505 if (nr_partitions <= 0) {
506 printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
507 nr_partitions);
508 iounmap(bcm_umi_io_base);
509 kfree(board_mtd);
510 return -EIO;
511 }
512 add_mtd_partitions(board_mtd, partition_info, nr_partitions);
513 }
514
515 /* Return happy */
516 return 0;
517}
518
519static int bcm_umi_nand_remove(struct platform_device *pdev)
520{
521#if USE_DMA
522 nand_dma_term();
523#endif
524
525 /* Release resources, unregister device */
526 nand_release(board_mtd);
527
528 /* unmap physical address */
529 iounmap(bcm_umi_io_base);
530
531 /* Free the MTD device structure */
532 kfree(board_mtd);
533
534 return 0;
535}
536
537#ifdef CONFIG_PM
538static int bcm_umi_nand_suspend(struct platform_device *pdev,
539 pm_message_t state)
540{
541 printk(KERN_ERR "MTD NAND suspend is being called\n");
542 return 0;
543}
544
545static int bcm_umi_nand_resume(struct platform_device *pdev)
546{
547 printk(KERN_ERR "MTD NAND resume is being called\n");
548 return 0;
549}
550#else
551#define bcm_umi_nand_suspend NULL
552#define bcm_umi_nand_resume NULL
553#endif
554
555static struct platform_driver nand_driver = {
556 .driver = {
557 .name = "bcm-nand",
558 .owner = THIS_MODULE,
559 },
560 .probe = bcm_umi_nand_probe,
561 .remove = bcm_umi_nand_remove,
562 .suspend = bcm_umi_nand_suspend,
563 .resume = bcm_umi_nand_resume,
564};
565
566static int __init nand_init(void)
567{
568 return platform_driver_register(&nand_driver);
569}
570
571static void __exit nand_exit(void)
572{
573 platform_driver_unregister(&nand_driver);
574}
575
576module_init(nand_init);
577module_exit(nand_exit);
578
579MODULE_LICENSE("GPL");
580MODULE_AUTHOR("Broadcom");
581MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
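Because the BCM UMI NAND data window is only 1792 bytes, nand_dma_read()/nand_dma_write() above split every transfer into chunks of at most DMA_MAX_LEN (1024) bytes. A stand-alone sketch of just that chunking loop (the 4 KiB length is an arbitrary example):

#include <stdio.h>

#define DMA_MAX_LEN 1024

int main(void)
{
	int len = 4096;			/* e.g. one 4 KiB page */
	int offset = 0, len_left = len, tmp_len;

	while (len_left > 0) {
		tmp_len = len_left > DMA_MAX_LEN ? DMA_MAX_LEN : len_left;
		len_left -= tmp_len;
		printf("dma chunk: offset %4d, length %4d\n", offset, tmp_len);
		offset += tmp_len;
	}
	return 0;
}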
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index f13f5b9afaf7..fe3eba87de40 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -591,6 +591,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
591 591
592 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ 592 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */
593 info->chip.options = pdata->options; 593 info->chip.options = pdata->options;
594 info->chip.bbt_td = pdata->bbt_td;
595 info->chip.bbt_md = pdata->bbt_md;
594 596
595 info->ioaddr = (uint32_t __force) vaddr; 597 info->ioaddr = (uint32_t __force) vaddr;
596 598
@@ -599,7 +601,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
599 info->mask_chipsel = pdata->mask_chipsel; 601 info->mask_chipsel = pdata->mask_chipsel;
600 602
601 /* use nandboot-capable ALE/CLE masks by default */ 603 /* use nandboot-capable ALE/CLE masks by default */
602 info->mask_ale = pdata->mask_cle ? : MASK_ALE; 604 info->mask_ale = pdata->mask_ale ? : MASK_ALE;
603 info->mask_cle = pdata->mask_cle ? : MASK_CLE; 605 info->mask_cle = pdata->mask_cle ? : MASK_CLE;
604 606
605 /* Set address of hardware control function */ 607 /* Set address of hardware control function */
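The corrected mask_ale line above relies on GCC's "x ? : y" shorthand, which evaluates to x when x is non-zero and to y otherwise, so a board-supplied mask is used and the nandboot-capable default only serves as a fallback. A tiny user-space demonstration (the mask values are made up):

#include <stdio.h>

#define MASK_ALE 0x08
#define MASK_CLE 0x10

int main(void)
{
	unsigned int pdata_mask_ale = 0;	/* board left it unset */
	unsigned int pdata_mask_cle = 0x2000;	/* board provided a value */

	printf("ALE mask: 0x%x\n", pdata_mask_ale ? : MASK_ALE);	/* falls back */
	printf("CLE mask: 0x%x\n", pdata_mask_cle ? : MASK_CLE);	/* keeps 0x2000 */
	return 0;
}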
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
index 72446fb48d4b..af6a6a5399e1 100644
--- a/drivers/mtd/nand/excite_nandflash.c
+++ b/drivers/mtd/nand/excite_nandflash.c
@@ -128,7 +128,7 @@ static int excite_nand_devready(struct mtd_info *mtd)
128 * The binding to the mtd and all allocated 128 * The binding to the mtd and all allocated
129 * resources are released. 129 * resources are released.
130 */ 130 */
131static int __exit excite_nand_remove(struct platform_device *dev) 131static int __devexit excite_nand_remove(struct platform_device *dev)
132{ 132{
133 struct excite_nand_drvdata * const this = platform_get_drvdata(dev); 133 struct excite_nand_drvdata * const this = platform_get_drvdata(dev);
134 134
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ddd37d2554ed..ae30fb6eed97 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -237,12 +237,15 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
237 237
238 ctrl->use_mdr = 0; 238 ctrl->use_mdr = 0;
239 239
240 dev_vdbg(ctrl->dev, 240 if (ctrl->status != LTESR_CC) {
241 "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n", 241 dev_info(ctrl->dev,
242 ctrl->status, ctrl->mdr, in_be32(&lbc->fmr)); 242 "command failed: fir %x fcr %x status %x mdr %x\n",
243 in_be32(&lbc->fir), in_be32(&lbc->fcr),
244 ctrl->status, ctrl->mdr);
245 return -EIO;
246 }
243 247
244 /* returns 0 on success otherwise non-zero) */ 248 return 0;
245 return ctrl->status == LTESR_CC ? 0 : -EIO;
246} 249}
247 250
248static void fsl_elbc_do_read(struct nand_chip *chip, int oob) 251static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
@@ -253,17 +256,17 @@ static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
253 256
254 if (priv->page_size) { 257 if (priv->page_size) {
255 out_be32(&lbc->fir, 258 out_be32(&lbc->fir,
256 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 259 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
257 (FIR_OP_CA << FIR_OP1_SHIFT) | 260 (FIR_OP_CA << FIR_OP1_SHIFT) |
258 (FIR_OP_PA << FIR_OP2_SHIFT) | 261 (FIR_OP_PA << FIR_OP2_SHIFT) |
259 (FIR_OP_CW1 << FIR_OP3_SHIFT) | 262 (FIR_OP_CM1 << FIR_OP3_SHIFT) |
260 (FIR_OP_RBW << FIR_OP4_SHIFT)); 263 (FIR_OP_RBW << FIR_OP4_SHIFT));
261 264
262 out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | 265 out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
263 (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); 266 (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
264 } else { 267 } else {
265 out_be32(&lbc->fir, 268 out_be32(&lbc->fir,
266 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 269 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
267 (FIR_OP_CA << FIR_OP1_SHIFT) | 270 (FIR_OP_CA << FIR_OP1_SHIFT) |
268 (FIR_OP_PA << FIR_OP2_SHIFT) | 271 (FIR_OP_PA << FIR_OP2_SHIFT) |
269 (FIR_OP_RBW << FIR_OP3_SHIFT)); 272 (FIR_OP_RBW << FIR_OP3_SHIFT));
@@ -332,7 +335,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
332 case NAND_CMD_READID: 335 case NAND_CMD_READID:
333 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n"); 336 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
334 337
335 out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) | 338 out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
336 (FIR_OP_UA << FIR_OP1_SHIFT) | 339 (FIR_OP_UA << FIR_OP1_SHIFT) |
337 (FIR_OP_RBW << FIR_OP2_SHIFT)); 340 (FIR_OP_RBW << FIR_OP2_SHIFT));
338 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
@@ -359,16 +362,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
359 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); 362 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
360 363
361 out_be32(&lbc->fir, 364 out_be32(&lbc->fir,
362 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 365 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
363 (FIR_OP_PA << FIR_OP1_SHIFT) | 366 (FIR_OP_PA << FIR_OP1_SHIFT) |
364 (FIR_OP_CM1 << FIR_OP2_SHIFT)); 367 (FIR_OP_CM2 << FIR_OP2_SHIFT) |
368 (FIR_OP_CW1 << FIR_OP3_SHIFT) |
369 (FIR_OP_RS << FIR_OP4_SHIFT));
365 370
366 out_be32(&lbc->fcr, 371 out_be32(&lbc->fcr,
367 (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) | 372 (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
368 (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT)); 373 (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
374 (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
369 375
370 out_be32(&lbc->fbcr, 0); 376 out_be32(&lbc->fbcr, 0);
371 ctrl->read_bytes = 0; 377 ctrl->read_bytes = 0;
378 ctrl->use_mdr = 1;
372 379
373 fsl_elbc_run_command(mtd); 380 fsl_elbc_run_command(mtd);
374 return; 381 return;
@@ -383,40 +390,41 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
383 390
384 ctrl->column = column; 391 ctrl->column = column;
385 ctrl->oob = 0; 392 ctrl->oob = 0;
393 ctrl->use_mdr = 1;
386 394
387 if (priv->page_size) { 395 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
388 fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) | 396 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
389 (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT); 397 (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
390 398
399 if (priv->page_size) {
391 out_be32(&lbc->fir, 400 out_be32(&lbc->fir,
392 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 401 (FIR_OP_CM2 << FIR_OP0_SHIFT) |
393 (FIR_OP_CA << FIR_OP1_SHIFT) | 402 (FIR_OP_CA << FIR_OP1_SHIFT) |
394 (FIR_OP_PA << FIR_OP2_SHIFT) | 403 (FIR_OP_PA << FIR_OP2_SHIFT) |
395 (FIR_OP_WB << FIR_OP3_SHIFT) | 404 (FIR_OP_WB << FIR_OP3_SHIFT) |
396 (FIR_OP_CW1 << FIR_OP4_SHIFT)); 405 (FIR_OP_CM3 << FIR_OP4_SHIFT) |
406 (FIR_OP_CW1 << FIR_OP5_SHIFT) |
407 (FIR_OP_RS << FIR_OP6_SHIFT));
397 } else { 408 } else {
398 fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
399 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
400
401 out_be32(&lbc->fir, 409 out_be32(&lbc->fir,
402 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 410 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
403 (FIR_OP_CM2 << FIR_OP1_SHIFT) | 411 (FIR_OP_CM2 << FIR_OP1_SHIFT) |
404 (FIR_OP_CA << FIR_OP2_SHIFT) | 412 (FIR_OP_CA << FIR_OP2_SHIFT) |
405 (FIR_OP_PA << FIR_OP3_SHIFT) | 413 (FIR_OP_PA << FIR_OP3_SHIFT) |
406 (FIR_OP_WB << FIR_OP4_SHIFT) | 414 (FIR_OP_WB << FIR_OP4_SHIFT) |
407 (FIR_OP_CW1 << FIR_OP5_SHIFT)); 415 (FIR_OP_CM3 << FIR_OP5_SHIFT) |
416 (FIR_OP_CW1 << FIR_OP6_SHIFT) |
417 (FIR_OP_RS << FIR_OP7_SHIFT));
408 418
409 if (column >= mtd->writesize) { 419 if (column >= mtd->writesize) {
410 /* OOB area --> READOOB */ 420 /* OOB area --> READOOB */
411 column -= mtd->writesize; 421 column -= mtd->writesize;
412 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; 422 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
413 ctrl->oob = 1; 423 ctrl->oob = 1;
414 } else if (column < 256) { 424 } else {
425 WARN_ON(column != 0);
415 /* First 256 bytes --> READ0 */ 426 /* First 256 bytes --> READ0 */
416 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 427 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
417 } else {
418 /* Second 256 bytes --> READ1 */
419 fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
420 } 428 }
421 } 429 }
422 430
@@ -628,22 +636,6 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
628{ 636{
629 struct fsl_elbc_mtd *priv = chip->priv; 637 struct fsl_elbc_mtd *priv = chip->priv;
630 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 638 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
631 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
632
633 if (ctrl->status != LTESR_CC)
634 return NAND_STATUS_FAIL;
635
636 /* Use READ_STATUS command, but wait for the device to be ready */
637 ctrl->use_mdr = 0;
638 out_be32(&lbc->fir,
639 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
640 (FIR_OP_RBW << FIR_OP1_SHIFT));
641 out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
642 out_be32(&lbc->fbcr, 1);
643 set_addr(mtd, 0, 0, 0);
644 ctrl->read_bytes = 1;
645
646 fsl_elbc_run_command(mtd);
647 639
648 if (ctrl->status != LTESR_CC) 640 if (ctrl->status != LTESR_CC)
649 return NAND_STATUS_FAIL; 641 return NAND_STATUS_FAIL;
@@ -651,8 +643,7 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
651 /* The chip always seems to report that it is 643 /* The chip always seems to report that it is
652 * write-protected, even when it is not. 644 * write-protected, even when it is not.
653 */ 645 */
654 setbits8(ctrl->addr, NAND_STATUS_WP); 646 return (ctrl->mdr & 0xff) | NAND_STATUS_WP;
655 return fsl_elbc_read_byte(mtd);
656} 647}
657 648
658static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) 649static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
@@ -946,6 +937,13 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
946{ 937{
947 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 938 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
948 939
940 /*
941 * NAND transactions can tie up the bus for a long time, so set the
942 * bus timeout to max by clearing LBCR[BMT] (highest base counter
943 * value) and setting LBCR[BMTPS] to the highest prescaler value.
944 */
945 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, 15);
946
949 /* clear event registers */ 947 /* clear event registers */
950 setbits32(&lbc->ltesr, LTESR_NAND_MASK); 948 setbits32(&lbc->ltesr, LTESR_NAND_MASK);
951 out_be32(&lbc->lteatr, 0); 949 out_be32(&lbc->lteatr, 0);
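With the READ STATUS step (FIR_OP_RS) appended to the erase and program sequences above, fsl_elbc_wait() can return the status byte straight from MDR instead of issuing a separate command. A user-space sketch of how such a status byte is conventionally decoded; the bit assignments follow the common NAND convention and are an assumption here, as is the forced write-protect bit mirroring the workaround in fsl_elbc_wait():

#include <stdio.h>
#include <stdint.h>

#define NAND_STATUS_FAIL	0x01
#define NAND_STATUS_READY	0x40
#define NAND_STATUS_WP		0x80	/* 1 = not write-protected */

static void decode(uint8_t mdr)
{
	/* force WP because the chip always mis-reports it as protected */
	uint8_t status = (mdr & 0xff) | NAND_STATUS_WP;

	printf("raw 0x%02x: %s, %s\n", mdr,
	       status & NAND_STATUS_READY ? "ready" : "busy",
	       status & NAND_STATUS_FAIL ? "operation failed" : "ok");
}

int main(void)
{
	decode(0xc0);	/* ready, pass */
	decode(0xc1);	/* ready, fail */
	return 0;
}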
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index d120cd8d7267..071a60cb4204 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -112,7 +112,7 @@ static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
112 112
113 if (mchip_nr == -1) { 113 if (mchip_nr == -1) {
114 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); 114 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
115 } else if (mchip_nr >= 0) { 115 } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
116 fun->mchip_number = mchip_nr; 116 fun->mchip_number = mchip_nr;
117 chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr]; 117 chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
118 chip->IO_ADDR_W = chip->IO_ADDR_R; 118 chip->IO_ADDR_W = chip->IO_ADDR_R;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 65b26d5a5c0d..45dec5770da0 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -33,9 +33,13 @@
33 33
34#include <asm/mach/flash.h> 34#include <asm/mach/flash.h>
35#include <mach/mxc_nand.h> 35#include <mach/mxc_nand.h>
36#include <mach/hardware.h>
36 37
37#define DRIVER_NAME "mxc_nand" 38#define DRIVER_NAME "mxc_nand"
38 39
40#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
41#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
42
39/* Addresses for NFC registers */ 43/* Addresses for NFC registers */
40#define NFC_BUF_SIZE 0xE00 44#define NFC_BUF_SIZE 0xE00
41#define NFC_BUF_ADDR 0xE04 45#define NFC_BUF_ADDR 0xE04
@@ -46,24 +50,14 @@
46#define NFC_RSLTMAIN_AREA 0xE0E 50#define NFC_RSLTMAIN_AREA 0xE0E
47#define NFC_RSLTSPARE_AREA 0xE10 51#define NFC_RSLTSPARE_AREA 0xE10
48#define NFC_WRPROT 0xE12 52#define NFC_WRPROT 0xE12
49#define NFC_UNLOCKSTART_BLKADDR 0xE14 53#define NFC_V1_UNLOCKSTART_BLKADDR 0xe14
50#define NFC_UNLOCKEND_BLKADDR 0xE16 54#define NFC_V1_UNLOCKEND_BLKADDR 0xe16
55#define NFC_V21_UNLOCKSTART_BLKADDR 0xe20
56#define NFC_V21_UNLOCKEND_BLKADDR 0xe22
51#define NFC_NF_WRPRST 0xE18 57#define NFC_NF_WRPRST 0xE18
52#define NFC_CONFIG1 0xE1A 58#define NFC_CONFIG1 0xE1A
53#define NFC_CONFIG2 0xE1C 59#define NFC_CONFIG2 0xE1C
54 60
55/* Addresses for NFC RAM BUFFER Main area 0 */
56#define MAIN_AREA0 0x000
57#define MAIN_AREA1 0x200
58#define MAIN_AREA2 0x400
59#define MAIN_AREA3 0x600
60
61/* Addresses for NFC SPARE BUFFER Spare area 0 */
62#define SPARE_AREA0 0x800
63#define SPARE_AREA1 0x810
64#define SPARE_AREA2 0x820
65#define SPARE_AREA3 0x830
66
67/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register 61/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register
68 * for Command operation */ 62 * for Command operation */
69#define NFC_CMD 0x1 63#define NFC_CMD 0x1
@@ -106,48 +100,66 @@ struct mxc_nand_host {
106 struct mtd_partition *parts; 100 struct mtd_partition *parts;
107 struct device *dev; 101 struct device *dev;
108 102
103 void *spare0;
104 void *main_area0;
105 void *main_area1;
106
107 void __iomem *base;
109 void __iomem *regs; 108 void __iomem *regs;
110 int spare_only;
111 int status_request; 109 int status_request;
112 int pagesize_2k;
113 uint16_t col_addr;
114 struct clk *clk; 110 struct clk *clk;
115 int clk_act; 111 int clk_act;
116 int irq; 112 int irq;
117 113
118 wait_queue_head_t irq_waitq; 114 wait_queue_head_t irq_waitq;
119};
120
121/* Define delays in microsec for NAND device operations */
122#define TROP_US_DELAY 2000
123/* Macros to get byte and bit positions of ECC */
124#define COLPOS(x) ((x) >> 3)
125#define BITPOS(x) ((x) & 0xf)
126 115
127/* Define single bit Error positions in Main & Spare area */ 116 uint8_t *data_buf;
128#define MAIN_SINGLEBIT_ERROR 0x4 117 unsigned int buf_start;
129#define SPARE_SINGLEBIT_ERROR 0x1 118 int spare_len;
130
131/* OOB placement block for use with hardware ecc generation */
132static struct nand_ecclayout nand_hw_eccoob_8 = {
133 .eccbytes = 5,
134 .eccpos = {6, 7, 8, 9, 10},
135 .oobfree = {{0, 5}, {11, 5}, }
136}; 119};
137 120
138static struct nand_ecclayout nand_hw_eccoob_16 = { 121/* OOB placement block for use with hardware ecc generation */
122static struct nand_ecclayout nandv1_hw_eccoob_smallpage = {
139 .eccbytes = 5, 123 .eccbytes = 5,
140 .eccpos = {6, 7, 8, 9, 10}, 124 .eccpos = {6, 7, 8, 9, 10},
141 .oobfree = {{0, 5}, {11, 5}, } 125 .oobfree = {{0, 5}, {12, 4}, }
142}; 126};
143 127
144static struct nand_ecclayout nand_hw_eccoob_64 = { 128static struct nand_ecclayout nandv1_hw_eccoob_largepage = {
145 .eccbytes = 20, 129 .eccbytes = 20,
146 .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 130 .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
147 38, 39, 40, 41, 42, 54, 55, 56, 57, 58}, 131 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
148 .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, } 132 .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
149}; 133};
150 134
135/* OOB description for 512 byte pages with 16 byte OOB */
136static struct nand_ecclayout nandv2_hw_eccoob_smallpage = {
137 .eccbytes = 1 * 9,
138 .eccpos = {
139 7, 8, 9, 10, 11, 12, 13, 14, 15
140 },
141 .oobfree = {
142 {.offset = 0, .length = 5}
143 }
144};
145
146/* OOB description for 2048 byte pages with 64 byte OOB */
147static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
148 .eccbytes = 4 * 9,
149 .eccpos = {
150 7, 8, 9, 10, 11, 12, 13, 14, 15,
151 23, 24, 25, 26, 27, 28, 29, 30, 31,
152 39, 40, 41, 42, 43, 44, 45, 46, 47,
153 55, 56, 57, 58, 59, 60, 61, 62, 63
154 },
155 .oobfree = {
156 {.offset = 2, .length = 4},
157 {.offset = 16, .length = 7},
158 {.offset = 32, .length = 7},
159 {.offset = 48, .length = 7}
160 }
161};
162
151#ifdef CONFIG_MTD_PARTITIONS 163#ifdef CONFIG_MTD_PARTITIONS
152static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; 164static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
153#endif 165#endif
@@ -170,10 +182,10 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
170/* This function polls the NANDFC to wait for the basic operation to 182/* This function polls the NANDFC to wait for the basic operation to
171 * complete by checking the INT bit of config2 register. 183 * complete by checking the INT bit of config2 register.
172 */ 184 */
173static void wait_op_done(struct mxc_nand_host *host, int max_retries, 185static void wait_op_done(struct mxc_nand_host *host, int useirq)
174 uint16_t param, int useirq)
175{ 186{
176 uint32_t tmp; 187 uint32_t tmp;
188 int max_retries = 2000;
177 189
178 if (useirq) { 190 if (useirq) {
179 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { 191 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
@@ -200,8 +212,8 @@ static void wait_op_done(struct mxc_nand_host *host, int max_retries,
200 udelay(1); 212 udelay(1);
201 } 213 }
202 if (max_retries < 0) 214 if (max_retries < 0)
203 DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n", 215 DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n",
204 __func__, param); 216 __func__);
205 } 217 }
206} 218}
207 219
@@ -215,7 +227,7 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
215 writew(NFC_CMD, host->regs + NFC_CONFIG2); 227 writew(NFC_CMD, host->regs + NFC_CONFIG2);
216 228
217 /* Wait for operation to complete */ 229 /* Wait for operation to complete */
218 wait_op_done(host, TROP_US_DELAY, cmd, useirq); 230 wait_op_done(host, useirq);
219} 231}
220 232
221/* This function sends an address (or partial address) to the 233/* This function sends an address (or partial address) to the
@@ -229,82 +241,47 @@ static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast)
229 writew(NFC_ADDR, host->regs + NFC_CONFIG2); 241 writew(NFC_ADDR, host->regs + NFC_CONFIG2);
230 242
231 /* Wait for operation to complete */ 243 /* Wait for operation to complete */
232 wait_op_done(host, TROP_US_DELAY, addr, islast); 244 wait_op_done(host, islast);
233} 245}
234 246
235/* This function requests the NANDFC to initiate the transfer 247static void send_page(struct mtd_info *mtd, unsigned int ops)
236 * of data currently in the NANDFC RAM buffer to the NAND device. */
237static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id,
238 int spare_only)
239{ 248{
240 DEBUG(MTD_DEBUG_LEVEL3, "send_prog_page (%d)\n", spare_only); 249 struct nand_chip *nand_chip = mtd->priv;
241 250 struct mxc_nand_host *host = nand_chip->priv;
242 /* NANDFC buffer 0 is used for page read/write */ 251 int bufs, i;
243 writew(buf_id, host->regs + NFC_BUF_ADDR);
244
245 /* Configure spare or page+spare access */
246 if (!host->pagesize_2k) {
247 uint16_t config1 = readw(host->regs + NFC_CONFIG1);
248 if (spare_only)
249 config1 |= NFC_SP_EN;
250 else
251 config1 &= ~(NFC_SP_EN);
252 writew(config1, host->regs + NFC_CONFIG1);
253 }
254 252
255 writew(NFC_INPUT, host->regs + NFC_CONFIG2); 253 if (nfc_is_v1() && mtd->writesize > 512)
254 bufs = 4;
255 else
256 bufs = 1;
256 257
257 /* Wait for operation to complete */ 258 for (i = 0; i < bufs; i++) {
258 wait_op_done(host, TROP_US_DELAY, spare_only, true);
259}
260 259
261/* Requests NANDFC to initated the transfer of data from the 260 /* NANDFC buffer 0 is used for page read/write */
262 * NAND device into in the NANDFC ram buffer. */ 261 writew(i, host->regs + NFC_BUF_ADDR);
263static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id,
264 int spare_only)
265{
266 DEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only);
267 262
268 /* NANDFC buffer 0 is used for page read/write */ 263 writew(ops, host->regs + NFC_CONFIG2);
269 writew(buf_id, host->regs + NFC_BUF_ADDR);
270 264
271 /* Configure spare or page+spare access */ 265 /* Wait for operation to complete */
272 if (!host->pagesize_2k) { 266 wait_op_done(host, true);
273 uint32_t config1 = readw(host->regs + NFC_CONFIG1);
274 if (spare_only)
275 config1 |= NFC_SP_EN;
276 else
277 config1 &= ~NFC_SP_EN;
278 writew(config1, host->regs + NFC_CONFIG1);
279 } 267 }
280
281 writew(NFC_OUTPUT, host->regs + NFC_CONFIG2);
282
283 /* Wait for operation to complete */
284 wait_op_done(host, TROP_US_DELAY, spare_only, true);
285} 268}
286 269
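send_page() replaces the separate send_prog_page()/send_read_page() pair: one call moves a whole page between the NFC RAM buffers and the chip, looping over four 512-byte buffers only on a v1 controller with large pages. Its two call sites later in this patch look like:

	/* Sketch: how mxc_nand_command() uses send_page() below. */
	send_page(mtd, NFC_OUTPUT);	/* NAND page -> NFC RAM buffers (read)     */
	send_page(mtd, NFC_INPUT);	/* NFC RAM buffers -> NAND page (program)  */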
287/* Request the NANDFC to perform a read of the NAND device ID. */ 270/* Request the NANDFC to perform a read of the NAND device ID. */
288static void send_read_id(struct mxc_nand_host *host) 271static void send_read_id(struct mxc_nand_host *host)
289{ 272{
290 struct nand_chip *this = &host->nand; 273 struct nand_chip *this = &host->nand;
291 uint16_t tmp;
292 274
293 /* NANDFC buffer 0 is used for device ID output */ 275 /* NANDFC buffer 0 is used for device ID output */
294 writew(0x0, host->regs + NFC_BUF_ADDR); 276 writew(0x0, host->regs + NFC_BUF_ADDR);
295 277
296 /* Read ID into main buffer */
297 tmp = readw(host->regs + NFC_CONFIG1);
298 tmp &= ~NFC_SP_EN;
299 writew(tmp, host->regs + NFC_CONFIG1);
300
301 writew(NFC_ID, host->regs + NFC_CONFIG2); 278 writew(NFC_ID, host->regs + NFC_CONFIG2);
302 279
303 /* Wait for operation to complete */ 280 /* Wait for operation to complete */
304 wait_op_done(host, TROP_US_DELAY, 0, true); 281 wait_op_done(host, true);
305 282
306 if (this->options & NAND_BUSWIDTH_16) { 283 if (this->options & NAND_BUSWIDTH_16) {
307 void __iomem *main_buf = host->regs + MAIN_AREA0; 284 void __iomem *main_buf = host->main_area0;
308 /* compress the ID info */ 285 /* compress the ID info */
309 writeb(readb(main_buf + 2), main_buf + 1); 286 writeb(readb(main_buf + 2), main_buf + 1);
310 writeb(readb(main_buf + 4), main_buf + 2); 287 writeb(readb(main_buf + 4), main_buf + 2);
@@ -312,15 +289,16 @@ static void send_read_id(struct mxc_nand_host *host)
312 writeb(readb(main_buf + 8), main_buf + 4); 289 writeb(readb(main_buf + 8), main_buf + 4);
313 writeb(readb(main_buf + 10), main_buf + 5); 290 writeb(readb(main_buf + 10), main_buf + 5);
314 } 291 }
292 memcpy(host->data_buf, host->main_area0, 16);
315} 293}
316 294
317/* This function requests the NANDFC to perform a read of the 295/* This function requests the NANDFC to perform a read of the
318 * NAND device status and returns the current status. */ 296 * NAND device status and returns the current status. */
319static uint16_t get_dev_status(struct mxc_nand_host *host) 297static uint16_t get_dev_status(struct mxc_nand_host *host)
320{ 298{
321 void __iomem *main_buf = host->regs + MAIN_AREA1; 299 void __iomem *main_buf = host->main_area1;
322 uint32_t store; 300 uint32_t store;
323 uint16_t ret, tmp; 301 uint16_t ret;
324 /* Issue status request to NAND device */ 302 /* Issue status request to NAND device */
325 303
326 /* store the main area1 first word, later do recovery */ 304 /* store the main area1 first word, later do recovery */
@@ -329,15 +307,10 @@ static uint16_t get_dev_status(struct mxc_nand_host *host)
329 * corruption of read/write buffer on status requests. */ 307 * corruption of read/write buffer on status requests. */
330 writew(1, host->regs + NFC_BUF_ADDR); 308 writew(1, host->regs + NFC_BUF_ADDR);
331 309
332 /* Read status into main buffer */
333 tmp = readw(host->regs + NFC_CONFIG1);
334 tmp &= ~NFC_SP_EN;
335 writew(tmp, host->regs + NFC_CONFIG1);
336
337 writew(NFC_STATUS, host->regs + NFC_CONFIG2); 310 writew(NFC_STATUS, host->regs + NFC_CONFIG2);
338 311
339 /* Wait for operation to complete */ 312 /* Wait for operation to complete */
340 wait_op_done(host, TROP_US_DELAY, 0, true); 313 wait_op_done(host, true);
341 314
342 /* Status is placed in first word of main buffer */ 315 /* Status is placed in first word of main buffer */
343 /* get status, then recovery area 1 data */ 316 /* get status, then recovery area 1 data */
@@ -397,32 +370,14 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
397{ 370{
398 struct nand_chip *nand_chip = mtd->priv; 371 struct nand_chip *nand_chip = mtd->priv;
399 struct mxc_nand_host *host = nand_chip->priv; 372 struct mxc_nand_host *host = nand_chip->priv;
400 uint8_t ret = 0; 373 uint8_t ret;
401 uint16_t col, rd_word;
402 uint16_t __iomem *main_buf = host->regs + MAIN_AREA0;
403 uint16_t __iomem *spare_buf = host->regs + SPARE_AREA0;
404 374
405 /* Check for status request */ 375 /* Check for status request */
406 if (host->status_request) 376 if (host->status_request)
407 return get_dev_status(host) & 0xFF; 377 return get_dev_status(host) & 0xFF;
408 378
409 /* Get column for 16-bit access */ 379 ret = *(uint8_t *)(host->data_buf + host->buf_start);
410 col = host->col_addr >> 1; 380 host->buf_start++;
411
412 /* If we are accessing the spare region */
413 if (host->spare_only)
414 rd_word = readw(&spare_buf[col]);
415 else
416 rd_word = readw(&main_buf[col]);
417
418 /* Pick upper/lower byte of word from RAM buffer */
419 if (host->col_addr & 0x1)
420 ret = (rd_word >> 8) & 0xFF;
421 else
422 ret = rd_word & 0xFF;
423
424 /* Update saved column address */
425 host->col_addr++;
426 381
427 return ret; 382 return ret;
428} 383}
@@ -431,33 +386,10 @@ static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
431{ 386{
432 struct nand_chip *nand_chip = mtd->priv; 387 struct nand_chip *nand_chip = mtd->priv;
433 struct mxc_nand_host *host = nand_chip->priv; 388 struct mxc_nand_host *host = nand_chip->priv;
434 uint16_t col, rd_word, ret; 389 uint16_t ret;
435 uint16_t __iomem *p;
436
437 DEBUG(MTD_DEBUG_LEVEL3,
438 "mxc_nand_read_word(col = %d)\n", host->col_addr);
439
440 col = host->col_addr;
441 /* Adjust saved column address */
442 if (col < mtd->writesize && host->spare_only)
443 col += mtd->writesize;
444 390
445 if (col < mtd->writesize) 391 ret = *(uint16_t *)(host->data_buf + host->buf_start);
446 p = (host->regs + MAIN_AREA0) + (col >> 1); 392 host->buf_start += 2;
447 else
448 p = (host->regs + SPARE_AREA0) + ((col - mtd->writesize) >> 1);
449
450 if (col & 1) {
451 rd_word = readw(p);
452 ret = (rd_word >> 8) & 0xff;
453 rd_word = readw(&p[1]);
454 ret |= (rd_word << 8) & 0xff00;
455
456 } else
457 ret = readw(p);
458
459 /* Update saved column address */
460 host->col_addr = col + 2;
461 393
462 return ret; 394 return ret;
463} 395}
@@ -470,94 +402,14 @@ static void mxc_nand_write_buf(struct mtd_info *mtd,
470{ 402{
471 struct nand_chip *nand_chip = mtd->priv; 403 struct nand_chip *nand_chip = mtd->priv;
472 struct mxc_nand_host *host = nand_chip->priv; 404 struct mxc_nand_host *host = nand_chip->priv;
473 int n, col, i = 0; 405 u16 col = host->buf_start;
474 406 int n = mtd->oobsize + mtd->writesize - col;
475 DEBUG(MTD_DEBUG_LEVEL3,
476 "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr,
477 len);
478
479 col = host->col_addr;
480 407
481 /* Adjust saved column address */ 408 n = min(n, len);
482 if (col < mtd->writesize && host->spare_only)
483 col += mtd->writesize;
484 409
485 n = mtd->writesize + mtd->oobsize - col; 410 memcpy(host->data_buf + col, buf, n);
486 n = min(len, n);
487
488 DEBUG(MTD_DEBUG_LEVEL3,
489 "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n);
490
491 while (n) {
492 void __iomem *p;
493
494 if (col < mtd->writesize)
495 p = host->regs + MAIN_AREA0 + (col & ~3);
496 else
497 p = host->regs + SPARE_AREA0 -
498 mtd->writesize + (col & ~3);
499
500 DEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__,
501 __LINE__, p);
502
503 if (((col | (int)&buf[i]) & 3) || n < 16) {
504 uint32_t data = 0;
505
506 if (col & 3 || n < 4)
507 data = readl(p);
508
509 switch (col & 3) {
510 case 0:
511 if (n) {
512 data = (data & 0xffffff00) |
513 (buf[i++] << 0);
514 n--;
515 col++;
516 }
517 case 1:
518 if (n) {
519 data = (data & 0xffff00ff) |
520 (buf[i++] << 8);
521 n--;
522 col++;
523 }
524 case 2:
525 if (n) {
526 data = (data & 0xff00ffff) |
527 (buf[i++] << 16);
528 n--;
529 col++;
530 }
531 case 3:
532 if (n) {
533 data = (data & 0x00ffffff) |
534 (buf[i++] << 24);
535 n--;
536 col++;
537 }
538 }
539
540 writel(data, p);
541 } else {
542 int m = mtd->writesize - col;
543 411
544 if (col >= mtd->writesize) 412 host->buf_start += n;
545 m += mtd->oobsize;
546
547 m = min(n, m) & ~3;
548
549 DEBUG(MTD_DEBUG_LEVEL3,
550 "%s:%d: n = %d, m = %d, i = %d, col = %d\n",
551 __func__, __LINE__, n, m, i, col);
552
553 memcpy(p, &buf[i], m);
554 col += m;
555 i += m;
556 n -= m;
557 }
558 }
559 /* Update saved column address */
560 host->col_addr = col;
561} 413}
562 414
563/* Read the data buffer from the NAND Flash. To read the data from NAND 415/* Read the data buffer from the NAND Flash. To read the data from NAND
@@ -568,75 +420,14 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
568{ 420{
569 struct nand_chip *nand_chip = mtd->priv; 421 struct nand_chip *nand_chip = mtd->priv;
570 struct mxc_nand_host *host = nand_chip->priv; 422 struct mxc_nand_host *host = nand_chip->priv;
571 int n, col, i = 0; 423 u16 col = host->buf_start;
572 424 int n = mtd->oobsize + mtd->writesize - col;
573 DEBUG(MTD_DEBUG_LEVEL3,
574 "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len);
575
576 col = host->col_addr;
577 425
578 /* Adjust saved column address */ 426 n = min(n, len);
579 if (col < mtd->writesize && host->spare_only)
580 col += mtd->writesize;
581 427
582 n = mtd->writesize + mtd->oobsize - col; 428 memcpy(buf, host->data_buf + col, len);
583 n = min(len, n);
584
585 while (n) {
586 void __iomem *p;
587
588 if (col < mtd->writesize)
589 p = host->regs + MAIN_AREA0 + (col & ~3);
590 else
591 p = host->regs + SPARE_AREA0 -
592 mtd->writesize + (col & ~3);
593
594 if (((col | (int)&buf[i]) & 3) || n < 16) {
595 uint32_t data;
596
597 data = readl(p);
598 switch (col & 3) {
599 case 0:
600 if (n) {
601 buf[i++] = (uint8_t) (data);
602 n--;
603 col++;
604 }
605 case 1:
606 if (n) {
607 buf[i++] = (uint8_t) (data >> 8);
608 n--;
609 col++;
610 }
611 case 2:
612 if (n) {
613 buf[i++] = (uint8_t) (data >> 16);
614 n--;
615 col++;
616 }
617 case 3:
618 if (n) {
619 buf[i++] = (uint8_t) (data >> 24);
620 n--;
621 col++;
622 }
623 }
624 } else {
625 int m = mtd->writesize - col;
626
627 if (col >= mtd->writesize)
628 m += mtd->oobsize;
629
630 m = min(n, m) & ~3;
631 memcpy(&buf[i], p, m);
632 col += m;
633 i += m;
634 n -= m;
635 }
636 }
637 /* Update saved column address */
638 host->col_addr = col;
639 429
430 host->buf_start += len;
640} 431}
641 432
642/* Used by the upper layer to verify the data in NAND Flash 433/* Used by the upper layer to verify the data in NAND Flash
@@ -654,23 +445,6 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
654 struct nand_chip *nand_chip = mtd->priv; 445 struct nand_chip *nand_chip = mtd->priv;
655 struct mxc_nand_host *host = nand_chip->priv; 446 struct mxc_nand_host *host = nand_chip->priv;
656 447
657#ifdef CONFIG_MTD_NAND_MXC_FORCE_CE
658 if (chip > 0) {
659 DEBUG(MTD_DEBUG_LEVEL0,
660 "ERROR: Illegal chip select (chip = %d)\n", chip);
661 return;
662 }
663
664 if (chip == -1) {
665 writew(readw(host->regs + NFC_CONFIG1) & ~NFC_CE,
666 host->regs + NFC_CONFIG1);
667 return;
668 }
669
670 writew(readw(host->regs + NFC_CONFIG1) | NFC_CE,
671 host->regs + NFC_CONFIG1);
672#endif
673
674 switch (chip) { 448 switch (chip) {
675 case -1: 449 case -1:
676 /* Disable the NFC clock */ 450 /* Disable the NFC clock */
@@ -692,94 +466,40 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
692 } 466 }
693} 467}
694 468
695/* Used by the upper layer to write command to NAND Flash for 469/*
696 * different operations to be carried out on NAND Flash */ 470 * Function to transfer data to/from spare area.
697static void mxc_nand_command(struct mtd_info *mtd, unsigned command, 471 */
698 int column, int page_addr) 472static void copy_spare(struct mtd_info *mtd, bool bfrom)
699{ 473{
700 struct nand_chip *nand_chip = mtd->priv; 474 struct nand_chip *this = mtd->priv;
701 struct mxc_nand_host *host = nand_chip->priv; 475 struct mxc_nand_host *host = this->priv;
702 int useirq = true; 476 u16 i, j;
703 477 u16 n = mtd->writesize >> 9;
704 DEBUG(MTD_DEBUG_LEVEL3, 478 u8 *d = host->data_buf + mtd->writesize;
705 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 479 u8 *s = host->spare0;
706 command, column, page_addr); 480 u16 t = host->spare_len;
707 481
708 /* Reset command state information */ 482 j = (mtd->oobsize / n >> 1) << 1;
709 host->status_request = false; 483
710 484 if (bfrom) {
711 /* Command pre-processing step */ 485 for (i = 0; i < n - 1; i++)
712 switch (command) { 486 memcpy(d + i * j, s + i * t, j);
713 487
714 case NAND_CMD_STATUS: 488 /* the last section */
715 host->col_addr = 0; 489 memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
716 host->status_request = true; 490 } else {
717 break; 491 for (i = 0; i < n - 1; i++)
718 492 memcpy(&s[i * t], &d[i * j], j);
719 case NAND_CMD_READ0:
720 host->col_addr = column;
721 host->spare_only = false;
722 useirq = false;
723 break;
724
725 case NAND_CMD_READOOB:
726 host->col_addr = column;
727 host->spare_only = true;
728 useirq = false;
729 if (host->pagesize_2k)
730 command = NAND_CMD_READ0; /* only READ0 is valid */
731 break;
732
733 case NAND_CMD_SEQIN:
734 if (column >= mtd->writesize) {
735 /*
736 * FIXME: before send SEQIN command for write OOB,
737 * We must read one page out.
738 * For K9F1GXX has no READ1 command to set current HW
739 * pointer to spare area, we must write the whole page
740 * including OOB together.
741 */
742 if (host->pagesize_2k)
743 /* call ourself to read a page */
744 mxc_nand_command(mtd, NAND_CMD_READ0, 0,
745 page_addr);
746
747 host->col_addr = column - mtd->writesize;
748 host->spare_only = true;
749
750 /* Set program pointer to spare region */
751 if (!host->pagesize_2k)
752 send_cmd(host, NAND_CMD_READOOB, false);
753 } else {
754 host->spare_only = false;
755 host->col_addr = column;
756
757 /* Set program pointer to page start */
758 if (!host->pagesize_2k)
759 send_cmd(host, NAND_CMD_READ0, false);
760 }
761 useirq = false;
762 break;
763
764 case NAND_CMD_PAGEPROG:
765 send_prog_page(host, 0, host->spare_only);
766
767 if (host->pagesize_2k) {
768 /* data in 4 areas datas */
769 send_prog_page(host, 1, host->spare_only);
770 send_prog_page(host, 2, host->spare_only);
771 send_prog_page(host, 3, host->spare_only);
772 }
773
774 break;
775 493
776 case NAND_CMD_ERASE1: 494 /* the last section */
777 useirq = false; 495 memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
778 break;
779 } 496 }
497}
780 498
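copy_spare() shuttles the OOB between the controller's per-subpage spare buffers and the flat data_buf. Worked numbers for a 2048-byte page with 64 bytes of OOB on a v1 controller (spare_len = 16): n = 2048 >> 9 = 4 sections and j = ((64 / 4) >> 1) << 1 = 16 bytes per section, so each 16-byte spare buffer maps onto a 16-byte slice of data_buf + 2048. A sketch of the bfrom == true direction for that case:

	/* Sketch: copy_spare(mtd, true) with writesize 2048, oobsize 64,
	 * spare_len 16 -- four contiguous 16-byte copies. */
	for (i = 0; i < 4; i++)
		memcpy(host->data_buf + 2048 + i * 16,
		       host->spare0 + i * 16, 16);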
781 /* Write out the command to the device. */ 499static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
782 send_cmd(host, command, useirq); 500{
501 struct nand_chip *nand_chip = mtd->priv;
502 struct mxc_nand_host *host = nand_chip->priv;
783 503
784 /* Write out column address, if necessary */ 504 /* Write out column address, if necessary */
785 if (column != -1) { 505 if (column != -1) {
@@ -791,7 +511,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
791 * the full page. 511 * the full page.
792 */ 512 */
793 send_addr(host, 0, page_addr == -1); 513 send_addr(host, 0, page_addr == -1);
794 if (host->pagesize_2k) 514 if (mtd->writesize > 512)
795 /* another col addr cycle for 2k page */ 515 /* another col addr cycle for 2k page */
796 send_addr(host, 0, false); 516 send_addr(host, 0, false);
797 } 517 }
@@ -801,7 +521,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
801 /* paddr_0 - p_addr_7 */ 521 /* paddr_0 - p_addr_7 */
802 send_addr(host, (page_addr & 0xff), false); 522 send_addr(host, (page_addr & 0xff), false);
803 523
804 if (host->pagesize_2k) { 524 if (mtd->writesize > 512) {
805 if (mtd->size >= 0x10000000) { 525 if (mtd->size >= 0x10000000) {
806 /* paddr_8 - paddr_15 */ 526 /* paddr_8 - paddr_15 */
807 send_addr(host, (page_addr >> 8) & 0xff, false); 527 send_addr(host, (page_addr >> 8) & 0xff, false);
@@ -820,52 +540,136 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
820 send_addr(host, (page_addr >> 8) & 0xff, true); 540 send_addr(host, (page_addr >> 8) & 0xff, true);
821 } 541 }
822 } 542 }
543}
544
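mxc_do_addr_cycle() factors the address phase out of mxc_nand_command(). Because data now always moves through the NFC RAM buffer, the column bytes are sent as zero and only the row (page) address varies; larger chips add further row cycles. A generic illustration for a 2 KiB-page device, not a line-by-line quote of the size-dependent branches above:

	/* Sketch: typical cycle order when writesize > 512. */
	send_addr(host, 0, false);			/* column low, always 0   */
	send_addr(host, 0, false);			/* column high (2k page)  */
	send_addr(host, page_addr & 0xff, false);	/* paddr_0..7             */
	send_addr(host, (page_addr >> 8) & 0xff, true);	/* paddr_8..15, last      */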
545/* Used by the upper layer to write command to NAND Flash for
546 * different operations to be carried out on NAND Flash */
547static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
548 int column, int page_addr)
549{
550 struct nand_chip *nand_chip = mtd->priv;
551 struct mxc_nand_host *host = nand_chip->priv;
552
553 DEBUG(MTD_DEBUG_LEVEL3,
554 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
555 command, column, page_addr);
556
557 /* Reset command state information */
558 host->status_request = false;
823 559
824 /* Command post-processing step */ 560 /* Command pre-processing step */
825 switch (command) { 561 switch (command) {
826 562
827 case NAND_CMD_RESET: 563 case NAND_CMD_STATUS:
564 host->buf_start = 0;
565 host->status_request = true;
566
567 send_cmd(host, command, true);
568 mxc_do_addr_cycle(mtd, column, page_addr);
828 break; 569 break;
829 570
830 case NAND_CMD_READOOB:
831 case NAND_CMD_READ0: 571 case NAND_CMD_READ0:
832 if (host->pagesize_2k) { 572 case NAND_CMD_READOOB:
833 /* send read confirm command */ 573 if (command == NAND_CMD_READ0)
574 host->buf_start = column;
575 else
576 host->buf_start = column + mtd->writesize;
577
578 if (mtd->writesize > 512)
579 command = NAND_CMD_READ0; /* only READ0 is valid */
580
581 send_cmd(host, command, false);
582 mxc_do_addr_cycle(mtd, column, page_addr);
583
584 if (mtd->writesize > 512)
834 send_cmd(host, NAND_CMD_READSTART, true); 585 send_cmd(host, NAND_CMD_READSTART, true);
835 /* read for each AREA */ 586
836 send_read_page(host, 0, host->spare_only); 587 send_page(mtd, NFC_OUTPUT);
837 send_read_page(host, 1, host->spare_only); 588
838 send_read_page(host, 2, host->spare_only); 589 memcpy(host->data_buf, host->main_area0, mtd->writesize);
839 send_read_page(host, 3, host->spare_only); 590 copy_spare(mtd, true);
840 } else
841 send_read_page(host, 0, host->spare_only);
842 break; 591 break;
843 592
844 case NAND_CMD_READID: 593 case NAND_CMD_SEQIN:
845 host->col_addr = 0; 594 if (column >= mtd->writesize) {
846 send_read_id(host); 595 /*
596 * FIXME: before send SEQIN command for write OOB,
597 * We must read one page out.
598 * For K9F1GXX has no READ1 command to set current HW
599 * pointer to spare area, we must write the whole page
600 * including OOB together.
601 */
602 if (mtd->writesize > 512)
603 /* call ourself to read a page */
604 mxc_nand_command(mtd, NAND_CMD_READ0, 0,
605 page_addr);
606
607 host->buf_start = column;
608
609 /* Set program pointer to spare region */
610 if (mtd->writesize == 512)
611 send_cmd(host, NAND_CMD_READOOB, false);
612 } else {
613 host->buf_start = column;
614
615 /* Set program pointer to page start */
616 if (mtd->writesize == 512)
617 send_cmd(host, NAND_CMD_READ0, false);
618 }
619
620 send_cmd(host, command, false);
621 mxc_do_addr_cycle(mtd, column, page_addr);
847 break; 622 break;
848 623
849 case NAND_CMD_PAGEPROG: 624 case NAND_CMD_PAGEPROG:
625 memcpy(host->main_area0, host->data_buf, mtd->writesize);
626 copy_spare(mtd, false);
627 send_page(mtd, NFC_INPUT);
628 send_cmd(host, command, true);
629 mxc_do_addr_cycle(mtd, column, page_addr);
850 break; 630 break;
851 631
852 case NAND_CMD_STATUS: 632 case NAND_CMD_READID:
633 send_cmd(host, command, true);
634 mxc_do_addr_cycle(mtd, column, page_addr);
635 send_read_id(host);
636 host->buf_start = column;
853 break; 637 break;
854 638
639 case NAND_CMD_ERASE1:
855 case NAND_CMD_ERASE2: 640 case NAND_CMD_ERASE2:
641 send_cmd(host, command, false);
642 mxc_do_addr_cycle(mtd, column, page_addr);
643
856 break; 644 break;
857 } 645 }
858} 646}
859 647
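After this rework mxc_nand_command() no longer streams data through the controller buffers on demand: a READ0 pulls the whole page plus OOB into host->data_buf, and read_byte()/read_word()/read_buf() merely index that buffer through host->buf_start. A rough sketch of the resulting page-read flow, using the helpers shown above:

	/* Sketch of the buffered read path introduced by this patch. */
	mxc_nand_command(mtd, NAND_CMD_READ0, 0, page);
		/* -> send_cmd(READ0) + mxc_do_addr_cycle()
		 * -> send_cmd(READSTART) on >512-byte pages
		 * -> send_page(mtd, NFC_OUTPUT)
		 * -> memcpy(host->data_buf, host->main_area0, mtd->writesize)
		 * -> copy_spare(mtd, true) */
	mxc_nand_read_buf(mtd, buf, mtd->writesize);	/* memcpy from data_buf */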
860/* Define some generic bad / good block scan pattern which are used 648/*
 861 * while scanning a device for factory marked good / bad blocks. */ 649 * The generic flash bbt descriptors overlap with our ecc
862static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 650 * hardware, so define some i.MX specific ones.
651 */
652static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
653static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
654
655static struct nand_bbt_descr bbt_main_descr = {
656 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
657 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
658 .offs = 0,
659 .len = 4,
660 .veroffs = 4,
661 .maxblocks = 4,
662 .pattern = bbt_pattern,
663};
863 664
864static struct nand_bbt_descr smallpage_memorybased = { 665static struct nand_bbt_descr bbt_mirror_descr = {
865 .options = NAND_BBT_SCAN2NDPAGE, 666 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
866 .offs = 5, 667 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
867 .len = 1, 668 .offs = 0,
868 .pattern = scan_ff_pattern 669 .len = 4,
670 .veroffs = 4,
671 .maxblocks = 4,
672 .pattern = mirror_pattern,
869}; 673};
870 674
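The two descriptors place the bad block table in the last blocks of each chip, with a 4-byte signature at OOB offset 0 and a version byte at offset 4, and they are only wired up when the board asks for a flash-based BBT. A hedged example of the platform data that opts in (the struct name is assumed from the driver's platform header; width, hw_ecc and flash_bbt are the fields tested in mxcnd_probe() below):

	/* Sketch: board code enabling hardware ECC and the flash-based BBT. */
	static struct mxc_nand_platform_data board_nand_pdata = {
		.width		= 1,	/* 8-bit bus; 2 selects 16-bit */
		.hw_ecc		= 1,
		.flash_bbt	= 1,
	};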
871static int __init mxcnd_probe(struct platform_device *pdev) 675static int __init mxcnd_probe(struct platform_device *pdev)
@@ -877,12 +681,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
877 struct resource *res; 681 struct resource *res;
878 uint16_t tmp; 682 uint16_t tmp;
879 int err = 0, nr_parts = 0; 683 int err = 0, nr_parts = 0;
684 struct nand_ecclayout *oob_smallpage, *oob_largepage;
880 685
881 /* Allocate memory for MTD device structure and private data */ 686 /* Allocate memory for MTD device structure and private data */
882 host = kzalloc(sizeof(struct mxc_nand_host), GFP_KERNEL); 687 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
688 NAND_MAX_OOBSIZE, GFP_KERNEL);
883 if (!host) 689 if (!host)
884 return -ENOMEM; 690 return -ENOMEM;
885 691
692 host->data_buf = (uint8_t *)(host + 1);
693
886 host->dev = &pdev->dev; 694 host->dev = &pdev->dev;
887 /* structures must be linked */ 695 /* structures must be linked */
888 this = &host->nand; 696 this = &host->nand;
@@ -890,7 +698,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
890 mtd->priv = this; 698 mtd->priv = this;
891 mtd->owner = THIS_MODULE; 699 mtd->owner = THIS_MODULE;
892 mtd->dev.parent = &pdev->dev; 700 mtd->dev.parent = &pdev->dev;
893 mtd->name = "mxc_nand"; 701 mtd->name = DRIVER_NAME;
894 702
895 /* 50 us command delay time */ 703 /* 50 us command delay time */
896 this->chip_delay = 5; 704 this->chip_delay = 5;
@@ -920,62 +728,93 @@ static int __init mxcnd_probe(struct platform_device *pdev)
920 goto eres; 728 goto eres;
921 } 729 }
922 730
923 host->regs = ioremap(res->start, res->end - res->start + 1); 731 host->base = ioremap(res->start, resource_size(res));
924 if (!host->regs) { 732 if (!host->base) {
925 err = -ENOMEM; 733 err = -ENOMEM;
926 goto eres; 734 goto eres;
927 } 735 }
928 736
737 host->main_area0 = host->base;
738 host->main_area1 = host->base + 0x200;
739
740 if (nfc_is_v21()) {
741 host->regs = host->base + 0x1000;
742 host->spare0 = host->base + 0x1000;
743 host->spare_len = 64;
744 oob_smallpage = &nandv2_hw_eccoob_smallpage;
745 oob_largepage = &nandv2_hw_eccoob_largepage;
746 } else if (nfc_is_v1()) {
747 host->regs = host->base;
748 host->spare0 = host->base + 0x800;
749 host->spare_len = 16;
750 oob_smallpage = &nandv1_hw_eccoob_smallpage;
751 oob_largepage = &nandv1_hw_eccoob_largepage;
752 } else
753 BUG();
754
755 /* disable interrupt and spare enable */
929 tmp = readw(host->regs + NFC_CONFIG1); 756 tmp = readw(host->regs + NFC_CONFIG1);
930 tmp |= NFC_INT_MSK; 757 tmp |= NFC_INT_MSK;
758 tmp &= ~NFC_SP_EN;
931 writew(tmp, host->regs + NFC_CONFIG1); 759 writew(tmp, host->regs + NFC_CONFIG1);
932 760
933 init_waitqueue_head(&host->irq_waitq); 761 init_waitqueue_head(&host->irq_waitq);
934 762
935 host->irq = platform_get_irq(pdev, 0); 763 host->irq = platform_get_irq(pdev, 0);
936 764
937 err = request_irq(host->irq, mxc_nfc_irq, 0, "mxc_nd", host); 765 err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
938 if (err) 766 if (err)
939 goto eirq; 767 goto eirq;
940 768
769 /* Reset NAND */
770 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
771
772 /* preset operation */
773 /* Unlock the internal RAM Buffer */
774 writew(0x2, host->regs + NFC_CONFIG);
775
776 /* Blocks to be unlocked */
777 if (nfc_is_v21()) {
778 writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
779 writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
780 this->ecc.bytes = 9;
781 } else if (nfc_is_v1()) {
782 writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
783 writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
784 this->ecc.bytes = 3;
785 } else
786 BUG();
787
788 /* Unlock Block Command for given address range */
789 writew(0x4, host->regs + NFC_WRPROT);
790
791 this->ecc.size = 512;
792 this->ecc.layout = oob_smallpage;
793
941 if (pdata->hw_ecc) { 794 if (pdata->hw_ecc) {
942 this->ecc.calculate = mxc_nand_calculate_ecc; 795 this->ecc.calculate = mxc_nand_calculate_ecc;
943 this->ecc.hwctl = mxc_nand_enable_hwecc; 796 this->ecc.hwctl = mxc_nand_enable_hwecc;
944 this->ecc.correct = mxc_nand_correct_data; 797 this->ecc.correct = mxc_nand_correct_data;
945 this->ecc.mode = NAND_ECC_HW; 798 this->ecc.mode = NAND_ECC_HW;
946 this->ecc.size = 512;
947 this->ecc.bytes = 3;
948 tmp = readw(host->regs + NFC_CONFIG1); 799 tmp = readw(host->regs + NFC_CONFIG1);
949 tmp |= NFC_ECC_EN; 800 tmp |= NFC_ECC_EN;
950 writew(tmp, host->regs + NFC_CONFIG1); 801 writew(tmp, host->regs + NFC_CONFIG1);
951 } else { 802 } else {
952 this->ecc.size = 512;
953 this->ecc.bytes = 3;
954 this->ecc.layout = &nand_hw_eccoob_8;
955 this->ecc.mode = NAND_ECC_SOFT; 803 this->ecc.mode = NAND_ECC_SOFT;
956 tmp = readw(host->regs + NFC_CONFIG1); 804 tmp = readw(host->regs + NFC_CONFIG1);
957 tmp &= ~NFC_ECC_EN; 805 tmp &= ~NFC_ECC_EN;
958 writew(tmp, host->regs + NFC_CONFIG1); 806 writew(tmp, host->regs + NFC_CONFIG1);
959 } 807 }
960 808
961 /* Reset NAND */
962 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
963
964 /* preset operation */
965 /* Unlock the internal RAM Buffer */
966 writew(0x2, host->regs + NFC_CONFIG);
967
968 /* Blocks to be unlocked */
969 writew(0x0, host->regs + NFC_UNLOCKSTART_BLKADDR);
970 writew(0x4000, host->regs + NFC_UNLOCKEND_BLKADDR);
971
972 /* Unlock Block Command for given address range */
973 writew(0x4, host->regs + NFC_WRPROT);
974
 975 /* NAND bus width determines access functions used by upper layer */ 809 /* NAND bus width determines access functions used by upper layer */
976 if (pdata->width == 2) { 810 if (pdata->width == 2)
977 this->options |= NAND_BUSWIDTH_16; 811 this->options |= NAND_BUSWIDTH_16;
978 this->ecc.layout = &nand_hw_eccoob_16; 812
813 if (pdata->flash_bbt) {
814 this->bbt_td = &bbt_main_descr;
815 this->bbt_md = &bbt_mirror_descr;
816 /* update flash based bbt */
817 this->options |= NAND_USE_FLASH_BBT;
979 } 818 }
980 819
981 /* first scan to find the device and get the page size */ 820 /* first scan to find the device and get the page size */
@@ -984,38 +823,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
984 goto escan; 823 goto escan;
985 } 824 }
986 825
987 if (mtd->writesize == 2048) { 826 if (mtd->writesize == 2048)
988 host->pagesize_2k = 1; 827 this->ecc.layout = oob_largepage;
989 this->badblock_pattern = &smallpage_memorybased;
990 }
991
992 if (this->ecc.mode == NAND_ECC_HW) {
993 switch (mtd->oobsize) {
994 case 8:
995 this->ecc.layout = &nand_hw_eccoob_8;
996 break;
997 case 16:
998 this->ecc.layout = &nand_hw_eccoob_16;
999 break;
1000 case 64:
1001 this->ecc.layout = &nand_hw_eccoob_64;
1002 break;
1003 default:
1004 /* page size not handled by HW ECC */
1005 /* switching back to soft ECC */
1006 this->ecc.size = 512;
1007 this->ecc.bytes = 3;
1008 this->ecc.layout = &nand_hw_eccoob_8;
1009 this->ecc.mode = NAND_ECC_SOFT;
1010 this->ecc.calculate = NULL;
1011 this->ecc.correct = NULL;
1012 this->ecc.hwctl = NULL;
1013 tmp = readw(host->regs + NFC_CONFIG1);
1014 tmp &= ~NFC_ECC_EN;
1015 writew(tmp, host->regs + NFC_CONFIG1);
1016 break;
1017 }
1018 }
1019 828
1020 /* second phase scan */ 829 /* second phase scan */
1021 if (nand_scan_tail(mtd)) { 830 if (nand_scan_tail(mtd)) {
@@ -1043,7 +852,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1043escan: 852escan:
1044 free_irq(host->irq, host); 853 free_irq(host->irq, host);
1045eirq: 854eirq:
1046 iounmap(host->regs); 855 iounmap(host->base);
1047eres: 856eres:
1048 clk_put(host->clk); 857 clk_put(host->clk);
1049eclk: 858eclk:
@@ -1062,7 +871,7 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1062 871
1063 nand_release(&host->mtd); 872 nand_release(&host->mtd);
1064 free_irq(host->irq, host); 873 free_irq(host->irq, host);
1065 iounmap(host->regs); 874 iounmap(host->base);
1066 kfree(host); 875 kfree(host);
1067 876
1068 return 0; 877 return 0;
@@ -1113,7 +922,7 @@ static struct platform_driver mxcnd_driver = {
1113 .driver = { 922 .driver = {
1114 .name = DRIVER_NAME, 923 .name = DRIVER_NAME,
1115 }, 924 },
1116 .remove = __exit_p(mxcnd_remove), 925 .remove = __devexit_p(mxcnd_remove),
1117 .suspend = mxcnd_suspend, 926 .suspend = mxcnd_suspend,
1118 .resume = mxcnd_resume, 927 .resume = mxcnd_resume,
1119}; 928};
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 2957cc70da3d..8f2958fe2148 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -428,6 +428,28 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
428 return nand_isbad_bbt(mtd, ofs, allowbbt); 428 return nand_isbad_bbt(mtd, ofs, allowbbt);
429} 429}
430 430
431/**
432 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
433 * @mtd: MTD device structure
434 * @timeo: Timeout
435 *
436 * Helper function for nand_wait_ready used when needing to wait in interrupt
437 * context.
438 */
439static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
440{
441 struct nand_chip *chip = mtd->priv;
442 int i;
443
444 /* Wait for the device to get ready */
445 for (i = 0; i < timeo; i++) {
446 if (chip->dev_ready(mtd))
447 break;
448 touch_softlockup_watchdog();
449 mdelay(1);
450 }
451}
452
431/* 453/*
432 * Wait for the ready pin, after a command 454 * Wait for the ready pin, after a command
 433 * The timeout is caught later. 455 * The timeout is caught later.
@@ -437,6 +459,10 @@ void nand_wait_ready(struct mtd_info *mtd)
437 struct nand_chip *chip = mtd->priv; 459 struct nand_chip *chip = mtd->priv;
438 unsigned long timeo = jiffies + 2; 460 unsigned long timeo = jiffies + 2;
439 461
462 /* 400ms timeout */
463 if (in_interrupt() || oops_in_progress)
464 return panic_nand_wait_ready(mtd, 400);
465
440 led_trigger_event(nand_led_trigger, LED_FULL); 466 led_trigger_event(nand_led_trigger, LED_FULL);
 441 /* wait until command is processed or timeout occurs */ 467 /* wait until command is processed or timeout occurs */
442 do { 468 do {
@@ -672,6 +698,22 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
672} 698}
673 699
674/** 700/**
701 * panic_nand_get_device - [GENERIC] Get chip for selected access
702 * @chip: the nand chip descriptor
703 * @mtd: MTD device structure
704 * @new_state: the state which is requested
705 *
706 * Used when in panic, no locks are taken.
707 */
708static void panic_nand_get_device(struct nand_chip *chip,
709 struct mtd_info *mtd, int new_state)
710{
 711 /* Hardware controller shared among independent devices */
712 chip->controller->active = chip;
713 chip->state = new_state;
714}
715
716/**
675 * nand_get_device - [GENERIC] Get chip for selected access 717 * nand_get_device - [GENERIC] Get chip for selected access
676 * @chip: the nand chip descriptor 718 * @chip: the nand chip descriptor
677 * @mtd: MTD device structure 719 * @mtd: MTD device structure
@@ -698,8 +740,14 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
698 return 0; 740 return 0;
699 } 741 }
700 if (new_state == FL_PM_SUSPENDED) { 742 if (new_state == FL_PM_SUSPENDED) {
701 spin_unlock(lock); 743 if (chip->controller->active->state == FL_PM_SUSPENDED) {
702 return (chip->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN; 744 chip->state = FL_PM_SUSPENDED;
745 spin_unlock(lock);
746 return 0;
747 } else {
748 spin_unlock(lock);
749 return -EAGAIN;
750 }
703 } 751 }
704 set_current_state(TASK_UNINTERRUPTIBLE); 752 set_current_state(TASK_UNINTERRUPTIBLE);
705 add_wait_queue(wq, &wait); 753 add_wait_queue(wq, &wait);
@@ -710,6 +758,32 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
710} 758}
711 759
712/** 760/**
761 * panic_nand_wait - [GENERIC] wait until the command is done
762 * @mtd: MTD device structure
763 * @chip: NAND chip structure
764 * @timeo: Timeout
765 *
766 * Wait for command done. This is a helper function for nand_wait used when
767 * we are in interrupt context. May happen when in panic and trying to write
768 * an oops trough mtdoops.
769 */
770static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
771 unsigned long timeo)
772{
773 int i;
774 for (i = 0; i < timeo; i++) {
775 if (chip->dev_ready) {
776 if (chip->dev_ready(mtd))
777 break;
778 } else {
779 if (chip->read_byte(mtd) & NAND_STATUS_READY)
780 break;
781 }
782 mdelay(1);
783 }
784}
785
786/**
713 * nand_wait - [DEFAULT] wait until the command is done 787 * nand_wait - [DEFAULT] wait until the command is done
714 * @mtd: MTD device structure 788 * @mtd: MTD device structure
715 * @chip: NAND chip structure 789 * @chip: NAND chip structure
@@ -740,15 +814,19 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
740 else 814 else
741 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 815 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
742 816
743 while (time_before(jiffies, timeo)) { 817 if (in_interrupt() || oops_in_progress)
744 if (chip->dev_ready) { 818 panic_nand_wait(mtd, chip, timeo);
745 if (chip->dev_ready(mtd)) 819 else {
746 break; 820 while (time_before(jiffies, timeo)) {
747 } else { 821 if (chip->dev_ready) {
748 if (chip->read_byte(mtd) & NAND_STATUS_READY) 822 if (chip->dev_ready(mtd))
749 break; 823 break;
824 } else {
825 if (chip->read_byte(mtd) & NAND_STATUS_READY)
826 break;
827 }
828 cond_resched();
750 } 829 }
751 cond_resched();
752 } 830 }
753 led_trigger_event(nand_led_trigger, LED_OFF); 831 led_trigger_event(nand_led_trigger, LED_OFF);
754 832
@@ -1949,6 +2027,45 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1949} 2027}
1950 2028
1951/** 2029/**
2030 * panic_nand_write - [MTD Interface] NAND write with ECC
2031 * @mtd: MTD device structure
2032 * @to: offset to write to
2033 * @len: number of bytes to write
2034 * @retlen: pointer to variable to store the number of written bytes
2035 * @buf: the data to write
2036 *
2037 * NAND write with ECC. Used when performing writes in interrupt context, this
2038 * may for example be called by mtdoops when writing an oops while in panic.
2039 */
2040static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2041 size_t *retlen, const uint8_t *buf)
2042{
2043 struct nand_chip *chip = mtd->priv;
2044 int ret;
2045
 2046 /* Do not allow writes past end of device */
2047 if ((to + len) > mtd->size)
2048 return -EINVAL;
2049 if (!len)
2050 return 0;
2051
2052 /* Wait for the device to get ready. */
2053 panic_nand_wait(mtd, chip, 400);
2054
2055 /* Grab the device. */
2056 panic_nand_get_device(chip, mtd, FL_WRITING);
2057
2058 chip->ops.len = len;
2059 chip->ops.datbuf = (uint8_t *)buf;
2060 chip->ops.oobbuf = NULL;
2061
2062 ret = nand_do_write_ops(mtd, to, &chip->ops);
2063
2064 *retlen = chip->ops.retlen;
2065 return ret;
2066}
2067
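panic_nand_write() gives NAND devices a panic_write implementation so that mtdoops can commit a crash log from panic or interrupt context without sleeping or taking locks. Once nand_scan_tail() installs it (see the mtd->panic_write assignment further down), a caller would use it roughly like this; the offset, length and buffer names are hypothetical:

	/* Sketch: emitting an oops record while panicking. */
	size_t retlen;
	int ret;

	ret = mtd->panic_write(mtd, record_offset, record_size, &retlen, oops_buf);
	if (ret || retlen != record_size)
		; /* nothing more can be done in panic context */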
2068/**
1952 * nand_write - [MTD Interface] NAND write with ECC 2069 * nand_write - [MTD Interface] NAND write with ECC
1953 * @mtd: MTD device structure 2070 * @mtd: MTD device structure
1954 * @to: offset to write to 2071 * @to: offset to write to
@@ -2645,7 +2762,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
2645 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); 2762 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
2646 2763
2647 if (IS_ERR(type)) { 2764 if (IS_ERR(type)) {
2648 printk(KERN_WARNING "No NAND device found!!!\n"); 2765 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
2766 printk(KERN_WARNING "No NAND device found.\n");
2649 chip->select_chip(mtd, -1); 2767 chip->select_chip(mtd, -1);
2650 return PTR_ERR(type); 2768 return PTR_ERR(type);
2651 } 2769 }
@@ -2877,6 +2995,7 @@ int nand_scan_tail(struct mtd_info *mtd)
2877 mtd->unpoint = NULL; 2995 mtd->unpoint = NULL;
2878 mtd->read = nand_read; 2996 mtd->read = nand_read;
2879 mtd->write = nand_write; 2997 mtd->write = nand_write;
2998 mtd->panic_write = panic_nand_write;
2880 mtd->read_oob = nand_read_oob; 2999 mtd->read_oob = nand_read_oob;
2881 mtd->write_oob = nand_write_oob; 3000 mtd->write_oob = nand_write_oob;
2882 mtd->sync = nand_sync; 3001 mtd->sync = nand_sync;
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
new file mode 100644
index 000000000000..46a6bc9c4b74
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.c
@@ -0,0 +1,149 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <mach/reg_umi.h>
17#include "nand_bcm_umi.h"
18#ifdef BOOT0_BUILD
19#include <uart.h>
20#endif
21
22/* ---- External Variable Declarations ----------------------------------- */
23/* ---- External Function Prototypes ------------------------------------- */
24/* ---- Public Variables ------------------------------------------------- */
25/* ---- Private Constants and Types -------------------------------------- */
26/* ---- Private Function Prototypes -------------------------------------- */
27/* ---- Private Variables ------------------------------------------------ */
28/* ---- Private Functions ------------------------------------------------ */
29
30#if NAND_ECC_BCH
31/****************************************************************************
 32* nand_bcm_umi_bch_ecc_flip_bit - Routine to flip an errored bit
33*
34* PURPOSE:
 35* This is a helper routine that flips (0 -> 1 or 1 -> 0) the
 36* specified errored bit
37*
38* PARAMETERS:
39* datap - Container that holds the 512 byte data
40* errorLocation - Location of the bit that needs to be flipped
41*
42* RETURNS:
43* None
44****************************************************************************/
45static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
46{
47 int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
48 int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
49 int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
50
51 uint8_t errorByte = 0;
52 uint8_t byteMask = 1 << locWithinAByte;
53
54 /* BCH uses big endian, need to change the location
55 * bits to little endian */
56 locWithinAWord = 3 - locWithinAWord;
57
58 errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
59
60#ifdef BOOT0_BUILD
61 puthexs("\nECC Correct Offset: ",
62 locWithinAPage * sizeof(uint32_t) + locWithinAWord);
63 puthexs(" errorByte:", errorByte);
64 puthex8(" Bit: ", locWithinAByte);
65#endif
66
67 if (errorByte & byteMask) {
68 /* bit needs to be cleared */
69 errorByte &= ~byteMask;
70 } else {
71 /* bit needs to be set */
72 errorByte |= byteMask;
73 }
74
75 /* write back the value with the fixed bit */
76 datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
77}
78
79/****************************************************************************
 80* nand_bcm_umi_bch_correct_page - Routine to correct bit errors when reading NAND
81*
82* PURPOSE:
83* This routine reads the BCH registers to determine if there are any bit
84* errors during the read of the last 512 bytes of data + ECC bytes. If
85* errors exists, the routine fixes it.
86*
87* PARAMETERS:
88* datap - Container that holds the 512 byte data
89*
90* RETURNS:
91* 0 or greater = Number of errors corrected
92* (No errors are found or errors have been fixed)
93* -1 = Error(s) cannot be fixed
94****************************************************************************/
95int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
96 int numEccBytes)
97{
98 int numErrors;
99 int errorLocation;
100 int idx;
101 uint32_t regValue;
102
103 /* wait for read ECC to be valid */
104 regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
105
106 /*
107 * read the control status register to determine if there
 108 * are errored bits
 109 * and see if the errors are correctable
110 */
111 if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
112 int i;
113
114 for (i = 0; i < numEccBytes; i++) {
115 if (readEccData[i] != 0xff) {
116 /* errors cannot be fixed, return -1 */
117 return -1;
118 }
119 }
120 /* If ECC is unprogrammed then we can't correct,
121 * assume everything OK */
122 return 0;
123 }
124
125 if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
126 /* no errors */
127 return 0;
128 }
129
130 /*
131 * Fix errored bits by doing the following:
132 * 1. Read the number of errors in the control and status register
133 * 2. Read the error location registers that corresponds to the number
134 * of errors reported
135 * 3. Invert the bit in the data
136 */
137 numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
138
139 for (idx = 0; idx < numErrors; idx++) {
140 errorLocation =
141 REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
142
143 /* Flip bit */
144 nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
145 }
146 /* Errors corrected */
147 return numErrors;
148}
149#endif
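A caller reads a 512-byte sector plus its stored ECC bytes from the OOB and then asks the routine above to repair the data in place; a non-negative return is the number of bits corrected, -1 signals an uncorrectable error. A hedged usage sketch (buffer names hypothetical):

	/* Sketch: correcting one 512-byte sector after a read. */
	int corrected = nand_bcm_umi_bch_correct_page(sector_buf, oob_ecc,
						      NAND_ECC_NUM_BYTES);
	if (corrected < 0)
		printk(KERN_ERR "uncorrectable BCH ECC error\n");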
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
new file mode 100644
index 000000000000..7cec2cd97854
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -0,0 +1,358 @@
1/*****************************************************************************
2* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14#ifndef NAND_BCM_UMI_H
15#define NAND_BCM_UMI_H
16
17/* ---- Include Files ---------------------------------------------------- */
18#include <mach/reg_umi.h>
19#include <mach/reg_nand.h>
20#include <cfg_global.h>
21
22/* ---- Constants and Types ---------------------------------------------- */
23#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
24#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
25#else
26#define NAND_ECC_BCH 0
27#endif
28
29#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
30
31#if NAND_ECC_BCH
32#ifdef BOOT0_BUILD
33#define NAND_ECC_NUM_BYTES 13
34#else
35#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
36#endif
37#else
38#define NAND_ECC_NUM_BYTES 3
39#endif
40
41#define NAND_DATA_ACCESS_SIZE 512
42
43/* ---- Variable Externs ------------------------------------------ */
44/* ---- Function Prototypes --------------------------------------- */
45int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
46 int numEccBytes);
47
 48/* Check if device is ready */
49static inline int nand_bcm_umi_dev_ready(void)
50{
51 return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
52}
53
54/* Wait until device is ready */
55static inline void nand_bcm_umi_wait_till_ready(void)
56{
57 while (nand_bcm_umi_dev_ready() == 0)
58 ;
59}
60
61/* Enable Hamming ECC */
62static inline void nand_bcm_umi_hamming_enable_hwecc(void)
63{
64 /* disable and reset ECC, 512 byte page */
65 REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
66 REG_UMI_NAND_ECC_CSR_256BYTE);
67 /* enable ECC */
68 REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
69}
70
71#if NAND_ECC_BCH
72/* BCH ECC specifics */
73#define ECC_BITS_PER_CORRECTABLE_BIT 13
74
75/* Enable BCH Read ECC */
76static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
77{
78 /* disable and reset ECC */
79 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
80 /* Turn on ECC */
81 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
82}
83
84/* Enable BCH Write ECC */
85static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
86{
87 /* disable and reset ECC */
88 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
89 /* Turn on ECC */
90 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
91}
92
93/* Config number of BCH ECC bytes */
94static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
95{
96 uint32_t nValue;
97 uint32_t tValue;
98 uint32_t kValue;
99 uint32_t numBits = numEccBytes * 8;
100
101 /* disable and reset ECC */
102 REG_UMI_BCH_CTRL_STATUS =
103 REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
104 REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
105
 106 /* Every correctable bit requires 13 ECC bits */
107 tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
108
109 /* Total data in number of bits for generating and computing BCH ECC */
110 nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
111
112 /* K parameter is used internally. K = N - (T * 13) */
113 kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
114
115 /* Write the settings */
116 REG_UMI_BCH_N = nValue;
117 REG_UMI_BCH_T = tValue;
118 REG_UMI_BCH_K = kValue;
119}
120
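Worked numbers for the default 13-byte BCH configuration make the register math above concrete:

	/* nand_bcm_umi_bch_config_ecc(13) programs
	 *   T = (13 * 8) / 13    =    8   correctable bits
	 *   N = (512 + 13) * 8   = 4200   codeword bits
	 *   K = 4200 - 8 * 13    = 4096   data bits, i.e. the 512-byte sector
	 */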
121/* Pause during ECC read calculation to skip bytes in OOB */
122static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
123{
124 REG_UMI_BCH_CTRL_STATUS =
125 REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
126 REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
127}
128
129/* Resume during ECC read calculation after skipping bytes in OOB */
130static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
131{
132 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
133}
134
135/* Poll read ECC calc to check when hardware completes */
136static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
137{
138 uint32_t regVal;
139
140 do {
141 /* wait for ECC to be valid */
142 regVal = REG_UMI_BCH_CTRL_STATUS;
143 } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
144
145 return regVal;
146}
147
148/* Poll write ECC calc to check when hardware completes */
149static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
150{
151 /* wait for ECC to be valid */
152 while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
153 == 0)
154 ;
155}
156
157/* Read the OOB and ECC, for kernel write OOB to a buffer */
158#if defined(__KERNEL__) && !defined(STANDALONE)
159static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
160 uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
161#else
162static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
163 uint8_t *eccCalc, int numEccBytes)
164#endif
165{
166 int eccPos = 0;
167 int numToRead = 16; /* There are 16 bytes per sector in the OOB */
168
169 /* ECC is already paused when this function is called */
170
171 if (pageSize == NAND_DATA_ACCESS_SIZE) {
172 while (numToRead > numEccBytes) {
173 /* skip free oob region */
174#if defined(__KERNEL__) && !defined(STANDALONE)
175 *oobp++ = REG_NAND_DATA8;
176#else
177 REG_NAND_DATA8;
178#endif
179 numToRead--;
180 }
181
182 /* read ECC bytes before BI */
183 nand_bcm_umi_bch_resume_read_ecc_calc();
184
185 while (numToRead > 11) {
186#if defined(__KERNEL__) && !defined(STANDALONE)
187 *oobp = REG_NAND_DATA8;
188 eccCalc[eccPos++] = *oobp;
189 oobp++;
190#else
191 eccCalc[eccPos++] = REG_NAND_DATA8;
192#endif
193 }
194
195 nand_bcm_umi_bch_pause_read_ecc_calc();
196
197 if (numToRead == 11) {
198 /* read BI */
199#if defined(__KERNEL__) && !defined(STANDALONE)
200 *oobp++ = REG_NAND_DATA8;
201#else
202 REG_NAND_DATA8;
203#endif
204 numToRead--;
205 }
206
207 /* read ECC bytes */
208 nand_bcm_umi_bch_resume_read_ecc_calc();
209 while (numToRead) {
210#if defined(__KERNEL__) && !defined(STANDALONE)
211 *oobp = REG_NAND_DATA8;
212 eccCalc[eccPos++] = *oobp;
213 oobp++;
214#else
215 eccCalc[eccPos++] = REG_NAND_DATA8;
216#endif
217 numToRead--;
218 }
219 } else {
220 /* skip BI */
221#if defined(__KERNEL__) && !defined(STANDALONE)
222 *oobp++ = REG_NAND_DATA8;
223#else
224 REG_NAND_DATA8;
225#endif
226 numToRead--;
227
228 while (numToRead > numEccBytes) {
229 /* skip free oob region */
230#if defined(__KERNEL__) && !defined(STANDALONE)
231 *oobp++ = REG_NAND_DATA8;
232#else
233 REG_NAND_DATA8;
234#endif
235 numToRead--;
236 }
237
238 /* read ECC bytes */
239 nand_bcm_umi_bch_resume_read_ecc_calc();
240 while (numToRead) {
241#if defined(__KERNEL__) && !defined(STANDALONE)
242 *oobp = REG_NAND_DATA8;
243 eccCalc[eccPos++] = *oobp;
244 oobp++;
245#else
246 eccCalc[eccPos++] = REG_NAND_DATA8;
247#endif
248 numToRead--;
249 }
250 }
251}
252
253/* Helper function to write ECC */
254static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
255 uint8_t *oobp, uint8_t eccVal)
256{
257 if (eccBytePos <= numEccBytes)
258 *oobp = eccVal;
259}
260
261/* Write OOB with ECC */
262static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
263 uint8_t *oobp, int numEccBytes)
264{
265 uint32_t eccVal = 0xffffffff;
266
267 /* wait for write ECC to be valid */
268 nand_bcm_umi_bch_poll_write_ecc_calc();
269
270 /*
271 ** Get the hardware ecc from the 32-bit result registers.
272 ** Read after 512 byte accesses. Format B3B2B1B0
273 ** where B3 = ecc3, etc.
274 */
275
276 if (pageSize == NAND_DATA_ACCESS_SIZE) {
277 /* Now fill in the ECC bytes */
278 if (numEccBytes >= 13)
279 eccVal = REG_UMI_BCH_WR_ECC_3;
280
281 /* Usually we skip CM in oob[0,1] */
282 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
283 (eccVal >> 16) & 0xff);
284 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
285 (eccVal >> 8) & 0xff);
286
287 /* Write ECC in oob[2,3,4] */
288 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
289 eccVal & 0xff); /* ECC 12 */
290
291 if (numEccBytes >= 9)
292 eccVal = REG_UMI_BCH_WR_ECC_2;
293
294 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
295 (eccVal >> 24) & 0xff); /* ECC11 */
296 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
297 (eccVal >> 16) & 0xff); /* ECC10 */
298
299 /* Always Skip BI in oob[5] */
300 } else {
301 /* Always Skip BI in oob[0] */
302
303 /* Now fill in the ECC bytes */
304 if (numEccBytes >= 13)
305 eccVal = REG_UMI_BCH_WR_ECC_3;
306
307 /* Usually skip CM in oob[1,2] */
308 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
309 (eccVal >> 16) & 0xff);
310 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
311 (eccVal >> 8) & 0xff);
312
313 /* Write ECC in oob[3-15] */
314 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
315 eccVal & 0xff); /* ECC12 */
316
317 if (numEccBytes >= 9)
318 eccVal = REG_UMI_BCH_WR_ECC_2;
319
320 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
321 (eccVal >> 24) & 0xff); /* ECC11 */
322 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
323 (eccVal >> 16) & 0xff); /* ECC10 */
324 }
325
326 /* Fill in the remainder of ECC locations */
327 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
328 (eccVal >> 8) & 0xff); /* ECC9 */
329 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
330 eccVal & 0xff); /* ECC8 */
331
332 if (numEccBytes >= 5)
333 eccVal = REG_UMI_BCH_WR_ECC_1;
334
335 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
336 (eccVal >> 24) & 0xff); /* ECC7 */
337 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
338 (eccVal >> 16) & 0xff); /* ECC6 */
339 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
340 (eccVal >> 8) & 0xff); /* ECC5 */
341 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
342 eccVal & 0xff); /* ECC4 */
343
344 if (numEccBytes >= 1)
345 eccVal = REG_UMI_BCH_WR_ECC_0;
346
347 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
348 (eccVal >> 24) & 0xff); /* ECC3 */
349 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
350 (eccVal >> 16) & 0xff); /* ECC2 */
351 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
352 (eccVal >> 8) & 0xff); /* ECC1 */
353 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
354 eccVal & 0xff); /* ECC0 */
355}
356#endif
357
358#endif /* NAND_BCM_UMI_H */
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 92320a643275..271b8e735e8f 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -150,20 +150,19 @@ static const char addressbits[256] = {
150}; 150};
151 151
152/** 152/**
153 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte 153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
154 * block 154 * block
155 * @mtd: MTD block structure
156 * @buf: input buffer with raw data 155 * @buf: input buffer with raw data
156 * @eccsize: data bytes per ecc step (256 or 512)
157 * @code: output buffer with ECC 157 * @code: output buffer with ECC
158 */ 158 */
159int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, 159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
160 unsigned char *code) 160 unsigned char *code)
161{ 161{
162 int i; 162 int i;
163 const uint32_t *bp = (uint32_t *)buf; 163 const uint32_t *bp = (uint32_t *)buf;
164 /* 256 or 512 bytes/ecc */ 164 /* 256 or 512 bytes/ecc */
165 const uint32_t eccsize_mult = 165 const uint32_t eccsize_mult = eccsize >> 8;
166 (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
167 uint32_t cur; /* current value in buffer */ 166 uint32_t cur; /* current value in buffer */
168 /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ 167 /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
169 uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; 168 uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
@@ -412,6 +411,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
412 (invparity[par & 0x55] << 2) | 411 (invparity[par & 0x55] << 2) |
413 (invparity[rp17] << 1) | 412 (invparity[rp17] << 1) |
414 (invparity[rp16] << 0); 413 (invparity[rp16] << 0);
414}
415EXPORT_SYMBOL(__nand_calculate_ecc);
416
417/**
418 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
419 * block
420 * @mtd: MTD block structure
421 * @buf: input buffer with raw data
422 * @code: output buffer with ECC
423 */
424int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
425 unsigned char *code)
426{
427 __nand_calculate_ecc(buf,
428 ((struct nand_chip *)mtd->priv)->ecc.size, code);
429
415 return 0; 430 return 0;
416} 431}
417EXPORT_SYMBOL(nand_calculate_ecc); 432EXPORT_SYMBOL(nand_calculate_ecc);
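
This hunk splits the software ECC routine: __nand_calculate_ecc() now takes the ECC step size (256 or 512 bytes) as an explicit argument and is exported on its own, while nand_calculate_ecc() becomes a thin wrapper that digs the size out of mtd->priv. The split lets callers that have no struct mtd_info (self-tests, simulators) reuse the core. A user-space sketch of the same wrapper pattern, with invented names and a stand-in parity computation:

#include <stdint.h>
#include <stdio.h>

struct fake_chip { unsigned int ecc_step; };
struct fake_mtd  { struct fake_chip *priv; };

/* Core routine: needs only the data and the step size, no device struct. */
static void calc_ecc_core(const uint8_t *buf, unsigned int eccsize, uint8_t *code)
{
	/* Stand-in for the real row/column parity computation. */
	uint8_t acc = 0;
	unsigned int i;

	for (i = 0; i < eccsize; i++)
		acc ^= buf[i];
	code[0] = acc;
	code[1] = (uint8_t)~acc;
	code[2] = 0x55;
}

/* Wrapper keeping the old MTD-facing calling convention. */
static int calc_ecc(struct fake_mtd *mtd, const uint8_t *buf, uint8_t *code)
{
	calc_ecc_core(buf, mtd->priv->ecc_step, code);
	return 0;
}

int main(void)
{
	uint8_t page[256] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t code[3];
	struct fake_chip chip = { .ecc_step = sizeof(page) };
	struct fake_mtd mtd = { .priv = &chip };

	calc_ecc(&mtd, page, code);
	printf("%02x %02x %02x\n", code[0], code[1], code[2]);
	return 0;
}
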
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index cd0711b83ac4..7281000fef2d 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -161,7 +161,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
161MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); 161MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
162 162
163/* The largest possible page size */ 163/* The largest possible page size */
164#define NS_LARGEST_PAGE_SIZE 2048 164#define NS_LARGEST_PAGE_SIZE 4096
165 165
166/* The prefix for simulator output */ 166/* The prefix for simulator output */
167#define NS_OUTPUT_PREFIX "[nandsim]" 167#define NS_OUTPUT_PREFIX "[nandsim]"
@@ -259,7 +259,8 @@ MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of mem
259#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ 259#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
260#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */ 260#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
261#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 261#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
262#define OPT_LARGEPAGE (OPT_PAGE2048) /* 2048-byte page chips */ 262#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
263#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
263#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */ 264#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
264 265
265/* Remove action bits from state */ 266/* Remove action bits from state */
@@ -588,6 +589,8 @@ static int init_nandsim(struct mtd_info *mtd)
588 ns->options |= OPT_PAGE512_8BIT; 589 ns->options |= OPT_PAGE512_8BIT;
589 } else if (ns->geom.pgsz == 2048) { 590 } else if (ns->geom.pgsz == 2048) {
590 ns->options |= OPT_PAGE2048; 591 ns->options |= OPT_PAGE2048;
592 } else if (ns->geom.pgsz == 4096) {
593 ns->options |= OPT_PAGE4096;
591 } else { 594 } else {
592 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz); 595 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
593 return -EIO; 596 return -EIO;
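
The nandsim changes above raise the largest simulated page to 4096 bytes: a new OPT_PAGE4096 flag, OPT_LARGEPAGE widened to cover it, and an extra branch in init_nandsim()'s page-size switch. A small sketch of that mapping, with the pre-existing flag values treated as placeholders since they are not visible in the hunk:

#include <errno.h>
#include <stdio.h>

#define OPT_PAGE256	0x00000001	/* assumed value, not shown in the hunk */
#define OPT_PAGE512	0x00000002	/* assumed value, not shown in the hunk */
#define OPT_PAGE2048	0x00000008	/* assumed value, not shown in the hunk */
#define OPT_PAGE4096	0x00000080	/* new flag from the hunk */

static int pgsz_to_option(unsigned int pgsz)
{
	switch (pgsz) {
	case 256:	return OPT_PAGE256;
	case 512:	return OPT_PAGE512;
	case 2048:	return OPT_PAGE2048;
	case 4096:	return OPT_PAGE4096;	/* newly supported */
	default:	return -EIO;		/* init_nandsim() fails here */
	}
}

int main(void)
{
	printf("4096-byte pages -> option 0x%08x\n",
	       (unsigned int)pgsz_to_option(4096));
	return 0;
}
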
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 7c302d55910e..66123419f65d 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -216,7 +216,7 @@ static int nomadik_nand_resume(struct device *dev)
216 return 0; 216 return 0;
217} 217}
218 218
219static struct dev_pm_ops nomadik_nand_pm_ops = { 219static const struct dev_pm_ops nomadik_nand_pm_ops = {
220 .suspend = nomadik_nand_suspend, 220 .suspend = nomadik_nand_suspend,
221 .resume = nomadik_nand_resume, 221 .resume = nomadik_nand_resume,
222}; 222};
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 4e16c6f5bdd5..8d467315f02b 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -34,7 +34,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
34{ 34{
35 struct platform_nand_data *pdata = pdev->dev.platform_data; 35 struct platform_nand_data *pdata = pdev->dev.platform_data;
36 struct plat_nand_data *data; 36 struct plat_nand_data *data;
37 int res = 0; 37 struct resource *res;
38 int err = 0;
39
40 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
41 if (!res)
42 return -ENXIO;
38 43
39 /* Allocate memory for the device structure (and zero it) */ 44 /* Allocate memory for the device structure (and zero it) */
40 data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL); 45 data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
@@ -43,12 +48,18 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
43 return -ENOMEM; 48 return -ENOMEM;
44 } 49 }
45 50
46 data->io_base = ioremap(pdev->resource[0].start, 51 if (!request_mem_region(res->start, resource_size(res),
47 pdev->resource[0].end - pdev->resource[0].start + 1); 52 dev_name(&pdev->dev))) {
53 dev_err(&pdev->dev, "request_mem_region failed\n");
54 err = -EBUSY;
55 goto out_free;
56 }
57
58 data->io_base = ioremap(res->start, resource_size(res));
48 if (data->io_base == NULL) { 59 if (data->io_base == NULL) {
49 dev_err(&pdev->dev, "ioremap failed\n"); 60 dev_err(&pdev->dev, "ioremap failed\n");
50 kfree(data); 61 err = -EIO;
51 return -EIO; 62 goto out_release_io;
52 } 63 }
53 64
54 data->chip.priv = &data; 65 data->chip.priv = &data;
@@ -74,24 +85,24 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
74 85
75 /* Handle any platform specific setup */ 86 /* Handle any platform specific setup */
76 if (pdata->ctrl.probe) { 87 if (pdata->ctrl.probe) {
77 res = pdata->ctrl.probe(pdev); 88 err = pdata->ctrl.probe(pdev);
78 if (res) 89 if (err)
79 goto out; 90 goto out;
80 } 91 }
81 92
82 /* Scan to find existence of the device */ 93 /* Scan to find existence of the device */
83 if (nand_scan(&data->mtd, 1)) { 94 if (nand_scan(&data->mtd, 1)) {
84 res = -ENXIO; 95 err = -ENXIO;
85 goto out; 96 goto out;
86 } 97 }
87 98
88#ifdef CONFIG_MTD_PARTITIONS 99#ifdef CONFIG_MTD_PARTITIONS
89 if (pdata->chip.part_probe_types) { 100 if (pdata->chip.part_probe_types) {
90 res = parse_mtd_partitions(&data->mtd, 101 err = parse_mtd_partitions(&data->mtd,
91 pdata->chip.part_probe_types, 102 pdata->chip.part_probe_types,
92 &data->parts, 0); 103 &data->parts, 0);
93 if (res > 0) { 104 if (err > 0) {
94 add_mtd_partitions(&data->mtd, data->parts, res); 105 add_mtd_partitions(&data->mtd, data->parts, err);
95 return 0; 106 return 0;
96 } 107 }
97 } 108 }
@@ -99,14 +110,14 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
99 pdata->chip.set_parts(data->mtd.size, &pdata->chip); 110 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
100 if (pdata->chip.partitions) { 111 if (pdata->chip.partitions) {
101 data->parts = pdata->chip.partitions; 112 data->parts = pdata->chip.partitions;
102 res = add_mtd_partitions(&data->mtd, data->parts, 113 err = add_mtd_partitions(&data->mtd, data->parts,
103 pdata->chip.nr_partitions); 114 pdata->chip.nr_partitions);
104 } else 115 } else
105#endif 116#endif
106 res = add_mtd_device(&data->mtd); 117 err = add_mtd_device(&data->mtd);
107 118
108 if (!res) 119 if (!err)
109 return res; 120 return err;
110 121
111 nand_release(&data->mtd); 122 nand_release(&data->mtd);
112out: 123out:
@@ -114,8 +125,11 @@ out:
114 pdata->ctrl.remove(pdev); 125 pdata->ctrl.remove(pdev);
115 platform_set_drvdata(pdev, NULL); 126 platform_set_drvdata(pdev, NULL);
116 iounmap(data->io_base); 127 iounmap(data->io_base);
128out_release_io:
129 release_mem_region(res->start, resource_size(res));
130out_free:
117 kfree(data); 131 kfree(data);
118 return res; 132 return err;
119} 133}
120 134
121/* 135/*
@@ -125,6 +139,9 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
125{ 139{
126 struct plat_nand_data *data = platform_get_drvdata(pdev); 140 struct plat_nand_data *data = platform_get_drvdata(pdev);
127 struct platform_nand_data *pdata = pdev->dev.platform_data; 141 struct platform_nand_data *pdata = pdev->dev.platform_data;
142 struct resource *res;
143
144 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
128 145
129 nand_release(&data->mtd); 146 nand_release(&data->mtd);
130#ifdef CONFIG_MTD_PARTITIONS 147#ifdef CONFIG_MTD_PARTITIONS
@@ -134,6 +151,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
134 if (pdata->ctrl.remove) 151 if (pdata->ctrl.remove)
135 pdata->ctrl.remove(pdev); 152 pdata->ctrl.remove(pdev);
136 iounmap(data->io_base); 153 iounmap(data->io_base);
154 release_mem_region(res->start, resource_size(res));
137 kfree(data); 155 kfree(data);
138 156
139 return 0; 157 return 0;
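
The plat_nand probe now claims its memory resource with platform_get_resource() + request_mem_region() before mapping it, and unwinds in reverse order through the new out_release_io/out_free labels; remove() releases the region after iounmap(). The sketch below models that acquire/unwind ladder in user space; every helper is a stand-in for the kernel call named in its comment.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *claim_region(void)	{ return malloc(1); }	/* request_mem_region() */
static void *map_region(void)	{ return malloc(1); }	/* ioremap() */

static int probe(void)
{
	void *data, *region, *io;
	int err;

	data = malloc(64);			/* kzalloc() */
	if (!data)
		return -ENOMEM;

	region = claim_region();		/* request_mem_region() */
	if (!region) {
		err = -EBUSY;
		goto out_free;
	}

	io = map_region();			/* ioremap() */
	if (!io) {
		err = -EIO;
		goto out_release;
	}

	/* ... nand_scan(), partition registration ... */
	return 0;	/* resources stay held while the device is bound */

out_release:
	free(region);				/* release_mem_region() */
out_free:
	free(data);				/* kfree() */
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
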
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 68b5b3a486a9..fa6e9c7fe511 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -774,7 +774,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
774 chip->select_chip = s3c2410_nand_select_chip; 774 chip->select_chip = s3c2410_nand_select_chip;
775 chip->chip_delay = 50; 775 chip->chip_delay = 50;
776 chip->priv = nmtd; 776 chip->priv = nmtd;
777 chip->options = 0; 777 chip->options = set->options;
778 chip->controller = &info->controller; 778 chip->controller = &info->controller;
779 779
780 switch (info->cpu_type) { 780 switch (info->cpu_type) {
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 73af8324d0d0..863513c3b69a 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -429,11 +429,10 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
429 chip = mtd->priv; 429 chip = mtd->priv;
430 txx9_priv = chip->priv; 430 txx9_priv = chip->priv;
431 431
432 nand_release(mtd);
432#ifdef CONFIG_MTD_PARTITIONS 433#ifdef CONFIG_MTD_PARTITIONS
433 del_mtd_partitions(mtd);
434 kfree(drvdata->parts[i]); 434 kfree(drvdata->parts[i]);
435#endif 435#endif
436 del_mtd_device(mtd);
437 kfree(txx9_priv->mtdname); 436 kfree(txx9_priv->mtdname);
438 kfree(txx9_priv); 437 kfree(txx9_priv);
439 } 438 }
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 86c4f6dcdc65..75f38b95811e 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -112,10 +112,24 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
112 unsigned long timeout; 112 unsigned long timeout;
113 u32 syscfg; 113 u32 syscfg;
114 114
115 if (state == FL_RESETING) { 115 if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
116 int i; 116 state == FL_VERIFYING_ERASE) {
117 int i = 21;
118 unsigned int intr_flags = ONENAND_INT_MASTER;
119
120 switch (state) {
121 case FL_RESETING:
122 intr_flags |= ONENAND_INT_RESET;
123 break;
124 case FL_PREPARING_ERASE:
125 intr_flags |= ONENAND_INT_ERASE;
126 break;
127 case FL_VERIFYING_ERASE:
128 i = 101;
129 break;
130 }
117 131
118 for (i = 0; i < 20; i++) { 132 while (--i) {
119 udelay(1); 133 udelay(1);
120 intr = read_reg(c, ONENAND_REG_INTERRUPT); 134 intr = read_reg(c, ONENAND_REG_INTERRUPT);
121 if (intr & ONENAND_INT_MASTER) 135 if (intr & ONENAND_INT_MASTER)
@@ -126,7 +140,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
126 wait_err("controller error", state, ctrl, intr); 140 wait_err("controller error", state, ctrl, intr);
127 return -EIO; 141 return -EIO;
128 } 142 }
129 if (!(intr & ONENAND_INT_RESET)) { 143 if ((intr & intr_flags) != intr_flags) {
130 wait_err("timeout", state, ctrl, intr); 144 wait_err("timeout", state, ctrl, intr);
131 return -EIO; 145 return -EIO;
132 } 146 }
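
omap2_onenand_wait() now handles FL_PREPARING_ERASE and FL_VERIFYING_ERASE with the same short polling loop it used for reset: it picks the interrupt bits to expect per state, spins a bounded number of 1 µs steps, and fails with -EIO if the expected bits never all appear. A user-space model of that bounded poll, with invented flag values and a faked interrupt register:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define INT_MASTER	0x8000	/* invented values for the sketch */
#define INT_RESET	0x0010
#define INT_ERASE	0x0040

enum state { ST_RESETTING, ST_PREPARING_ERASE, ST_VERIFYING_ERASE };

/* Pretend the controller raises MASTER|ERASE on the fifth poll. */
static uint16_t read_interrupt(void)
{
	static int calls;

	return (++calls >= 5) ? (INT_MASTER | INT_ERASE) : 0;
}

static int wait_for(enum state state)
{
	int budget = (state == ST_VERIFYING_ERASE) ? 101 : 21;
	uint16_t want = INT_MASTER;
	uint16_t intr = 0;

	if (state == ST_RESETTING)
		want |= INT_RESET;
	else if (state == ST_PREPARING_ERASE)
		want |= INT_ERASE;

	while (--budget) {
		/* udelay(1) in the driver */
		intr = read_interrupt();
		if (intr & INT_MASTER)
			break;
	}

	if ((intr & want) != want) {
		fprintf(stderr, "timeout: intr=0x%04x\n", intr);
		return -EIO;
	}
	return 0;
}

int main(void)
{
	printf("wait_for(PREPARING_ERASE) = %d\n", wait_for(ST_PREPARING_ERASE));
	return 0;
}
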
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ff66e4330aa7..f63b1db3ffb3 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1,17 +1,19 @@
1/* 1/*
2 * linux/drivers/mtd/onenand/onenand_base.c 2 * linux/drivers/mtd/onenand/onenand_base.c
3 * 3 *
4 * Copyright (C) 2005-2007 Samsung Electronics 4 * Copyright © 2005-2009 Samsung Electronics
5 * Copyright © 2007 Nokia Corporation
6 *
5 * Kyungmin Park <kyungmin.park@samsung.com> 7 * Kyungmin Park <kyungmin.park@samsung.com>
6 * 8 *
7 * Credits: 9 * Credits:
8 * Adrian Hunter <ext-adrian.hunter@nokia.com>: 10 * Adrian Hunter <ext-adrian.hunter@nokia.com>:
9 * auto-placement support, read-while load support, various fixes 11 * auto-placement support, read-while load support, various fixes
10 * Copyright (C) Nokia Corporation, 2007
11 * 12 *
12 * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com> 13 * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
13 * Flex-OneNAND support 14 * Flex-OneNAND support
14 * Copyright (C) Samsung Electronics, 2008 15 * Amul Kumar Saha <amul.saha at samsung.com>
16 * OTP support
15 * 17 *
16 * This program is free software; you can redistribute it and/or modify 18 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as 19 * it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
32 34
33#include <asm/io.h> 35#include <asm/io.h>
34 36
37/*
38 * Multiblock erase if number of blocks to erase is 2 or more.
39 * Maximum number of blocks for simultaneous erase is 64.
40 */
41#define MB_ERASE_MIN_BLK_COUNT 2
42#define MB_ERASE_MAX_BLK_COUNT 64
43
35/* Default Flex-OneNAND boundary and lock respectively */ 44/* Default Flex-OneNAND boundary and lock respectively */
36static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 }; 45static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
37 46
@@ -43,6 +52,18 @@ MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
43 " : 0->Set boundary in unlocked status" 52 " : 0->Set boundary in unlocked status"
44 " : 1->Set boundary in locked status"); 53 " : 1->Set boundary in locked status");
45 54
55/* Default OneNAND/Flex-OneNAND OTP options*/
56static int otp;
57
58module_param(otp, int, 0400);
59MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
60 "Syntax : otp=LOCK_TYPE"
61 "LOCK_TYPE : Keys issued, for specific OTP Lock type"
62 " : 0 -> Default (No Blocks Locked)"
63 " : 1 -> OTP Block lock"
64 " : 2 -> 1st Block lock"
65 " : 3 -> BOTH OTP Block and 1st Block lock");
66
46/** 67/**
47 * onenand_oob_128 - oob info for Flex-Onenand with 4KB page 68 * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
48 * For now, we expose only 64 out of 80 ecc bytes 69 * For now, we expose only 64 out of 80 ecc bytes
@@ -339,6 +360,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
339 break; 360 break;
340 361
341 case ONENAND_CMD_ERASE: 362 case ONENAND_CMD_ERASE:
363 case ONENAND_CMD_MULTIBLOCK_ERASE:
364 case ONENAND_CMD_ERASE_VERIFY:
342 case ONENAND_CMD_BUFFERRAM: 365 case ONENAND_CMD_BUFFERRAM:
343 case ONENAND_CMD_OTP_ACCESS: 366 case ONENAND_CMD_OTP_ACCESS:
344 block = onenand_block(this, addr); 367 block = onenand_block(this, addr);
@@ -483,7 +506,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
483 if (interrupt & flags) 506 if (interrupt & flags)
484 break; 507 break;
485 508
486 if (state != FL_READING) 509 if (state != FL_READING && state != FL_PREPARING_ERASE)
487 cond_resched(); 510 cond_resched();
488 } 511 }
489 /* To get correct interrupt status in timeout case */ 512 /* To get correct interrupt status in timeout case */
@@ -500,25 +523,40 @@ static int onenand_wait(struct mtd_info *mtd, int state)
500 int ecc = onenand_read_ecc(this); 523 int ecc = onenand_read_ecc(this);
501 if (ecc) { 524 if (ecc) {
502 if (ecc & ONENAND_ECC_2BIT_ALL) { 525 if (ecc & ONENAND_ECC_2BIT_ALL) {
503 printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc); 526 printk(KERN_ERR "%s: ECC error = 0x%04x\n",
527 __func__, ecc);
504 mtd->ecc_stats.failed++; 528 mtd->ecc_stats.failed++;
505 return -EBADMSG; 529 return -EBADMSG;
506 } else if (ecc & ONENAND_ECC_1BIT_ALL) { 530 } else if (ecc & ONENAND_ECC_1BIT_ALL) {
507 printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc); 531 printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
532 __func__, ecc);
508 mtd->ecc_stats.corrected++; 533 mtd->ecc_stats.corrected++;
509 } 534 }
510 } 535 }
511 } else if (state == FL_READING) { 536 } else if (state == FL_READING) {
512 printk(KERN_ERR "onenand_wait: read timeout! ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt); 537 printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
538 __func__, ctrl, interrupt);
539 return -EIO;
540 }
541
542 if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
543 printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
544 __func__, ctrl, interrupt);
545 return -EIO;
546 }
547
548 if (!(interrupt & ONENAND_INT_MASTER)) {
549 printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
550 __func__, ctrl, interrupt);
513 return -EIO; 551 return -EIO;
514 } 552 }
515 553
516 /* If there's controller error, it's a real error */ 554 /* If there's controller error, it's a real error */
517 if (ctrl & ONENAND_CTRL_ERROR) { 555 if (ctrl & ONENAND_CTRL_ERROR) {
518 printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", 556 printk(KERN_ERR "%s: controller error = 0x%04x\n",
519 ctrl); 557 __func__, ctrl);
520 if (ctrl & ONENAND_CTRL_LOCK) 558 if (ctrl & ONENAND_CTRL_LOCK)
521 printk(KERN_ERR "onenand_wait: it's locked error.\n"); 559 printk(KERN_ERR "%s: it's locked error.\n", __func__);
522 return -EIO; 560 return -EIO;
523 } 561 }
524 562
@@ -1015,7 +1053,8 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
1015 /* We are attempting to reread, so decrement stats.failed 1053 /* We are attempting to reread, so decrement stats.failed
1016 * which was incremented by onenand_wait due to read failure 1054 * which was incremented by onenand_wait due to read failure
1017 */ 1055 */
1018 printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n"); 1056 printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
1057 __func__);
1019 mtd->ecc_stats.failed--; 1058 mtd->ecc_stats.failed--;
1020 1059
1021 /* Issue the LSB page recovery command */ 1060 /* Issue the LSB page recovery command */
@@ -1046,7 +1085,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1046 int ret = 0; 1085 int ret = 0;
1047 int writesize = this->writesize; 1086 int writesize = this->writesize;
1048 1087
1049 DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 1088 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1089 __func__, (unsigned int) from, (int) len);
1050 1090
1051 if (ops->mode == MTD_OOB_AUTO) 1091 if (ops->mode == MTD_OOB_AUTO)
1052 oobsize = this->ecclayout->oobavail; 1092 oobsize = this->ecclayout->oobavail;
@@ -1057,7 +1097,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1057 1097
1058 /* Do not allow reads past end of device */ 1098 /* Do not allow reads past end of device */
1059 if (from + len > mtd->size) { 1099 if (from + len > mtd->size) {
1060 printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n"); 1100 printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1101 __func__);
1061 ops->retlen = 0; 1102 ops->retlen = 0;
1062 ops->oobretlen = 0; 1103 ops->oobretlen = 0;
1063 return -EINVAL; 1104 return -EINVAL;
@@ -1146,7 +1187,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1146 int ret = 0, boundary = 0; 1187 int ret = 0, boundary = 0;
1147 int writesize = this->writesize; 1188 int writesize = this->writesize;
1148 1189
1149 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 1190 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1191 __func__, (unsigned int) from, (int) len);
1150 1192
1151 if (ops->mode == MTD_OOB_AUTO) 1193 if (ops->mode == MTD_OOB_AUTO)
1152 oobsize = this->ecclayout->oobavail; 1194 oobsize = this->ecclayout->oobavail;
@@ -1157,7 +1199,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1157 1199
1158 /* Do not allow reads past end of device */ 1200 /* Do not allow reads past end of device */
1159 if ((from + len) > mtd->size) { 1201 if ((from + len) > mtd->size) {
1160 printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n"); 1202 printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1203 __func__);
1161 ops->retlen = 0; 1204 ops->retlen = 0;
1162 ops->oobretlen = 0; 1205 ops->oobretlen = 0;
1163 return -EINVAL; 1206 return -EINVAL;
@@ -1275,7 +1318,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1275 1318
1276 from += ops->ooboffs; 1319 from += ops->ooboffs;
1277 1320
1278 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 1321 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1322 __func__, (unsigned int) from, (int) len);
1279 1323
1280 /* Initialize return length value */ 1324 /* Initialize return length value */
1281 ops->oobretlen = 0; 1325 ops->oobretlen = 0;
@@ -1288,7 +1332,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1288 column = from & (mtd->oobsize - 1); 1332 column = from & (mtd->oobsize - 1);
1289 1333
1290 if (unlikely(column >= oobsize)) { 1334 if (unlikely(column >= oobsize)) {
1291 printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n"); 1335 printk(KERN_ERR "%s: Attempted to start read outside oob\n",
1336 __func__);
1292 return -EINVAL; 1337 return -EINVAL;
1293 } 1338 }
1294 1339
@@ -1296,7 +1341,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1296 if (unlikely(from >= mtd->size || 1341 if (unlikely(from >= mtd->size ||
1297 column + len > ((mtd->size >> this->page_shift) - 1342 column + len > ((mtd->size >> this->page_shift) -
1298 (from >> this->page_shift)) * oobsize)) { 1343 (from >> this->page_shift)) * oobsize)) {
1299 printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n"); 1344 printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
1345 __func__);
1300 return -EINVAL; 1346 return -EINVAL;
1301 } 1347 }
1302 1348
@@ -1319,7 +1365,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1319 ret = onenand_recover_lsb(mtd, from, ret); 1365 ret = onenand_recover_lsb(mtd, from, ret);
1320 1366
1321 if (ret && ret != -EBADMSG) { 1367 if (ret && ret != -EBADMSG) {
1322 printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); 1368 printk(KERN_ERR "%s: read failed = 0x%x\n",
1369 __func__, ret);
1323 break; 1370 break;
1324 } 1371 }
1325 1372
@@ -1450,20 +1497,21 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
1450 if (interrupt & ONENAND_INT_READ) { 1497 if (interrupt & ONENAND_INT_READ) {
1451 int ecc = onenand_read_ecc(this); 1498 int ecc = onenand_read_ecc(this);
1452 if (ecc & ONENAND_ECC_2BIT_ALL) { 1499 if (ecc & ONENAND_ECC_2BIT_ALL) {
1453 printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x" 1500 printk(KERN_WARNING "%s: ecc error = 0x%04x, "
1454 ", controller error 0x%04x\n", ecc, ctrl); 1501 "controller error 0x%04x\n",
1502 __func__, ecc, ctrl);
1455 return ONENAND_BBT_READ_ECC_ERROR; 1503 return ONENAND_BBT_READ_ECC_ERROR;
1456 } 1504 }
1457 } else { 1505 } else {
1458 printk(KERN_ERR "onenand_bbt_wait: read timeout!" 1506 printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
1459 "ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt); 1507 __func__, ctrl, interrupt);
1460 return ONENAND_BBT_READ_FATAL_ERROR; 1508 return ONENAND_BBT_READ_FATAL_ERROR;
1461 } 1509 }
1462 1510
1463 /* Initial bad block case: 0x2400 or 0x0400 */ 1511 /* Initial bad block case: 0x2400 or 0x0400 */
1464 if (ctrl & ONENAND_CTRL_ERROR) { 1512 if (ctrl & ONENAND_CTRL_ERROR) {
1465 printk(KERN_DEBUG "onenand_bbt_wait: " 1513 printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
1466 "controller error = 0x%04x\n", ctrl); 1514 __func__, ctrl);
1467 return ONENAND_BBT_READ_ERROR; 1515 return ONENAND_BBT_READ_ERROR;
1468 } 1516 }
1469 1517
@@ -1487,14 +1535,16 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1487 size_t len = ops->ooblen; 1535 size_t len = ops->ooblen;
1488 u_char *buf = ops->oobbuf; 1536 u_char *buf = ops->oobbuf;
1489 1537
1490 DEBUG(MTD_DEBUG_LEVEL3, "onenand_bbt_read_oob: from = 0x%08x, len = %zi\n", (unsigned int) from, len); 1538 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
1539 __func__, (unsigned int) from, len);
1491 1540
1492 /* Initialize return value */ 1541 /* Initialize return value */
1493 ops->oobretlen = 0; 1542 ops->oobretlen = 0;
1494 1543
1495 /* Do not allow reads past end of device */ 1544 /* Do not allow reads past end of device */
1496 if (unlikely((from + len) > mtd->size)) { 1545 if (unlikely((from + len) > mtd->size)) {
1497 printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n"); 1546 printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1547 __func__);
1498 return ONENAND_BBT_READ_FATAL_ERROR; 1548 return ONENAND_BBT_READ_FATAL_ERROR;
1499 } 1549 }
1500 1550
@@ -1661,21 +1711,23 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1661 /* Wait for any existing operation to clear */ 1711 /* Wait for any existing operation to clear */
1662 onenand_panic_wait(mtd); 1712 onenand_panic_wait(mtd);
1663 1713
1664 DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n", 1714 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
1665 (unsigned int) to, (int) len); 1715 __func__, (unsigned int) to, (int) len);
1666 1716
1667 /* Initialize retlen, in case of early exit */ 1717 /* Initialize retlen, in case of early exit */
1668 *retlen = 0; 1718 *retlen = 0;
1669 1719
1670 /* Do not allow writes past end of device */ 1720 /* Do not allow writes past end of device */
1671 if (unlikely((to + len) > mtd->size)) { 1721 if (unlikely((to + len) > mtd->size)) {
1672 printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n"); 1722 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1723 __func__);
1673 return -EINVAL; 1724 return -EINVAL;
1674 } 1725 }
1675 1726
1676 /* Reject writes, which are not page aligned */ 1727 /* Reject writes, which are not page aligned */
1677 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1728 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1678 printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n"); 1729 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
1730 __func__);
1679 return -EINVAL; 1731 return -EINVAL;
1680 } 1732 }
1681 1733
@@ -1711,7 +1763,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1711 } 1763 }
1712 1764
1713 if (ret) { 1765 if (ret) {
1714 printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret); 1766 printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
1715 break; 1767 break;
1716 } 1768 }
1717 1769
@@ -1792,7 +1844,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1792 u_char *oobbuf; 1844 u_char *oobbuf;
1793 int ret = 0; 1845 int ret = 0;
1794 1846
1795 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 1847 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
1848 __func__, (unsigned int) to, (int) len);
1796 1849
1797 /* Initialize retlen, in case of early exit */ 1850 /* Initialize retlen, in case of early exit */
1798 ops->retlen = 0; 1851 ops->retlen = 0;
@@ -1800,13 +1853,15 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1800 1853
1801 /* Do not allow writes past end of device */ 1854 /* Do not allow writes past end of device */
1802 if (unlikely((to + len) > mtd->size)) { 1855 if (unlikely((to + len) > mtd->size)) {
1803 printk(KERN_ERR "onenand_write_ops_nolock: Attempt write to past end of device\n"); 1856 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1857 __func__);
1804 return -EINVAL; 1858 return -EINVAL;
1805 } 1859 }
1806 1860
1807 /* Reject writes, which are not page aligned */ 1861 /* Reject writes, which are not page aligned */
1808 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1862 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1809 printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n"); 1863 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
1864 __func__);
1810 return -EINVAL; 1865 return -EINVAL;
1811 } 1866 }
1812 1867
@@ -1879,7 +1934,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1879 onenand_update_bufferram(mtd, prev, !ret && !prev_subpage); 1934 onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
1880 if (ret) { 1935 if (ret) {
1881 written -= prevlen; 1936 written -= prevlen;
1882 printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); 1937 printk(KERN_ERR "%s: write failed %d\n",
1938 __func__, ret);
1883 break; 1939 break;
1884 } 1940 }
1885 1941
@@ -1887,7 +1943,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1887 /* Only check verify write turn on */ 1943 /* Only check verify write turn on */
1888 ret = onenand_verify(mtd, buf - len, to - len, len); 1944 ret = onenand_verify(mtd, buf - len, to - len, len);
1889 if (ret) 1945 if (ret)
1890 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1946 printk(KERN_ERR "%s: verify failed %d\n",
1947 __func__, ret);
1891 break; 1948 break;
1892 } 1949 }
1893 1950
@@ -1905,14 +1962,16 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1905 /* In partial page write we don't update bufferram */ 1962 /* In partial page write we don't update bufferram */
1906 onenand_update_bufferram(mtd, to, !ret && !subpage); 1963 onenand_update_bufferram(mtd, to, !ret && !subpage);
1907 if (ret) { 1964 if (ret) {
1908 printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); 1965 printk(KERN_ERR "%s: write failed %d\n",
1966 __func__, ret);
1909 break; 1967 break;
1910 } 1968 }
1911 1969
1912 /* Only check verify write turn on */ 1970 /* Only check verify write turn on */
1913 ret = onenand_verify(mtd, buf, to, thislen); 1971 ret = onenand_verify(mtd, buf, to, thislen);
1914 if (ret) { 1972 if (ret) {
1915 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1973 printk(KERN_ERR "%s: verify failed %d\n",
1974 __func__, ret);
1916 break; 1975 break;
1917 } 1976 }
1918 1977
@@ -1968,7 +2027,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1968 2027
1969 to += ops->ooboffs; 2028 to += ops->ooboffs;
1970 2029
1971 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 2030 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
2031 __func__, (unsigned int) to, (int) len);
1972 2032
1973 /* Initialize retlen, in case of early exit */ 2033 /* Initialize retlen, in case of early exit */
1974 ops->oobretlen = 0; 2034 ops->oobretlen = 0;
@@ -1981,14 +2041,15 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1981 column = to & (mtd->oobsize - 1); 2041 column = to & (mtd->oobsize - 1);
1982 2042
1983 if (unlikely(column >= oobsize)) { 2043 if (unlikely(column >= oobsize)) {
1984 printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n"); 2044 printk(KERN_ERR "%s: Attempted to start write outside oob\n",
2045 __func__);
1985 return -EINVAL; 2046 return -EINVAL;
1986 } 2047 }
1987 2048
1988 /* For compatibility with NAND: Do not allow write past end of page */ 2049 /* For compatibility with NAND: Do not allow write past end of page */
1989 if (unlikely(column + len > oobsize)) { 2050 if (unlikely(column + len > oobsize)) {
1990 printk(KERN_ERR "onenand_write_oob_nolock: " 2051 printk(KERN_ERR "%s: Attempt to write past end of page\n",
1991 "Attempt to write past end of page\n"); 2052 __func__);
1992 return -EINVAL; 2053 return -EINVAL;
1993 } 2054 }
1994 2055
@@ -1996,7 +2057,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1996 if (unlikely(to >= mtd->size || 2057 if (unlikely(to >= mtd->size ||
1997 column + len > ((mtd->size >> this->page_shift) - 2058 column + len > ((mtd->size >> this->page_shift) -
1998 (to >> this->page_shift)) * oobsize)) { 2059 (to >> this->page_shift)) * oobsize)) {
1999 printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n"); 2060 printk(KERN_ERR "%s: Attempted to write past end of device\n",
2061 __func__);
2000 return -EINVAL; 2062 return -EINVAL;
2001 } 2063 }
2002 2064
@@ -2038,13 +2100,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2038 2100
2039 ret = this->wait(mtd, FL_WRITING); 2101 ret = this->wait(mtd, FL_WRITING);
2040 if (ret) { 2102 if (ret) {
2041 printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret); 2103 printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
2042 break; 2104 break;
2043 } 2105 }
2044 2106
2045 ret = onenand_verify_oob(mtd, oobbuf, to); 2107 ret = onenand_verify_oob(mtd, oobbuf, to);
2046 if (ret) { 2108 if (ret) {
2047 printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret); 2109 printk(KERN_ERR "%s: verify failed %d\n",
2110 __func__, ret);
2048 break; 2111 break;
2049 } 2112 }
2050 2113
@@ -2140,78 +2203,186 @@ static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allo
2140 return bbm->isbad_bbt(mtd, ofs, allowbbt); 2203 return bbm->isbad_bbt(mtd, ofs, allowbbt);
2141} 2204}
2142 2205
2206
2207static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
2208 struct erase_info *instr)
2209{
2210 struct onenand_chip *this = mtd->priv;
2211 loff_t addr = instr->addr;
2212 int len = instr->len;
2213 unsigned int block_size = (1 << this->erase_shift);
2214 int ret = 0;
2215
2216 while (len) {
2217 this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
2218 ret = this->wait(mtd, FL_VERIFYING_ERASE);
2219 if (ret) {
2220 printk(KERN_ERR "%s: Failed verify, block %d\n",
2221 __func__, onenand_block(this, addr));
2222 instr->state = MTD_ERASE_FAILED;
2223 instr->fail_addr = addr;
2224 return -1;
2225 }
2226 len -= block_size;
2227 addr += block_size;
2228 }
2229 return 0;
2230}
2231
2143/** 2232/**
2144 * onenand_erase - [MTD Interface] erase block(s) 2233 * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
2145 * @param mtd MTD device structure 2234 * @param mtd MTD device structure
2146 * @param instr erase instruction 2235 * @param instr erase instruction
2236 * @param region erase region
2147 * 2237 *
2148 * Erase one ore more blocks 2238 * Erase one or more blocks, up to 64 blocks at a time
2149 */ 2239 */
2150static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) 2240static int onenand_multiblock_erase(struct mtd_info *mtd,
2241 struct erase_info *instr,
2242 unsigned int block_size)
2151{ 2243{
2152 struct onenand_chip *this = mtd->priv; 2244 struct onenand_chip *this = mtd->priv;
2153 unsigned int block_size;
2154 loff_t addr = instr->addr; 2245 loff_t addr = instr->addr;
2155 loff_t len = instr->len; 2246 int len = instr->len;
2156 int ret = 0, i; 2247 int eb_count = 0;
2157 struct mtd_erase_region_info *region = NULL; 2248 int ret = 0;
2158 loff_t region_end = 0; 2249 int bdry_block = 0;
2159 2250
2160 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len); 2251 instr->state = MTD_ERASING;
2161 2252
2162 /* Do not allow erase past end of device */ 2253 if (ONENAND_IS_DDP(this)) {
2163 if (unlikely((len + addr) > mtd->size)) { 2254 loff_t bdry_addr = this->chipsize >> 1;
2164 printk(KERN_ERR "onenand_erase: Erase past end of device\n"); 2255 if (addr < bdry_addr && (addr + len) > bdry_addr)
2165 return -EINVAL; 2256 bdry_block = bdry_addr >> this->erase_shift;
2166 } 2257 }
2167 2258
2168 if (FLEXONENAND(this)) { 2259 /* Pre-check bbs */
2169 /* Find the eraseregion of this address */ 2260 while (len) {
2170 i = flexonenand_region(mtd, addr); 2261 /* Check if we have a bad block, we do not erase bad blocks */
2171 region = &mtd->eraseregions[i]; 2262 if (onenand_block_isbad_nolock(mtd, addr, 0)) {
2263 printk(KERN_WARNING "%s: attempt to erase a bad block "
2264 "at addr 0x%012llx\n",
2265 __func__, (unsigned long long) addr);
2266 instr->state = MTD_ERASE_FAILED;
2267 return -EIO;
2268 }
2269 len -= block_size;
2270 addr += block_size;
2271 }
2172 2272
2173 block_size = region->erasesize; 2273 len = instr->len;
2174 region_end = region->offset + region->erasesize * region->numblocks; 2274 addr = instr->addr;
2175 2275
2176 /* Start address within region must align on block boundary. 2276 /* loop over 64 eb batches */
2177 * Erase region's start offset is always block start address. 2277 while (len) {
2178 */ 2278 struct erase_info verify_instr = *instr;
2179 if (unlikely((addr - region->offset) & (block_size - 1))) { 2279 int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
2180 printk(KERN_ERR "onenand_erase: Unaligned address\n"); 2280
2181 return -EINVAL; 2281 verify_instr.addr = addr;
2282 verify_instr.len = 0;
2283
2284 /* do not cross chip boundary */
2285 if (bdry_block) {
2286 int this_block = (addr >> this->erase_shift);
2287
2288 if (this_block < bdry_block) {
2289 max_eb_count = min(max_eb_count,
2290 (bdry_block - this_block));
2291 }
2182 } 2292 }
2183 } else {
2184 block_size = 1 << this->erase_shift;
2185 2293
2186 /* Start address must align on block boundary */ 2294 eb_count = 0;
2187 if (unlikely(addr & (block_size - 1))) { 2295
2188 printk(KERN_ERR "onenand_erase: Unaligned address\n"); 2296 while (len > block_size && eb_count < (max_eb_count - 1)) {
2189 return -EINVAL; 2297 this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
2298 addr, block_size);
2299 onenand_invalidate_bufferram(mtd, addr, block_size);
2300
2301 ret = this->wait(mtd, FL_PREPARING_ERASE);
2302 if (ret) {
2303 printk(KERN_ERR "%s: Failed multiblock erase, "
2304 "block %d\n", __func__,
2305 onenand_block(this, addr));
2306 instr->state = MTD_ERASE_FAILED;
2307 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2308 return -EIO;
2309 }
2310
2311 len -= block_size;
2312 addr += block_size;
2313 eb_count++;
2314 }
2315
2316 /* last block of 64-eb series */
2317 cond_resched();
2318 this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
2319 onenand_invalidate_bufferram(mtd, addr, block_size);
2320
2321 ret = this->wait(mtd, FL_ERASING);
2322 /* Check if it is write protected */
2323 if (ret) {
2324 printk(KERN_ERR "%s: Failed erase, block %d\n",
2325 __func__, onenand_block(this, addr));
2326 instr->state = MTD_ERASE_FAILED;
2327 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2328 return -EIO;
2329 }
2330
2331 len -= block_size;
2332 addr += block_size;
2333 eb_count++;
2334
2335 /* verify */
2336 verify_instr.len = eb_count * block_size;
2337 if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
2338 instr->state = verify_instr.state;
2339 instr->fail_addr = verify_instr.fail_addr;
2340 return -EIO;
2190 } 2341 }
2191 }
2192 2342
2193 /* Length must align on block boundary */
2194 if (unlikely(len & (block_size - 1))) {
2195 printk(KERN_ERR "onenand_erase: Length not block aligned\n");
2196 return -EINVAL;
2197 } 2343 }
2344 return 0;
2345}
2198 2346
2199 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2200 2347
2201 /* Grab the lock and see if the device is available */ 2348/**
2202 onenand_get_device(mtd, FL_ERASING); 2349 * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
2350 * @param mtd MTD device structure
2351 * @param instr erase instruction
2352 * @param region erase region
2353 * @param block_size erase block size
2354 *
2355 * Erase one or more blocks one block at a time
2356 */
2357static int onenand_block_by_block_erase(struct mtd_info *mtd,
2358 struct erase_info *instr,
2359 struct mtd_erase_region_info *region,
2360 unsigned int block_size)
2361{
2362 struct onenand_chip *this = mtd->priv;
2363 loff_t addr = instr->addr;
2364 int len = instr->len;
2365 loff_t region_end = 0;
2366 int ret = 0;
2367
2368 if (region) {
2369 /* region is set for Flex-OneNAND */
2370 region_end = region->offset + region->erasesize * region->numblocks;
2371 }
2203 2372
2204 /* Loop through the blocks */
2205 instr->state = MTD_ERASING; 2373 instr->state = MTD_ERASING;
2206 2374
2375 /* Loop through the blocks */
2207 while (len) { 2376 while (len) {
2208 cond_resched(); 2377 cond_resched();
2209 2378
2210 /* Check if we have a bad block, we do not erase bad blocks */ 2379 /* Check if we have a bad block, we do not erase bad blocks */
2211 if (onenand_block_isbad_nolock(mtd, addr, 0)) { 2380 if (onenand_block_isbad_nolock(mtd, addr, 0)) {
2212 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr); 2381 printk(KERN_WARNING "%s: attempt to erase a bad block "
2382 "at addr 0x%012llx\n",
2383 __func__, (unsigned long long) addr);
2213 instr->state = MTD_ERASE_FAILED; 2384 instr->state = MTD_ERASE_FAILED;
2214 goto erase_exit; 2385 return -EIO;
2215 } 2386 }
2216 2387
2217 this->command(mtd, ONENAND_CMD_ERASE, addr, block_size); 2388 this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
@@ -2221,11 +2392,11 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2221 ret = this->wait(mtd, FL_ERASING); 2392 ret = this->wait(mtd, FL_ERASING);
2222 /* Check, if it is write protected */ 2393 /* Check, if it is write protected */
2223 if (ret) { 2394 if (ret) {
2224 printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", 2395 printk(KERN_ERR "%s: Failed erase, block %d\n",
2225 onenand_block(this, addr)); 2396 __func__, onenand_block(this, addr));
2226 instr->state = MTD_ERASE_FAILED; 2397 instr->state = MTD_ERASE_FAILED;
2227 instr->fail_addr = addr; 2398 instr->fail_addr = addr;
2228 goto erase_exit; 2399 return -EIO;
2229 } 2400 }
2230 2401
2231 len -= block_size; 2402 len -= block_size;
@@ -2241,25 +2412,88 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2241 2412
2242 if (len & (block_size - 1)) { 2413 if (len & (block_size - 1)) {
2243 /* FIXME: This should be handled at MTD partitioning level. */ 2414 /* FIXME: This should be handled at MTD partitioning level. */
2244 printk(KERN_ERR "onenand_erase: Unaligned address\n"); 2415 printk(KERN_ERR "%s: Unaligned address\n",
2245 goto erase_exit; 2416 __func__);
2417 return -EIO;
2246 } 2418 }
2247 } 2419 }
2420 }
2421 return 0;
2422}
2423
2424/**
2425 * onenand_erase - [MTD Interface] erase block(s)
2426 * @param mtd MTD device structure
2427 * @param instr erase instruction
2428 *
2429 * Erase one or more blocks
2430 */
2431static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2432{
2433 struct onenand_chip *this = mtd->priv;
2434 unsigned int block_size;
2435 loff_t addr = instr->addr;
2436 loff_t len = instr->len;
2437 int ret = 0;
2438 struct mtd_erase_region_info *region = NULL;
2439 loff_t region_offset = 0;
2440
2441 DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
2442 (unsigned long long) instr->addr, (unsigned long long) instr->len);
2443
2444 /* Do not allow erase past end of device */
2445 if (unlikely((len + addr) > mtd->size)) {
2446 printk(KERN_ERR "%s: Erase past end of device\n", __func__);
2447 return -EINVAL;
2448 }
2449
2450 if (FLEXONENAND(this)) {
2451 /* Find the eraseregion of this address */
2452 int i = flexonenand_region(mtd, addr);
2453
2454 region = &mtd->eraseregions[i];
2455 block_size = region->erasesize;
2456
2457 /* Start address within region must align on block boundary.
2458 * Erase region's start offset is always block start address.
2459 */
2460 region_offset = region->offset;
2461 } else
2462 block_size = 1 << this->erase_shift;
2463
2464 /* Start address must align on block boundary */
2465 if (unlikely((addr - region_offset) & (block_size - 1))) {
2466 printk(KERN_ERR "%s: Unaligned address\n", __func__);
2467 return -EINVAL;
2468 }
2248 2469
2470 /* Length must align on block boundary */
2471 if (unlikely(len & (block_size - 1))) {
2472 printk(KERN_ERR "%s: Length not block aligned\n", __func__);
2473 return -EINVAL;
2249 } 2474 }
2250 2475
2251 instr->state = MTD_ERASE_DONE; 2476 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2252 2477
2253erase_exit: 2478 /* Grab the lock and see if the device is available */
2479 onenand_get_device(mtd, FL_ERASING);
2254 2480
2255 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2481 if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
2482 /* region is set for Flex-OneNAND (no mb erase) */
2483 ret = onenand_block_by_block_erase(mtd, instr,
2484 region, block_size);
2485 } else {
2486 ret = onenand_multiblock_erase(mtd, instr, block_size);
2487 }
2256 2488
2257 /* Deselect and wake up anyone waiting on the device */ 2489 /* Deselect and wake up anyone waiting on the device */
2258 onenand_release_device(mtd); 2490 onenand_release_device(mtd);
2259 2491
2260 /* Do call back function */ 2492 /* Do call back function */
2261 if (!ret) 2493 if (!ret) {
2494 instr->state = MTD_ERASE_DONE;
2262 mtd_erase_callback(instr); 2495 mtd_erase_callback(instr);
2496 }
2263 2497
2264 return ret; 2498 return ret;
2265} 2499}
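
The rewritten erase path above splits into onenand_block_by_block_erase() for Flex-OneNAND and short requests, and onenand_multiblock_erase() for anything of MB_ERASE_MIN_BLK_COUNT blocks or more: after a bad-block pre-check it queues up to 63 blocks with ONENAND_CMD_MULTIBLOCK_ERASE, closes each batch with a plain ONENAND_CMD_ERASE, verifies the batch with ONENAND_CMD_ERASE_VERIFY, and never lets a batch straddle a DDP die boundary. A user-space model of just that batching arithmetic; block numbers and printing are illustrative.

#include <stdio.h>

#define MB_ERASE_MAX_BLK_COUNT 64

static void erase_range(int first_block, int nblocks, int bdry_block)
{
	int blk = first_block;
	int left = nblocks;

	while (left) {
		int max_eb = MB_ERASE_MAX_BLK_COUNT;
		int batch = 0;

		/* Do not let a batch straddle the die boundary. */
		if (bdry_block && blk < bdry_block && max_eb > bdry_block - blk)
			max_eb = bdry_block - blk;

		/* Queue blocks with ONENAND_CMD_MULTIBLOCK_ERASE ... */
		while (left > 1 && batch < max_eb - 1) {
			printf("  queue block %d\n", blk);
			blk++; left--; batch++;
		}

		/* ... close the batch with a plain ONENAND_CMD_ERASE ... */
		printf("  erase block %d (closes batch of %d)\n", blk, batch + 1);
		blk++; left--; batch++;

		/* ... then ONENAND_CMD_ERASE_VERIFY over the whole batch. */
		printf("  verify %d block(s)\n", batch);
	}
}

int main(void)
{
	erase_range(0, 130, 64);	/* 130 blocks, die boundary at block 64 */
	return 0;
}
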
@@ -2272,7 +2506,7 @@ erase_exit:
2272 */ 2506 */
2273static void onenand_sync(struct mtd_info *mtd) 2507static void onenand_sync(struct mtd_info *mtd)
2274{ 2508{
2275 DEBUG(MTD_DEBUG_LEVEL3, "onenand_sync: called\n"); 2509 DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
2276 2510
2277 /* Grab the lock and see if the device is available */ 2511 /* Grab the lock and see if the device is available */
2278 onenand_get_device(mtd, FL_SYNCING); 2512 onenand_get_device(mtd, FL_SYNCING);
@@ -2406,7 +2640,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2406 /* Check lock status */ 2640 /* Check lock status */
2407 status = this->read_word(this->base + ONENAND_REG_WP_STATUS); 2641 status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2408 if (!(status & wp_status_mask)) 2642 if (!(status & wp_status_mask))
2409 printk(KERN_ERR "wp status = 0x%x\n", status); 2643 printk(KERN_ERR "%s: wp status = 0x%x\n",
2644 __func__, status);
2410 2645
2411 return 0; 2646 return 0;
2412 } 2647 }
@@ -2435,7 +2670,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2435 /* Check lock status */ 2670 /* Check lock status */
2436 status = this->read_word(this->base + ONENAND_REG_WP_STATUS); 2671 status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2437 if (!(status & wp_status_mask)) 2672 if (!(status & wp_status_mask))
2438 printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status); 2673 printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
2674 __func__, block, status);
2439 } 2675 }
2440 2676
2441 return 0; 2677 return 0;
@@ -2502,7 +2738,8 @@ static int onenand_check_lock_status(struct onenand_chip *this)
2502 /* Check lock status */ 2738 /* Check lock status */
2503 status = this->read_word(this->base + ONENAND_REG_WP_STATUS); 2739 status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2504 if (!(status & ONENAND_WP_US)) { 2740 if (!(status & ONENAND_WP_US)) {
2505 printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status); 2741 printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
2742 __func__, block, status);
2506 return 0; 2743 return 0;
2507 } 2744 }
2508 } 2745 }
@@ -2557,6 +2794,208 @@ static void onenand_unlock_all(struct mtd_info *mtd)
2557 2794
2558#ifdef CONFIG_MTD_ONENAND_OTP 2795#ifdef CONFIG_MTD_ONENAND_OTP
2559 2796
2797/**
2798 * onenand_otp_command - Send OTP specific command to OneNAND device
2799 * @param mtd MTD device structure
2800 * @param cmd the command to be sent
2801 * @param addr offset to read from or write to
2802 * @param len number of bytes to read or write
2803 */
2804static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
2805 size_t len)
2806{
2807 struct onenand_chip *this = mtd->priv;
2808 int value, block, page;
2809
2810 /* Address translation */
2811 switch (cmd) {
2812 case ONENAND_CMD_OTP_ACCESS:
2813 block = (int) (addr >> this->erase_shift);
2814 page = -1;
2815 break;
2816
2817 default:
2818 block = (int) (addr >> this->erase_shift);
2819 page = (int) (addr >> this->page_shift);
2820
2821 if (ONENAND_IS_2PLANE(this)) {
2822 /* Make the even block number */
2823 block &= ~1;
2824 /* Is it the odd plane? */
2825 if (addr & this->writesize)
2826 block++;
2827 page >>= 1;
2828 }
2829 page &= this->page_mask;
2830 break;
2831 }
2832
2833 if (block != -1) {
2834 /* Write 'DFS, FBA' of Flash */
2835 value = onenand_block_address(this, block);
2836 this->write_word(value, this->base +
2837 ONENAND_REG_START_ADDRESS1);
2838 }
2839
2840 if (page != -1) {
2841 /* Now we use page size operation */
2842 int sectors = 4, count = 4;
2843 int dataram;
2844
2845 switch (cmd) {
2846 default:
2847 if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
2848 cmd = ONENAND_CMD_2X_PROG;
2849 dataram = ONENAND_CURRENT_BUFFERRAM(this);
2850 break;
2851 }
2852
2853 /* Write 'FPA, FSA' of Flash */
2854 value = onenand_page_address(page, sectors);
2855 this->write_word(value, this->base +
2856 ONENAND_REG_START_ADDRESS8);
2857
2858 /* Write 'BSA, BSC' of DataRAM */
2859 value = onenand_buffer_address(dataram, sectors, count);
2860 this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
2861 }
2862
2863 /* Interrupt clear */
2864 this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
2865
2866 /* Write command */
2867 this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
2868
2869 return 0;
2870}
2871
2872/**
2873 * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
2874 * @param mtd MTD device structure
2875 * @param to offset to write to
2876 * @param len number of bytes to write
2877 * @param retlen pointer to variable to store the number of written bytes
2878 * @param buf the data to write
2879 *
2880 * OneNAND write out-of-band only for OTP
2881 */
2882static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2883 struct mtd_oob_ops *ops)
2884{
2885 struct onenand_chip *this = mtd->priv;
2886 int column, ret = 0, oobsize;
2887 int written = 0;
2888 u_char *oobbuf;
2889 size_t len = ops->ooblen;
2890 const u_char *buf = ops->oobbuf;
2891 int block, value, status;
2892
2893 to += ops->ooboffs;
2894
2895 /* Initialize retlen, in case of early exit */
2896 ops->oobretlen = 0;
2897
2898 oobsize = mtd->oobsize;
2899
2900 column = to & (mtd->oobsize - 1);
2901
2902 oobbuf = this->oob_buf;
2903
2904 /* Loop until all data write */
2905 while (written < len) {
2906 int thislen = min_t(int, oobsize, len - written);
2907
2908 cond_resched();
2909
2910 block = (int) (to >> this->erase_shift);
2911 /*
2912 * Write 'DFS, FBA' of Flash
2913 * Add: F100h DQ=DFS, FBA
2914 */
2915
2916 value = onenand_block_address(this, block);
2917 this->write_word(value, this->base +
2918 ONENAND_REG_START_ADDRESS1);
2919
2920 /*
2921 * Select DataRAM for DDP
2922 * Add: F101h DQ=DBS
2923 */
2924
2925 value = onenand_bufferram_address(this, block);
2926 this->write_word(value, this->base +
2927 ONENAND_REG_START_ADDRESS2);
2928 ONENAND_SET_NEXT_BUFFERRAM(this);
2929
2930 /*
2931 * Enter OTP access mode
2932 */
2933 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
2934 this->wait(mtd, FL_OTPING);
2935
2936 /* We send data to spare ram with oobsize
2937 * to prevent byte access */
2938 memcpy(oobbuf + column, buf, thislen);
2939
2940 /*
2941 * Write Data into DataRAM
2942 * Add: 8th Word
2943 * in sector0/spare/page0
2944 * DQ=XXFCh
2945 */
2946 this->write_bufferram(mtd, ONENAND_SPARERAM,
2947 oobbuf, 0, mtd->oobsize);
2948
2949 onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
2950 onenand_update_bufferram(mtd, to, 0);
2951 if (ONENAND_IS_2PLANE(this)) {
2952 ONENAND_SET_BUFFERRAM1(this);
2953 onenand_update_bufferram(mtd, to + this->writesize, 0);
2954 }
2955
2956 ret = this->wait(mtd, FL_WRITING);
2957 if (ret) {
2958 printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
2959 break;
2960 }
2961
2962 /* Exit OTP access mode */
2963 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
2964 this->wait(mtd, FL_RESETING);
2965
2966 status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
2967 status &= 0x60;
2968
2969 if (status == 0x60) {
2970 printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
2971 printk(KERN_DEBUG "1st Block\tLOCKED\n");
2972 printk(KERN_DEBUG "OTP Block\tLOCKED\n");
2973 } else if (status == 0x20) {
2974 printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
2975 printk(KERN_DEBUG "1st Block\tLOCKED\n");
2976 printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
2977 } else if (status == 0x40) {
2978 printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
2979 printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
2980 printk(KERN_DEBUG "OTP Block\tLOCKED\n");
2981 } else {
2982 printk(KERN_DEBUG "Reboot to check\n");
2983 }
2984
2985 written += thislen;
2986 if (written == len)
2987 break;
2988
2989 to += mtd->writesize;
2990 buf += thislen;
2991 column = 0;
2992 }
2993
2994 ops->oobretlen = written;
2995
2996 return ret;
2997}
2998
2560/* Internal OTP operation */ 2999/* Internal OTP operation */
2561typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, 3000typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len,
2562 size_t *retlen, u_char *buf); 3001 size_t *retlen, u_char *buf);
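
onenand_otp_write_oob_nolock() above finishes by reading ONENAND_REG_CTRL_STATUS, masking it with 0x60 and printing which of the OTP block and the 1st block ended up locked. A user-space decode of that status word follows; the bit assignments (0x40 = OTP block locked, 0x20 = 1st block locked) are inferred from the three printed cases and should be read as an assumption, not datasheet fact.

#include <stdint.h>
#include <stdio.h>

static void decode_otp_lock(uint16_t ctrl_status)
{
	uint16_t status = ctrl_status & 0x60;

	printf("1st Block: %s\n", (status & 0x20) ? "LOCKED" : "UN-LOCKED");
	printf("OTP Block: %s\n", (status & 0x40) ? "LOCKED" : "UN-LOCKED");
}

int main(void)
{
	decode_otp_lock(0x60);	/* both locked */
	decode_otp_lock(0x20);	/* only the first block locked */
	return 0;
}
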
@@ -2659,11 +3098,11 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
2659 struct mtd_oob_ops ops; 3098 struct mtd_oob_ops ops;
2660 int ret; 3099 int ret;
2661 3100
2662 /* Enter OTP access mode */
2663 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
2664 this->wait(mtd, FL_OTPING);
2665
2666 if (FLEXONENAND(this)) { 3101 if (FLEXONENAND(this)) {
3102
3103 /* Enter OTP access mode */
3104 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
3105 this->wait(mtd, FL_OTPING);
2667 /* 3106 /*
2668 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of 3107 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
2669 * main area of page 49. 3108 * main area of page 49.
@@ -2674,19 +3113,19 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
2674 ops.oobbuf = NULL; 3113 ops.oobbuf = NULL;
2675 ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops); 3114 ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
2676 *retlen = ops.retlen; 3115 *retlen = ops.retlen;
3116
3117 /* Exit OTP access mode */
3118 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3119 this->wait(mtd, FL_RESETING);
2677 } else { 3120 } else {
2678 ops.mode = MTD_OOB_PLACE; 3121 ops.mode = MTD_OOB_PLACE;
2679 ops.ooblen = len; 3122 ops.ooblen = len;
2680 ops.oobbuf = buf; 3123 ops.oobbuf = buf;
2681 ops.ooboffs = 0; 3124 ops.ooboffs = 0;
2682 ret = onenand_write_oob_nolock(mtd, from, &ops); 3125 ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
2683 *retlen = ops.oobretlen; 3126 *retlen = ops.oobretlen;
2684 } 3127 }
2685 3128
2686 /* Exit OTP access mode */
2687 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
2688 this->wait(mtd, FL_RESETING);
2689
2690 return ret; 3129 return ret;
2691} 3130}
2692 3131
@@ -2717,16 +3156,21 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2717 if (density < ONENAND_DEVICE_DENSITY_512Mb) 3156 if (density < ONENAND_DEVICE_DENSITY_512Mb)
2718 otp_pages = 20; 3157 otp_pages = 20;
2719 else 3158 else
2720 otp_pages = 10; 3159 otp_pages = 50;
2721 3160
2722 if (mode == MTD_OTP_FACTORY) { 3161 if (mode == MTD_OTP_FACTORY) {
2723 from += mtd->writesize * otp_pages; 3162 from += mtd->writesize * otp_pages;
2724 otp_pages = 64 - otp_pages; 3163 otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
2725 } 3164 }
2726 3165
2727 /* Check User/Factory boundary */ 3166 /* Check User/Factory boundary */
2728 if (((mtd->writesize * otp_pages) - (from + len)) < 0) 3167 if (mode == MTD_OTP_USER) {
2729 return 0; 3168 if (mtd->writesize * otp_pages < from + len)
3169 return 0;
3170 } else {
3171 if (mtd->writesize * otp_pages < len)
3172 return 0;
3173 }
2730 3174
2731 onenand_get_device(mtd, FL_OTPING); 3175 onenand_get_device(mtd, FL_OTPING);
2732 while (len > 0 && otp_pages > 0) { 3176 while (len > 0 && otp_pages > 0) {
@@ -2749,13 +3193,12 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2749 *retlen += sizeof(struct otp_info); 3193 *retlen += sizeof(struct otp_info);
2750 } else { 3194 } else {
2751 size_t tmp_retlen; 3195 size_t tmp_retlen;
2752 int size = len;
2753 3196
2754 ret = action(mtd, from, len, &tmp_retlen, buf); 3197 ret = action(mtd, from, len, &tmp_retlen, buf);
2755 3198
2756 buf += size; 3199 buf += tmp_retlen;
2757 len -= size; 3200 len -= tmp_retlen;
2758 *retlen += size; 3201 *retlen += tmp_retlen;
2759 3202
2760 if (ret) 3203 if (ret)
2761 break; 3204 break;
@@ -2868,21 +3311,11 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
2868 u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf; 3311 u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
2869 size_t retlen; 3312 size_t retlen;
2870 int ret; 3313 int ret;
3314 unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
2871 3315
2872 memset(buf, 0xff, FLEXONENAND(this) ? this->writesize 3316 memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
2873 : mtd->oobsize); 3317 : mtd->oobsize);
2874 /* 3318 /*
2875 * Note: OTP lock operation
2876 * OTP block : 0xXXFC
2877 * 1st block : 0xXXF3 (If chip support)
2878 * Both : 0xXXF0 (If chip support)
2879 */
2880 if (FLEXONENAND(this))
2881 buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
2882 else
2883 buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
2884
2885 /*
2886 * Write lock mark to 8th word of sector0 of page0 of the spare0. 3319 * Write lock mark to 8th word of sector0 of page0 of the spare0.
2887 * We write 16 bytes spare area instead of 2 bytes. 3320 * We write 16 bytes spare area instead of 2 bytes.
2888 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of 3321 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
@@ -2892,10 +3325,30 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
2892 from = 0; 3325 from = 0;
2893 len = FLEXONENAND(this) ? mtd->writesize : 16; 3326 len = FLEXONENAND(this) ? mtd->writesize : 16;
2894 3327
3328 /*
3329 * Note: OTP lock operation
3330 * OTP block : 0xXXFC XX 1111 1100
3331 * 1st block : 0xXXF3 (If chip support) XX 1111 0011
3332 * Both : 0xXXF0 (If chip support) XX 1111 0000
3333 */
3334 if (FLEXONENAND(this))
3335 otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
3336
3337 /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
3338 if (otp == 1)
3339 buf[otp_lock_offset] = 0xFC;
3340 else if (otp == 2)
3341 buf[otp_lock_offset] = 0xF3;
3342 else if (otp == 3)
3343 buf[otp_lock_offset] = 0xF0;
3344 else if (otp != 0)
3345 printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
3346
2895 ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER); 3347 ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
2896 3348
2897 return ret ? : retlen; 3349 return ret ? : retlen;
2898} 3350}
3351
2899#endif /* CONFIG_MTD_ONENAND_OTP */ 3352#endif /* CONFIG_MTD_ONENAND_OTP */
2900 3353
2901/** 3354/**
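The option handling added above maps the otp argument onto the lock mark that gets written to the spare area. The same mapping as a small illustrative helper (otp_lock_mark() is hypothetical, not part of the driver; the byte values are those listed in the comment in the hunk):

    static unsigned char otp_lock_mark(int otp)
    {
            switch (otp) {
            case 1:
                    return 0xFC;    /* lock the OTP block only */
            case 2:
                    return 0xF3;    /* lock the 1st block, if the chip supports it */
            case 3:
                    return 0xF0;    /* lock both */
            default:
                    return 0xFF;    /* 0 or invalid: leave everything unlocked */
            }
    }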
@@ -3172,7 +3625,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
3172 break; 3625 break;
3173 3626
3174 if (i != mtd->oobsize) { 3627 if (i != mtd->oobsize) {
3175 printk(KERN_WARNING "Block %d not erased.\n", block); 3628 printk(KERN_WARNING "%s: Block %d not erased.\n",
3629 __func__, block);
3176 return 1; 3630 return 1;
3177 } 3631 }
3178 } 3632 }
@@ -3204,8 +3658,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3204 blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0; 3658 blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
3205 3659
3206 if (boundary >= blksperdie) { 3660 if (boundary >= blksperdie) {
3207 printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. " 3661 printk(KERN_ERR "%s: Invalid boundary value. "
3208 "Boundary not changed.\n"); 3662 "Boundary not changed.\n", __func__);
3209 return -EINVAL; 3663 return -EINVAL;
3210 } 3664 }
3211 3665
@@ -3214,7 +3668,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3214 new = boundary + (die * this->density_mask); 3668 new = boundary + (die * this->density_mask);
3215 ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new)); 3669 ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
3216 if (ret) { 3670 if (ret) {
3217 printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n"); 3671 printk(KERN_ERR "%s: Please erase blocks "
3672 "before boundary change\n", __func__);
3218 return ret; 3673 return ret;
3219 } 3674 }
3220 3675
@@ -3227,12 +3682,12 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3227 3682
3228 thisboundary = this->read_word(this->base + ONENAND_DATARAM); 3683 thisboundary = this->read_word(this->base + ONENAND_DATARAM);
3229 if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) { 3684 if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
3230 printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n"); 3685 printk(KERN_ERR "%s: boundary locked\n", __func__);
3231 ret = 1; 3686 ret = 1;
3232 goto out; 3687 goto out;
3233 } 3688 }
3234 3689
3235 printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n", 3690 printk(KERN_INFO "Changing die %d boundary: %d%s\n",
3236 die, boundary, lock ? "(Locked)" : "(Unlocked)"); 3691 die, boundary, lock ? "(Locked)" : "(Unlocked)");
3237 3692
3238 addr = die ? this->diesize[0] : 0; 3693 addr = die ? this->diesize[0] : 0;
@@ -3243,7 +3698,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3243 this->command(mtd, ONENAND_CMD_ERASE, addr, 0); 3698 this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
3244 ret = this->wait(mtd, FL_ERASING); 3699 ret = this->wait(mtd, FL_ERASING);
3245 if (ret) { 3700 if (ret) {
3246 printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die); 3701 printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
3702 __func__, die);
3247 goto out; 3703 goto out;
3248 } 3704 }
3249 3705
@@ -3251,7 +3707,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3251 this->command(mtd, ONENAND_CMD_PROG, addr, 0); 3707 this->command(mtd, ONENAND_CMD_PROG, addr, 0);
3252 ret = this->wait(mtd, FL_WRITING); 3708 ret = this->wait(mtd, FL_WRITING);
3253 if (ret) { 3709 if (ret) {
3254 printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die); 3710 printk(KERN_ERR "%s: Failed PI write for Die %d\n",
3711 __func__, die);
3255 goto out; 3712 goto out;
3256 } 3713 }
3257 3714
@@ -3408,8 +3865,8 @@ static void onenand_resume(struct mtd_info *mtd)
3408 if (this->state == FL_PM_SUSPENDED) 3865 if (this->state == FL_PM_SUSPENDED)
3409 onenand_release_device(mtd); 3866 onenand_release_device(mtd);
3410 else 3867 else
3411 printk(KERN_ERR "resume() called for the chip which is not" 3868 printk(KERN_ERR "%s: resume() called for the chip which is not "
3412 "in suspended state\n"); 3869 "in suspended state\n", __func__);
3413} 3870}
3414 3871
3415/** 3872/**
@@ -3464,7 +3921,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
3464 if (!this->page_buf) { 3921 if (!this->page_buf) {
3465 this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL); 3922 this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
3466 if (!this->page_buf) { 3923 if (!this->page_buf) {
3467 printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n"); 3924 printk(KERN_ERR "%s: Can't allocate page_buf\n",
3925 __func__);
3468 return -ENOMEM; 3926 return -ENOMEM;
3469 } 3927 }
3470 this->options |= ONENAND_PAGEBUF_ALLOC; 3928 this->options |= ONENAND_PAGEBUF_ALLOC;
@@ -3472,7 +3930,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
3472 if (!this->oob_buf) { 3930 if (!this->oob_buf) {
3473 this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL); 3931 this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
3474 if (!this->oob_buf) { 3932 if (!this->oob_buf) {
3475 printk(KERN_ERR "onenand_scan(): Can't allocate oob_buf\n"); 3933 printk(KERN_ERR "%s: Can't allocate oob_buf\n",
3934 __func__);
3476 if (this->options & ONENAND_PAGEBUF_ALLOC) { 3935 if (this->options & ONENAND_PAGEBUF_ALLOC) {
3477 this->options &= ~ONENAND_PAGEBUF_ALLOC; 3936 this->options &= ~ONENAND_PAGEBUF_ALLOC;
3478 kfree(this->page_buf); 3937 kfree(this->page_buf);
@@ -3505,8 +3964,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
3505 break; 3964 break;
3506 3965
3507 default: 3966 default:
3508 printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n", 3967 printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
3509 mtd->oobsize); 3968 __func__, mtd->oobsize);
3510 mtd->subpage_sft = 0; 3969 mtd->subpage_sft = 0;
3511 /* To prevent kernel oops */ 3970 /* To prevent kernel oops */
3512 this->ecclayout = &onenand_oob_32; 3971 this->ecclayout = &onenand_oob_32;
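Most of the message changes in this file follow one pattern: the function name that used to be hard-coded in the format string is replaced by "%s" plus __func__, so the text cannot go stale if the function is renamed. The pattern, taken from the onenand_scan() hunk above:

    /* before */
    printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n");

    /* after: let the compiler supply the function name */
    printk(KERN_ERR "%s: Can't allocate page_buf\n", __func__);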
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index c1d501335006..b44dcab940d8 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
5obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o 5obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o 6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o 7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
8obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
new file mode 100644
index 000000000000..c1f31051784c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -0,0 +1,87 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/list.h>
4#include <linux/slab.h>
5#include <linux/random.h>
6#include <linux/string.h>
7#include <linux/bitops.h>
8#include <linux/jiffies.h>
9#include <linux/mtd/nand_ecc.h>
10
11#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
12
13static void inject_single_bit_error(void *data, size_t size)
14{
15 unsigned long offset = random32() % (size * BITS_PER_BYTE);
16
17 __change_bit(offset, data);
18}
19
20static unsigned char data[512];
21static unsigned char error_data[512];
22
23static int nand_ecc_test(const size_t size)
24{
25 unsigned char code[3];
26 unsigned char error_code[3];
27 char testname[30];
28
29 BUG_ON(sizeof(data) < size);
30
31 sprintf(testname, "nand-ecc-%zu", size);
32
33 get_random_bytes(data, size);
34
35 memcpy(error_data, data, size);
36 inject_single_bit_error(error_data, size);
37
38 __nand_calculate_ecc(data, size, code);
39 __nand_calculate_ecc(error_data, size, error_code);
40 __nand_correct_data(error_data, code, error_code, size);
41
42 if (!memcmp(data, error_data, size)) {
43 printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname);
44 return 0;
45 }
46
47 printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname);
48
49 printk(KERN_DEBUG "hexdump of data:\n");
50 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
51 data, size, false);
52 printk(KERN_DEBUG "hexdump of error data:\n");
53 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
54 error_data, size, false);
55
56 return -1;
57}
58
59#else
60
61static int nand_ecc_test(const size_t size)
62{
63 return 0;
64}
65
66#endif
67
68static int __init ecc_test_init(void)
69{
70 srandom32(jiffies);
71
72 nand_ecc_test(256);
73 nand_ecc_test(512);
74
75 return 0;
76}
77
78static void __exit ecc_test_exit(void)
79{
80}
81
82module_init(ecc_test_init);
83module_exit(ecc_test_exit);
84
85MODULE_DESCRIPTION("NAND ECC function test module");
86MODULE_AUTHOR("Akinobu Mita");
87MODULE_LICENSE("GPL");
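The new module only exercises the correctable single-bit case. A possible follow-on check, sketched here and not part of this patch, is to verify that a two-bit error is never silently "repaired" back to the original data; it assumes the usual convention that __nand_correct_data() returns a negative value for an uncorrectable error (nand_ecc_2bit_test() and the fixed bit offsets are illustrative only):

    static int nand_ecc_2bit_test(const size_t size)
    {
            unsigned char code[3];
            unsigned char error_code[3];
            int ret;

            get_random_bytes(data, size);
            memcpy(error_data, data, size);

            /* flip two distinct bits */
            __change_bit(0, (void *)error_data);
            __change_bit(9, (void *)error_data);

            __nand_calculate_ecc(data, size, code);
            __nand_calculate_ecc(error_data, size, error_code);
            ret = __nand_correct_data(error_data, code, error_code, size);

            /* a single-bit Hamming code cannot fix this: either the error is
               reported, or the data must still differ from the original */
            if (ret >= 0 && !memcmp(data, error_data, size)) {
                    printk(KERN_ERR "mtd_nandecctest: 2-bit error went undetected\n");
                    return -1;
            }
            return 0;
    }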
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index 5553cd4eab20..5813920e79a5 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -343,7 +343,6 @@ static int scan_for_bad_eraseblocks(void)
343 printk(PRINT_PREF "error: cannot allocate memory\n"); 343 printk(PRINT_PREF "error: cannot allocate memory\n");
344 return -ENOMEM; 344 return -ENOMEM;
345 } 345 }
346 memset(bbt, 0 , ebcnt);
347 346
348 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 347 printk(PRINT_PREF "scanning for bad eraseblocks\n");
349 for (i = 0; i < ebcnt; ++i) { 348 for (i = 0; i < ebcnt; ++i) {
@@ -392,7 +391,6 @@ static int __init mtd_oobtest_init(void)
392 mtd->writesize, ebcnt, pgcnt, mtd->oobsize); 391 mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
393 392
394 err = -ENOMEM; 393 err = -ENOMEM;
395 mtd->erasesize = mtd->erasesize;
396 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL); 394 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
397 if (!readbuf) { 395 if (!readbuf) {
398 printk(PRINT_PREF "error: cannot allocate memory\n"); 396 printk(PRINT_PREF "error: cannot allocate memory\n");
@@ -476,18 +474,10 @@ static int __init mtd_oobtest_init(void)
476 use_len_max = mtd->ecclayout->oobavail; 474 use_len_max = mtd->ecclayout->oobavail;
477 vary_offset = 1; 475 vary_offset = 1;
478 simple_srand(5); 476 simple_srand(5);
479 printk(PRINT_PREF "writing OOBs of whole device\n"); 477
480 for (i = 0; i < ebcnt; ++i) { 478 err = write_whole_device();
481 if (bbt[i]) 479 if (err)
482 continue; 480 goto out;
483 err = write_eraseblock(i);
484 if (err)
485 goto out;
486 if (i % 256 == 0)
487 printk(PRINT_PREF "written up to eraseblock %u\n", i);
488 cond_resched();
489 }
490 printk(PRINT_PREF "written %u eraseblocks\n", i);
491 481
492 /* Check all eraseblocks */ 482 /* Check all eraseblocks */
493 use_offset = 0; 483 use_offset = 0;
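The inline loop removed above is replaced by a call to write_whole_device(), which is added elsewhere in this patch and is not visible in the hunk. Judging from the deleted lines it presumably looks roughly like this (a reconstruction, not the actual file contents):

    static int write_whole_device(void)
    {
            int err;
            unsigned int i;

            printk(PRINT_PREF "writing OOBs of whole device\n");
            for (i = 0; i < ebcnt; ++i) {
                    if (bbt[i])
                            continue;       /* skip known-bad eraseblocks */
                    err = write_eraseblock(i);
                    if (err)
                            return err;
                    if (i % 256 == 0)
                            printk(PRINT_PREF "written up to eraseblock %u\n", i);
                    cond_resched();
            }
            printk(PRINT_PREF "written %u eraseblocks\n", i);
            return 0;
    }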
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 103cac480fee..ce17cbe918c5 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -523,6 +523,7 @@ static int __init mtd_pagetest_init(void)
523 do_div(tmp, mtd->erasesize); 523 do_div(tmp, mtd->erasesize);
524 ebcnt = tmp; 524 ebcnt = tmp;
525 pgcnt = mtd->erasesize / mtd->writesize; 525 pgcnt = mtd->erasesize / mtd->writesize;
526 pgsize = mtd->writesize;
526 527
527 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 528 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
528 "page size %u, count of eraseblocks %u, pages per " 529 "page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 78b7167a8ce3..39db0e96815d 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -837,7 +837,7 @@ static int vortex_resume(struct device *dev)
837 return 0; 837 return 0;
838} 838}
839 839
840static struct dev_pm_ops vortex_pm_ops = { 840static const struct dev_pm_ops vortex_pm_ops = {
841 .suspend = vortex_suspend, 841 .suspend = vortex_suspend,
842 .resume = vortex_resume, 842 .resume = vortex_resume,
843 .freeze = vortex_suspend, 843 .freeze = vortex_suspend,
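Constifying the dev_pm_ops table is a small hardening change: the PM core only reads it, so it can be placed in read-only data. The same shape for a hypothetical driver (all names are placeholders):

    static int example_suspend(struct device *dev) { return 0; }
    static int example_resume(struct device *dev) { return 0; }

    static const struct dev_pm_ops example_pm_ops = {
            .suspend = example_suspend,
            .resume  = example_resume,
            .freeze  = example_suspend,     /* hibernation reuses the same paths here */
            .thaw    = example_resume,
    };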
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 1f6c5486d715..0bd47d32ec42 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1245,9 +1245,15 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
1245 drvinfo->n_stats = BCM_ENET_STATS_LEN; 1245 drvinfo->n_stats = BCM_ENET_STATS_LEN;
1246} 1246}
1247 1247
1248static int bcm_enet_get_stats_count(struct net_device *netdev) 1248static int bcm_enet_get_sset_count(struct net_device *netdev,
1249 int string_set)
1249{ 1250{
1250 return BCM_ENET_STATS_LEN; 1251 switch (string_set) {
1252 case ETH_SS_STATS:
1253 return BCM_ENET_STATS_LEN;
1254 default:
1255 return -EINVAL;
1256 }
1251} 1257}
1252 1258
1253static void bcm_enet_get_strings(struct net_device *netdev, 1259static void bcm_enet_get_strings(struct net_device *netdev,
@@ -1473,7 +1479,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev,
1473 1479
1474static struct ethtool_ops bcm_enet_ethtool_ops = { 1480static struct ethtool_ops bcm_enet_ethtool_ops = {
1475 .get_strings = bcm_enet_get_strings, 1481 .get_strings = bcm_enet_get_strings,
1476 .get_stats_count = bcm_enet_get_stats_count, 1482 .get_sset_count = bcm_enet_get_sset_count,
1477 .get_ethtool_stats = bcm_enet_get_ethtool_stats, 1483 .get_ethtool_stats = bcm_enet_get_ethtool_stats,
1478 .get_settings = bcm_enet_get_settings, 1484 .get_settings = bcm_enet_get_settings,
1479 .set_settings = bcm_enet_set_settings, 1485 .set_settings = bcm_enet_set_settings,
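The ethtool core is moving from get_stats_count() to get_sset_count(): the new callback is asked for the size of one particular string set and must reject sets it does not provide, exactly as the hunk above does for ETH_SS_STATS. A generic sketch for a driver that also exposes self-test strings (names hypothetical):

    static int example_get_sset_count(struct net_device *dev, int sset)
    {
            switch (sset) {
            case ETH_SS_STATS:
                    return EXAMPLE_STATS_LEN;
            case ETH_SS_TEST:
                    return EXAMPLE_TEST_LEN;
            default:
                    return -EINVAL;
            }
    }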
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d69e6838f21e..0fb7a4964e75 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -20,6 +20,8 @@
20 * 20 *
21 */ 21 */
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include <linux/skbuff.h> 25#include <linux/skbuff.h>
24#include <linux/if_ether.h> 26#include <linux/if_ether.h>
25#include <linux/netdevice.h> 27#include <linux/netdevice.h>
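Defining pr_fmt() before the first include is what lets the rest of the bonding changes drop the hand-written DRV_NAME prefix: pr_err(), pr_warning(), pr_debug() and friends pass their format string through pr_fmt(), so every message picks up the module name automatically. A minimal sketch of the effect (the message text is illustrative; KBUILD_MODNAME expands to "bonding" when this file is built into bonding.ko):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must precede the includes */

    #include <linux/kernel.h>

    static void example(const char *bond_name)
    {
            /* printed as "bonding: <bond_name>: first port is uninitialized" */
            pr_err("%s: first port is uninitialized\n", bond_name);
    }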
@@ -352,7 +354,8 @@ static u16 __get_link_speed(struct port *port)
352 } 354 }
353 } 355 }
354 356
355 pr_debug("Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed); 357 pr_debug("Port %d Received link speed %d update from adapter\n",
358 port->actor_port_number, speed);
356 return speed; 359 return speed;
357} 360}
358 361
@@ -378,12 +381,14 @@ static u8 __get_duplex(struct port *port)
378 switch (slave->duplex) { 381 switch (slave->duplex) {
379 case DUPLEX_FULL: 382 case DUPLEX_FULL:
380 retval=0x1; 383 retval=0x1;
381 pr_debug("Port %d Received status full duplex update from adapter\n", port->actor_port_number); 384 pr_debug("Port %d Received status full duplex update from adapter\n",
385 port->actor_port_number);
382 break; 386 break;
383 case DUPLEX_HALF: 387 case DUPLEX_HALF:
384 default: 388 default:
385 retval=0x0; 389 retval=0x0;
386 pr_debug("Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number); 390 pr_debug("Port %d Received status NOT full duplex update from adapter\n",
391 port->actor_port_number);
387 break; 392 break;
388 } 393 }
389 } 394 }
@@ -980,7 +985,9 @@ static void ad_mux_machine(struct port *port)
980 985
981 // check if the state machine was changed 986 // check if the state machine was changed
982 if (port->sm_mux_state != last_state) { 987 if (port->sm_mux_state != last_state) {
983 pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_mux_state); 988 pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
989 port->actor_port_number, last_state,
990 port->sm_mux_state);
984 switch (port->sm_mux_state) { 991 switch (port->sm_mux_state) {
985 case AD_MUX_DETACHED: 992 case AD_MUX_DETACHED:
986 __detach_bond_from_agg(port); 993 __detach_bond_from_agg(port);
@@ -1079,7 +1086,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1079 1086
1080 // check if the State machine was changed or new lacpdu arrived 1087 // check if the State machine was changed or new lacpdu arrived
1081 if ((port->sm_rx_state != last_state) || (lacpdu)) { 1088 if ((port->sm_rx_state != last_state) || (lacpdu)) {
1082 pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_rx_state); 1089 pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
1090 port->actor_port_number, last_state,
1091 port->sm_rx_state);
1083 switch (port->sm_rx_state) { 1092 switch (port->sm_rx_state) {
1084 case AD_RX_INITIALIZE: 1093 case AD_RX_INITIALIZE:
1085 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) { 1094 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) {
@@ -1126,9 +1135,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1126 // detect loopback situation 1135 // detect loopback situation
1127 if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) { 1136 if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
1128 // INFO_RECEIVED_LOOPBACK_FRAMES 1137 // INFO_RECEIVED_LOOPBACK_FRAMES
1129 pr_err(DRV_NAME ": %s: An illegal loopback occurred on " 1138 pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
1130 "adapter (%s). Check the configuration to verify that all " 1139 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
1131 "Adapters are connected to 802.3ad compliant switch ports\n",
1132 port->slave->dev->master->name, port->slave->dev->name); 1140 port->slave->dev->master->name, port->slave->dev->name);
1133 __release_rx_machine_lock(port); 1141 __release_rx_machine_lock(port);
1134 return; 1142 return;
@@ -1166,7 +1174,8 @@ static void ad_tx_machine(struct port *port)
1166 __update_lacpdu_from_port(port); 1174 __update_lacpdu_from_port(port);
1167 1175
1168 if (ad_lacpdu_send(port) >= 0) { 1176 if (ad_lacpdu_send(port) >= 0) {
1169 pr_debug("Sent LACPDU on port %d\n", port->actor_port_number); 1177 pr_debug("Sent LACPDU on port %d\n",
1178 port->actor_port_number);
1170 1179
1171 /* mark ntt as false, so it will not be sent again until 1180 /* mark ntt as false, so it will not be sent again until
1172 demanded */ 1181 demanded */
@@ -1241,7 +1250,9 @@ static void ad_periodic_machine(struct port *port)
1241 1250
1242 // check if the state machine was changed 1251 // check if the state machine was changed
1243 if (port->sm_periodic_state != last_state) { 1252 if (port->sm_periodic_state != last_state) {
1244 pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_periodic_state); 1253 pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
1254 port->actor_port_number, last_state,
1255 port->sm_periodic_state);
1245 switch (port->sm_periodic_state) { 1256 switch (port->sm_periodic_state) {
1246 case AD_NO_PERIODIC: 1257 case AD_NO_PERIODIC:
1247 port->sm_periodic_timer_counter = 0; // zero timer 1258 port->sm_periodic_timer_counter = 0; // zero timer
@@ -1298,7 +1309,9 @@ static void ad_port_selection_logic(struct port *port)
1298 port->next_port_in_aggregator=NULL; 1309 port->next_port_in_aggregator=NULL;
1299 port->actor_port_aggregator_identifier=0; 1310 port->actor_port_aggregator_identifier=0;
1300 1311
1301 pr_debug("Port %d left LAG %d\n", port->actor_port_number, temp_aggregator->aggregator_identifier); 1312 pr_debug("Port %d left LAG %d\n",
1313 port->actor_port_number,
1314 temp_aggregator->aggregator_identifier);
1302 // if the aggregator is empty, clear its parameters, and set it ready to be attached 1315 // if the aggregator is empty, clear its parameters, and set it ready to be attached
1303 if (!temp_aggregator->lag_ports) { 1316 if (!temp_aggregator->lag_ports) {
1304 ad_clear_agg(temp_aggregator); 1317 ad_clear_agg(temp_aggregator);
@@ -1307,9 +1320,7 @@ static void ad_port_selection_logic(struct port *port)
1307 } 1320 }
1308 } 1321 }
1309 if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list 1322 if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
1310 pr_warning(DRV_NAME ": %s: Warning: Port %d (on %s) " 1323 pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
1311 "was related to aggregator %d but was not "
1312 "on its port list\n",
1313 port->slave->dev->master->name, 1324 port->slave->dev->master->name,
1314 port->actor_port_number, 1325 port->actor_port_number,
1315 port->slave->dev->name, 1326 port->slave->dev->name,
@@ -1343,7 +1354,9 @@ static void ad_port_selection_logic(struct port *port)
1343 port->next_port_in_aggregator=aggregator->lag_ports; 1354 port->next_port_in_aggregator=aggregator->lag_ports;
1344 port->aggregator->num_of_ports++; 1355 port->aggregator->num_of_ports++;
1345 aggregator->lag_ports=port; 1356 aggregator->lag_ports=port;
1346 pr_debug("Port %d joined LAG %d(existing LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1357 pr_debug("Port %d joined LAG %d(existing LAG)\n",
1358 port->actor_port_number,
1359 port->aggregator->aggregator_identifier);
1347 1360
1348 // mark this port as selected 1361 // mark this port as selected
1349 port->sm_vars |= AD_PORT_SELECTED; 1362 port->sm_vars |= AD_PORT_SELECTED;
@@ -1380,10 +1393,11 @@ static void ad_port_selection_logic(struct port *port)
1380 // mark this port as selected 1393 // mark this port as selected
1381 port->sm_vars |= AD_PORT_SELECTED; 1394 port->sm_vars |= AD_PORT_SELECTED;
1382 1395
1383 pr_debug("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1396 pr_debug("Port %d joined LAG %d(new LAG)\n",
1397 port->actor_port_number,
1398 port->aggregator->aggregator_identifier);
1384 } else { 1399 } else {
1385 pr_err(DRV_NAME ": %s: Port %d (on %s) did not find " 1400 pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
1386 "a suitable aggregator\n",
1387 port->slave->dev->master->name, 1401 port->slave->dev->master->name,
1388 port->actor_port_number, port->slave->dev->name); 1402 port->actor_port_number, port->slave->dev->name);
1389 } 1403 }
@@ -1460,8 +1474,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
1460 break; 1474 break;
1461 1475
1462 default: 1476 default:
1463 pr_warning(DRV_NAME 1477 pr_warning("%s: Impossible agg select mode %d\n",
1464 ": %s: Impossible agg select mode %d\n",
1465 curr->slave->dev->master->name, 1478 curr->slave->dev->master->name,
1466 __get_agg_selection_mode(curr->lag_ports)); 1479 __get_agg_selection_mode(curr->lag_ports));
1467 break; 1480 break;
@@ -1546,40 +1559,38 @@ static void ad_agg_selection_logic(struct aggregator *agg)
1546 // if there is new best aggregator, activate it 1559 // if there is new best aggregator, activate it
1547 if (best) { 1560 if (best) {
1548 pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n", 1561 pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
1549 best->aggregator_identifier, best->num_of_ports, 1562 best->aggregator_identifier, best->num_of_ports,
1550 best->actor_oper_aggregator_key, 1563 best->actor_oper_aggregator_key,
1551 best->partner_oper_aggregator_key, 1564 best->partner_oper_aggregator_key,
1552 best->is_individual, best->is_active); 1565 best->is_individual, best->is_active);
1553 pr_debug("best ports %p slave %p %s\n", 1566 pr_debug("best ports %p slave %p %s\n",
1554 best->lag_ports, best->slave, 1567 best->lag_ports, best->slave,
1555 best->slave ? best->slave->dev->name : "NULL"); 1568 best->slave ? best->slave->dev->name : "NULL");
1556 1569
1557 for (agg = __get_first_agg(best->lag_ports); agg; 1570 for (agg = __get_first_agg(best->lag_ports); agg;
1558 agg = __get_next_agg(agg)) { 1571 agg = __get_next_agg(agg)) {
1559 1572
1560 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n", 1573 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
1561 agg->aggregator_identifier, agg->num_of_ports, 1574 agg->aggregator_identifier, agg->num_of_ports,
1562 agg->actor_oper_aggregator_key, 1575 agg->actor_oper_aggregator_key,
1563 agg->partner_oper_aggregator_key, 1576 agg->partner_oper_aggregator_key,
1564 agg->is_individual, agg->is_active); 1577 agg->is_individual, agg->is_active);
1565 } 1578 }
1566 1579
1567 // check if any partner replys 1580 // check if any partner replys
1568 if (best->is_individual) { 1581 if (best->is_individual) {
1569 pr_warning(DRV_NAME ": %s: Warning: No 802.3ad" 1582 pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
1570 " response from the link partner for any" 1583 best->slave->dev->master->name);
1571 " adapters in the bond\n",
1572 best->slave->dev->master->name);
1573 } 1584 }
1574 1585
1575 best->is_active = 1; 1586 best->is_active = 1;
1576 pr_debug("LAG %d chosen as the active LAG\n", 1587 pr_debug("LAG %d chosen as the active LAG\n",
1577 best->aggregator_identifier); 1588 best->aggregator_identifier);
1578 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n", 1589 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
1579 best->aggregator_identifier, best->num_of_ports, 1590 best->aggregator_identifier, best->num_of_ports,
1580 best->actor_oper_aggregator_key, 1591 best->actor_oper_aggregator_key,
1581 best->partner_oper_aggregator_key, 1592 best->partner_oper_aggregator_key,
1582 best->is_individual, best->is_active); 1593 best->is_individual, best->is_active);
1583 1594
1584 // disable the ports that were related to the former active_aggregator 1595 // disable the ports that were related to the former active_aggregator
1585 if (active) { 1596 if (active) {
@@ -1633,7 +1644,8 @@ static void ad_clear_agg(struct aggregator *aggregator)
1633 aggregator->lag_ports = NULL; 1644 aggregator->lag_ports = NULL;
1634 aggregator->is_active = 0; 1645 aggregator->is_active = 0;
1635 aggregator->num_of_ports = 0; 1646 aggregator->num_of_ports = 0;
1636 pr_debug("LAG %d was cleared\n", aggregator->aggregator_identifier); 1647 pr_debug("LAG %d was cleared\n",
1648 aggregator->aggregator_identifier);
1637 } 1649 }
1638} 1650}
1639 1651
@@ -1728,7 +1740,9 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
1728static void ad_enable_collecting_distributing(struct port *port) 1740static void ad_enable_collecting_distributing(struct port *port)
1729{ 1741{
1730 if (port->aggregator->is_active) { 1742 if (port->aggregator->is_active) {
1731 pr_debug("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1743 pr_debug("Enabling port %d(LAG %d)\n",
1744 port->actor_port_number,
1745 port->aggregator->aggregator_identifier);
1732 __enable_port(port); 1746 __enable_port(port);
1733 } 1747 }
1734} 1748}
@@ -1741,7 +1755,9 @@ static void ad_enable_collecting_distributing(struct port *port)
1741static void ad_disable_collecting_distributing(struct port *port) 1755static void ad_disable_collecting_distributing(struct port *port)
1742{ 1756{
1743 if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) { 1757 if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
1744 pr_debug("Disabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1758 pr_debug("Disabling port %d(LAG %d)\n",
1759 port->actor_port_number,
1760 port->aggregator->aggregator_identifier);
1745 __disable_port(port); 1761 __disable_port(port);
1746 } 1762 }
1747} 1763}
@@ -1779,7 +1795,8 @@ static void ad_marker_info_send(struct port *port)
1779 1795
1780 // send the marker information 1796 // send the marker information
1781 if (ad_marker_send(port, &marker) >= 0) { 1797 if (ad_marker_send(port, &marker) >= 0) {
1782 pr_debug("Sent Marker Information on port %d\n", port->actor_port_number); 1798 pr_debug("Sent Marker Information on port %d\n",
1799 port->actor_port_number);
1783 } 1800 }
1784} 1801}
1785#endif 1802#endif
@@ -1803,7 +1820,8 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
1803 // send the marker response 1820 // send the marker response
1804 1821
1805 if (ad_marker_send(port, &marker) >= 0) { 1822 if (ad_marker_send(port, &marker) >= 0) {
1806 pr_debug("Sent Marker Response on port %d\n", port->actor_port_number); 1823 pr_debug("Sent Marker Response on port %d\n",
1824 port->actor_port_number);
1807 } 1825 }
1808} 1826}
1809 1827
@@ -1889,8 +1907,7 @@ int bond_3ad_bind_slave(struct slave *slave)
1889 struct aggregator *aggregator; 1907 struct aggregator *aggregator;
1890 1908
1891 if (bond == NULL) { 1909 if (bond == NULL) {
1892 pr_err(DRV_NAME ": %s: The slave %s is not attached to " 1910 pr_err("%s: The slave %s is not attached to its bond\n",
1893 "its bond\n",
1894 slave->dev->master->name, slave->dev->name); 1911 slave->dev->master->name, slave->dev->name);
1895 return -1; 1912 return -1;
1896 } 1913 }
@@ -1966,13 +1983,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
1966 1983
1967 // if slave is null, the whole port is not initialized 1984 // if slave is null, the whole port is not initialized
1968 if (!port->slave) { 1985 if (!port->slave) {
1969 pr_warning(DRV_NAME ": Warning: %s: Trying to " 1986 pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
1970 "unbind an uninitialized port on %s\n",
1971 slave->dev->master->name, slave->dev->name); 1987 slave->dev->master->name, slave->dev->name);
1972 return; 1988 return;
1973 } 1989 }
1974 1990
1975 pr_debug("Unbinding Link Aggregation Group %d\n", aggregator->aggregator_identifier); 1991 pr_debug("Unbinding Link Aggregation Group %d\n",
1992 aggregator->aggregator_identifier);
1976 1993
1977 /* Tell the partner that this port is not suitable for aggregation */ 1994 /* Tell the partner that this port is not suitable for aggregation */
1978 port->actor_oper_port_state &= ~AD_STATE_AGGREGATION; 1995 port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
@@ -1996,10 +2013,12 @@ void bond_3ad_unbind_slave(struct slave *slave)
1996 // if new aggregator found, copy the aggregator's parameters 2013 // if new aggregator found, copy the aggregator's parameters
1997 // and connect the related lag_ports to the new aggregator 2014 // and connect the related lag_ports to the new aggregator
1998 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) { 2015 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
1999 pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier); 2016 pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
2017 aggregator->aggregator_identifier,
2018 new_aggregator->aggregator_identifier);
2000 2019
2001 if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) { 2020 if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
2002 pr_info(DRV_NAME ": %s: Removing an active aggregator\n", 2021 pr_info("%s: Removing an active aggregator\n",
2003 aggregator->slave->dev->master->name); 2022 aggregator->slave->dev->master->name);
2004 // select new active aggregator 2023 // select new active aggregator
2005 select_new_active_agg = 1; 2024 select_new_active_agg = 1;
@@ -2030,8 +2049,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2030 ad_agg_selection_logic(__get_first_agg(port)); 2049 ad_agg_selection_logic(__get_first_agg(port));
2031 } 2050 }
2032 } else { 2051 } else {
2033 pr_warning(DRV_NAME ": %s: Warning: unbinding aggregator, " 2052 pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
2034 "and could not find a new aggregator for its ports\n",
2035 slave->dev->master->name); 2053 slave->dev->master->name);
2036 } 2054 }
2037 } else { // in case that the only port related to this aggregator is the one we want to remove 2055 } else { // in case that the only port related to this aggregator is the one we want to remove
@@ -2039,7 +2057,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2039 // clear the aggregator 2057 // clear the aggregator
2040 ad_clear_agg(aggregator); 2058 ad_clear_agg(aggregator);
2041 if (select_new_active_agg) { 2059 if (select_new_active_agg) {
2042 pr_info(DRV_NAME ": %s: Removing an active aggregator\n", 2060 pr_info("%s: Removing an active aggregator\n",
2043 slave->dev->master->name); 2061 slave->dev->master->name);
2044 // select new active aggregator 2062 // select new active aggregator
2045 ad_agg_selection_logic(__get_first_agg(port)); 2063 ad_agg_selection_logic(__get_first_agg(port));
@@ -2066,7 +2084,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2066 // clear the aggregator 2084 // clear the aggregator
2067 ad_clear_agg(temp_aggregator); 2085 ad_clear_agg(temp_aggregator);
2068 if (select_new_active_agg) { 2086 if (select_new_active_agg) {
2069 pr_info(DRV_NAME ": %s: Removing an active aggregator\n", 2087 pr_info("%s: Removing an active aggregator\n",
2070 slave->dev->master->name); 2088 slave->dev->master->name);
2071 // select new active aggregator 2089 // select new active aggregator
2072 ad_agg_selection_logic(__get_first_agg(port)); 2090 ad_agg_selection_logic(__get_first_agg(port));
@@ -2115,8 +2133,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2115 // select the active aggregator for the bond 2133 // select the active aggregator for the bond
2116 if ((port = __get_first_port(bond))) { 2134 if ((port = __get_first_port(bond))) {
2117 if (!port->slave) { 2135 if (!port->slave) {
2118 pr_warning(DRV_NAME ": %s: Warning: bond's first port is " 2136 pr_warning("%s: Warning: bond's first port is uninitialized\n",
2119 "uninitialized\n", bond->dev->name); 2137 bond->dev->name);
2120 goto re_arm; 2138 goto re_arm;
2121 } 2139 }
2122 2140
@@ -2129,8 +2147,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2129 // for each port run the state machines 2147 // for each port run the state machines
2130 for (port = __get_first_port(bond); port; port = __get_next_port(port)) { 2148 for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
2131 if (!port->slave) { 2149 if (!port->slave) {
2132 pr_warning(DRV_NAME ": %s: Warning: Found an uninitialized " 2150 pr_warning("%s: Warning: Found an uninitialized port\n",
2133 "port\n", bond->dev->name); 2151 bond->dev->name);
2134 goto re_arm; 2152 goto re_arm;
2135 } 2153 }
2136 2154
@@ -2171,15 +2189,15 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2171 port = &(SLAVE_AD_INFO(slave).port); 2189 port = &(SLAVE_AD_INFO(slave).port);
2172 2190
2173 if (!port->slave) { 2191 if (!port->slave) {
2174 pr_warning(DRV_NAME ": %s: Warning: port of slave %s " 2192 pr_warning("%s: Warning: port of slave %s is uninitialized\n",
2175 "is uninitialized\n",
2176 slave->dev->name, slave->dev->master->name); 2193 slave->dev->name, slave->dev->master->name);
2177 return; 2194 return;
2178 } 2195 }
2179 2196
2180 switch (lacpdu->subtype) { 2197 switch (lacpdu->subtype) {
2181 case AD_TYPE_LACPDU: 2198 case AD_TYPE_LACPDU:
2182 pr_debug("Received LACPDU on port %d\n", port->actor_port_number); 2199 pr_debug("Received LACPDU on port %d\n",
2200 port->actor_port_number);
2183 ad_rx_machine(lacpdu, port); 2201 ad_rx_machine(lacpdu, port);
2184 break; 2202 break;
2185 2203
@@ -2188,17 +2206,20 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2188 2206
2189 switch (((struct bond_marker *)lacpdu)->tlv_type) { 2207 switch (((struct bond_marker *)lacpdu)->tlv_type) {
2190 case AD_MARKER_INFORMATION_SUBTYPE: 2208 case AD_MARKER_INFORMATION_SUBTYPE:
2191 pr_debug("Received Marker Information on port %d\n", port->actor_port_number); 2209 pr_debug("Received Marker Information on port %d\n",
2210 port->actor_port_number);
2192 ad_marker_info_received((struct bond_marker *)lacpdu, port); 2211 ad_marker_info_received((struct bond_marker *)lacpdu, port);
2193 break; 2212 break;
2194 2213
2195 case AD_MARKER_RESPONSE_SUBTYPE: 2214 case AD_MARKER_RESPONSE_SUBTYPE:
2196 pr_debug("Received Marker Response on port %d\n", port->actor_port_number); 2215 pr_debug("Received Marker Response on port %d\n",
2216 port->actor_port_number);
2197 ad_marker_response_received((struct bond_marker *)lacpdu, port); 2217 ad_marker_response_received((struct bond_marker *)lacpdu, port);
2198 break; 2218 break;
2199 2219
2200 default: 2220 default:
2201 pr_debug("Received an unknown Marker subtype on slot %d\n", port->actor_port_number); 2221 pr_debug("Received an unknown Marker subtype on slot %d\n",
2222 port->actor_port_number);
2202 } 2223 }
2203 } 2224 }
2204 } 2225 }
@@ -2218,8 +2239,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
2218 2239
2219 // if slave is null, the whole port is not initialized 2240 // if slave is null, the whole port is not initialized
2220 if (!port->slave) { 2241 if (!port->slave) {
2221 pr_warning(DRV_NAME ": Warning: %s: speed " 2242 pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
2222 "changed for uninitialized port on %s\n",
2223 slave->dev->master->name, slave->dev->name); 2243 slave->dev->master->name, slave->dev->name);
2224 return; 2244 return;
2225 } 2245 }
@@ -2246,8 +2266,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2246 2266
2247 // if slave is null, the whole port is not initialized 2267 // if slave is null, the whole port is not initialized
2248 if (!port->slave) { 2268 if (!port->slave) {
2249 pr_warning(DRV_NAME ": %s: Warning: duplex changed " 2269 pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
2250 "for uninitialized port on %s\n",
2251 slave->dev->master->name, slave->dev->name); 2270 slave->dev->master->name, slave->dev->name);
2252 return; 2271 return;
2253 } 2272 }
@@ -2275,8 +2294,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2275 2294
2276 // if slave is null, the whole port is not initialized 2295 // if slave is null, the whole port is not initialized
2277 if (!port->slave) { 2296 if (!port->slave) {
2278 pr_warning(DRV_NAME ": Warning: %s: link status changed for " 2297 pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
2279 "uninitialized port on %s\n",
2280 slave->dev->master->name, slave->dev->name); 2298 slave->dev->master->name, slave->dev->name);
2281 return; 2299 return;
2282 } 2300 }
@@ -2381,8 +2399,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2381 } 2399 }
2382 2400
2383 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 2401 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
2384 pr_debug(DRV_NAME ": %s: Error: " 2402 pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
2385 "bond_3ad_get_active_agg_info failed\n", dev->name); 2403 dev->name);
2386 goto out; 2404 goto out;
2387 } 2405 }
2388 2406
@@ -2391,8 +2409,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2391 2409
2392 if (slaves_in_agg == 0) { 2410 if (slaves_in_agg == 0) {
2393 /*the aggregator is empty*/ 2411 /*the aggregator is empty*/
2394 pr_debug(DRV_NAME ": %s: Error: active aggregator is empty\n", 2412 pr_debug("%s: Error: active aggregator is empty\n", dev->name);
2395 dev->name);
2396 goto out; 2413 goto out;
2397 } 2414 }
2398 2415
@@ -2410,8 +2427,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2410 } 2427 }
2411 2428
2412 if (slave_agg_no >= 0) { 2429 if (slave_agg_no >= 0) {
2413 pr_err(DRV_NAME ": %s: Error: Couldn't find a slave to tx on " 2430 pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
2414 "for aggregator ID %d\n", dev->name, agg_id); 2431 dev->name, agg_id);
2415 goto out; 2432 goto out;
2416 } 2433 }
2417 2434
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 00ab51ef3129..40fdc41446cc 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -20,6 +20,8 @@
20 * 20 *
21 */ 21 */
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include <linux/skbuff.h> 25#include <linux/skbuff.h>
24#include <linux/netdevice.h> 26#include <linux/netdevice.h>
25#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
@@ -201,8 +203,7 @@ static int tlb_initialize(struct bonding *bond)
201 203
202 new_hashtbl = kzalloc(size, GFP_KERNEL); 204 new_hashtbl = kzalloc(size, GFP_KERNEL);
203 if (!new_hashtbl) { 205 if (!new_hashtbl) {
204 pr_err(DRV_NAME 206 pr_err("%s: Error: Failed to allocate TLB hash table\n",
205 ": %s: Error: Failed to allocate TLB hash table\n",
206 bond->dev->name); 207 bond->dev->name);
207 return -1; 208 return -1;
208 } 209 }
@@ -514,8 +515,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
514 client_info->slave->dev->dev_addr, 515 client_info->slave->dev->dev_addr,
515 client_info->mac_dst); 516 client_info->mac_dst);
516 if (!skb) { 517 if (!skb) {
517 pr_err(DRV_NAME 518 pr_err("%s: Error: failed to create an ARP packet\n",
518 ": %s: Error: failed to create an ARP packet\n",
519 client_info->slave->dev->master->name); 519 client_info->slave->dev->master->name);
520 continue; 520 continue;
521 } 521 }
@@ -525,8 +525,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
525 if (client_info->tag) { 525 if (client_info->tag) {
526 skb = vlan_put_tag(skb, client_info->vlan_id); 526 skb = vlan_put_tag(skb, client_info->vlan_id);
527 if (!skb) { 527 if (!skb) {
528 pr_err(DRV_NAME 528 pr_err("%s: Error: failed to insert VLAN tag\n",
529 ": %s: Error: failed to insert VLAN tag\n",
530 client_info->slave->dev->master->name); 529 client_info->slave->dev->master->name);
531 continue; 530 continue;
532 } 531 }
@@ -609,9 +608,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
609 client_info = &(bond_info->rx_hashtbl[hash_index]); 608 client_info = &(bond_info->rx_hashtbl[hash_index]);
610 609
611 if (!client_info->slave) { 610 if (!client_info->slave) {
612 pr_err(DRV_NAME 611 pr_err("%s: Error: found a client with no channel in the client's hash table\n",
613 ": %s: Error: found a client with no channel in "
614 "the client's hash table\n",
615 bond->dev->name); 612 bond->dev->name);
616 continue; 613 continue;
617 } 614 }
@@ -806,8 +803,7 @@ static int rlb_initialize(struct bonding *bond)
806 803
807 new_hashtbl = kmalloc(size, GFP_KERNEL); 804 new_hashtbl = kmalloc(size, GFP_KERNEL);
808 if (!new_hashtbl) { 805 if (!new_hashtbl) {
809 pr_err(DRV_NAME 806 pr_err("%s: Error: Failed to allocate RLB hash table\n",
810 ": %s: Error: Failed to allocate RLB hash table\n",
811 bond->dev->name); 807 bond->dev->name);
812 return -1; 808 return -1;
813 } 809 }
@@ -928,8 +924,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
928 924
929 skb = vlan_put_tag(skb, vlan->vlan_id); 925 skb = vlan_put_tag(skb, vlan->vlan_id);
930 if (!skb) { 926 if (!skb) {
931 pr_err(DRV_NAME 927 pr_err("%s: Error: failed to insert VLAN tag\n",
932 ": %s: Error: failed to insert VLAN tag\n",
933 bond->dev->name); 928 bond->dev->name);
934 continue; 929 continue;
935 } 930 }
@@ -958,11 +953,8 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
958 memcpy(s_addr.sa_data, addr, dev->addr_len); 953 memcpy(s_addr.sa_data, addr, dev->addr_len);
959 s_addr.sa_family = dev->type; 954 s_addr.sa_family = dev->type;
960 if (dev_set_mac_address(dev, &s_addr)) { 955 if (dev_set_mac_address(dev, &s_addr)) {
961 pr_err(DRV_NAME 956 pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
962 ": %s: Error: dev_set_mac_address of dev %s failed! ALB " 957 "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
963 "mode requires that the base driver support setting "
964 "the hw address also when the network device's "
965 "interface is open\n",
966 dev->master->name, dev->name); 958 dev->master->name, dev->name);
967 return -EOPNOTSUPP; 959 return -EOPNOTSUPP;
968 } 960 }
@@ -1169,18 +1161,12 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
1169 alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr, 1161 alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
1170 bond->alb_info.rlb_enabled); 1162 bond->alb_info.rlb_enabled);
1171 1163
1172 pr_warning(DRV_NAME 1164 pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
1173 ": %s: Warning: the hw address of slave %s is "
1174 "in use by the bond; giving it the hw address "
1175 "of %s\n",
1176 bond->dev->name, slave->dev->name, 1165 bond->dev->name, slave->dev->name,
1177 free_mac_slave->dev->name); 1166 free_mac_slave->dev->name);
1178 1167
1179 } else if (has_bond_addr) { 1168 } else if (has_bond_addr) {
1180 pr_err(DRV_NAME 1169 pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
1181 ": %s: Error: the hw address of slave %s is in use by the "
1182 "bond; couldn't find a slave with a free hw address to "
1183 "give it (this should not have happened)\n",
1184 bond->dev->name, slave->dev->name); 1170 bond->dev->name, slave->dev->name);
1185 return -EFAULT; 1171 return -EFAULT;
1186 } 1172 }
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index b72e1dc8cf8f..6dd64cf3cb76 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -20,6 +20,8 @@
20 * 20 *
21 */ 21 */
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include <linux/types.h> 25#include <linux/types.h>
24#include <linux/if_vlan.h> 26#include <linux/if_vlan.h>
25#include <net/ipv6.h> 27#include <net/ipv6.h>
@@ -74,20 +76,20 @@ static void bond_na_send(struct net_device *slave_dev,
74 addrconf_addr_solict_mult(daddr, &mcaddr); 76 addrconf_addr_solict_mult(daddr, &mcaddr);
75 77
76 pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n", 78 pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
77 slave_dev->name, &mcaddr, daddr); 79 slave_dev->name, &mcaddr, daddr);
78 80
79 skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr, 81 skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
80 ND_OPT_TARGET_LL_ADDR); 82 ND_OPT_TARGET_LL_ADDR);
81 83
82 if (!skb) { 84 if (!skb) {
83 pr_err(DRV_NAME ": NA packet allocation failed\n"); 85 pr_err("NA packet allocation failed\n");
84 return; 86 return;
85 } 87 }
86 88
87 if (vlan_id) { 89 if (vlan_id) {
88 skb = vlan_put_tag(skb, vlan_id); 90 skb = vlan_put_tag(skb, vlan_id);
89 if (!skb) { 91 if (!skb) {
90 pr_err(DRV_NAME ": failed to insert VLAN tag\n"); 92 pr_err("failed to insert VLAN tag\n");
91 return; 93 return;
92 } 94 }
93 } 95 }
@@ -109,8 +111,8 @@ void bond_send_unsolicited_na(struct bonding *bond)
109 struct inet6_dev *idev; 111 struct inet6_dev *idev;
110 int is_router; 112 int is_router;
111 113
112 pr_debug("bond_send_unsol_na: bond %s slave %s\n", bond->dev->name, 114 pr_debug("%s: bond %s slave %s\n", bond->dev->name,
113 slave ? slave->dev->name : "NULL"); 115 __func__, slave ? slave->dev->name : "NULL");
114 116
115 if (!slave || !bond->send_unsol_na || 117 if (!slave || !bond->send_unsol_na ||
116 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) 118 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index af9b9c4eb496..3f0071cfe56b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -31,6 +31,8 @@
31 * 31 *
32 */ 32 */
33 33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
34#include <linux/kernel.h> 36#include <linux/kernel.h>
35#include <linux/module.h> 37#include <linux/module.h>
36#include <linux/types.h> 38#include <linux/types.h>
@@ -260,7 +262,7 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
260 struct vlan_entry *vlan; 262 struct vlan_entry *vlan;
261 263
262 pr_debug("bond: %s, vlan id %d\n", 264 pr_debug("bond: %s, vlan id %d\n",
263 (bond ? bond->dev->name : "None"), vlan_id); 265 (bond ? bond->dev->name : "None"), vlan_id);
264 266
265 vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL); 267 vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
266 if (!vlan) 268 if (!vlan)
@@ -303,8 +305,8 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
303 if (bond_is_lb(bond)) 305 if (bond_is_lb(bond))
304 bond_alb_clear_vlan(bond, vlan_id); 306 bond_alb_clear_vlan(bond, vlan_id);
305 307
306 pr_debug("removed VLAN ID %d from bond %s\n", vlan_id, 308 pr_debug("removed VLAN ID %d from bond %s\n",
307 bond->dev->name); 309 vlan_id, bond->dev->name);
308 310
309 kfree(vlan); 311 kfree(vlan);
310 312
@@ -323,8 +325,8 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
323 } 325 }
324 } 326 }
325 327
326 pr_debug("couldn't find VLAN ID %d in bond %s\n", vlan_id, 328 pr_debug("couldn't find VLAN ID %d in bond %s\n",
327 bond->dev->name); 329 vlan_id, bond->dev->name);
328 330
329out: 331out:
330 write_unlock_bh(&bond->lock); 332 write_unlock_bh(&bond->lock);
@@ -348,7 +350,7 @@ static int bond_has_challenged_slaves(struct bonding *bond)
348 bond_for_each_slave(bond, slave, i) { 350 bond_for_each_slave(bond, slave, i) {
349 if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) { 351 if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
350 pr_debug("found VLAN challenged slave - %s\n", 352 pr_debug("found VLAN challenged slave - %s\n",
351 slave->dev->name); 353 slave->dev->name);
352 return 1; 354 return 1;
353 } 355 }
354 } 356 }
@@ -499,8 +501,7 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
499 501
500 res = bond_add_vlan(bond, vid); 502 res = bond_add_vlan(bond, vid);
501 if (res) { 503 if (res) {
502 pr_err(DRV_NAME 504 pr_err("%s: Error: Failed to add vlan id %d\n",
503 ": %s: Error: Failed to add vlan id %d\n",
504 bond_dev->name, vid); 505 bond_dev->name, vid);
505 } 506 }
506} 507}
@@ -534,8 +535,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
534 535
535 res = bond_del_vlan(bond, vid); 536 res = bond_del_vlan(bond, vid);
536 if (res) { 537 if (res) {
537 pr_err(DRV_NAME 538 pr_err("%s: Error: Failed to remove vlan id %d\n",
538 ": %s: Error: Failed to remove vlan id %d\n",
539 bond_dev->name, vid); 539 bond_dev->name, vid);
540 } 540 }
541} 541}
@@ -1053,8 +1053,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
1053 1053
1054 rv = dev_set_mac_address(new_active->dev, &saddr); 1054 rv = dev_set_mac_address(new_active->dev, &saddr);
1055 if (rv) { 1055 if (rv) {
1056 pr_err(DRV_NAME 1056 pr_err("%s: Error %d setting MAC of slave %s\n",
1057 ": %s: Error %d setting MAC of slave %s\n",
1058 bond->dev->name, -rv, new_active->dev->name); 1057 bond->dev->name, -rv, new_active->dev->name);
1059 goto out; 1058 goto out;
1060 } 1059 }
@@ -1067,16 +1066,14 @@ static void bond_do_fail_over_mac(struct bonding *bond,
1067 1066
1068 rv = dev_set_mac_address(old_active->dev, &saddr); 1067 rv = dev_set_mac_address(old_active->dev, &saddr);
1069 if (rv) 1068 if (rv)
1070 pr_err(DRV_NAME 1069 pr_err("%s: Error %d setting MAC of slave %s\n",
1071 ": %s: Error %d setting MAC of slave %s\n",
1072 bond->dev->name, -rv, new_active->dev->name); 1070 bond->dev->name, -rv, new_active->dev->name);
1073out: 1071out:
1074 read_lock(&bond->lock); 1072 read_lock(&bond->lock);
1075 write_lock_bh(&bond->curr_slave_lock); 1073 write_lock_bh(&bond->curr_slave_lock);
1076 break; 1074 break;
1077 default: 1075 default:
1078 pr_err(DRV_NAME 1076 pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n",
1079 ": %s: bond_do_fail_over_mac impossible: bad policy %d\n",
1080 bond->dev->name, bond->params.fail_over_mac); 1077 bond->dev->name, bond->params.fail_over_mac);
1081 break; 1078 break;
1082 } 1079 }
@@ -1178,11 +1175,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1178 1175
1179 if (new_active->link == BOND_LINK_BACK) { 1176 if (new_active->link == BOND_LINK_BACK) {
1180 if (USES_PRIMARY(bond->params.mode)) { 1177 if (USES_PRIMARY(bond->params.mode)) {
1181 pr_info(DRV_NAME 1178 pr_info("%s: making interface %s the new active one %d ms earlier.\n",
1182 ": %s: making interface %s the new " 1179 bond->dev->name, new_active->dev->name,
1183 "active one %d ms earlier.\n", 1180 (bond->params.updelay - new_active->delay) * bond->params.miimon);
1184 bond->dev->name, new_active->dev->name,
1185 (bond->params.updelay - new_active->delay) * bond->params.miimon);
1186 } 1181 }
1187 1182
1188 new_active->delay = 0; 1183 new_active->delay = 0;
@@ -1195,10 +1190,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1195 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); 1190 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
1196 } else { 1191 } else {
1197 if (USES_PRIMARY(bond->params.mode)) { 1192 if (USES_PRIMARY(bond->params.mode)) {
1198 pr_info(DRV_NAME 1193 pr_info("%s: making interface %s the new active one.\n",
1199 ": %s: making interface %s the new " 1194 bond->dev->name, new_active->dev->name);
1200 "active one.\n",
1201 bond->dev->name, new_active->dev->name);
1202 } 1195 }
1203 } 1196 }
1204 } 1197 }
@@ -1268,13 +1261,11 @@ void bond_select_active_slave(struct bonding *bond)
1268 return; 1261 return;
1269 1262
1270 if (netif_carrier_ok(bond->dev)) { 1263 if (netif_carrier_ok(bond->dev)) {
1271 pr_info(DRV_NAME 1264 pr_info("%s: first active interface up!\n",
1272 ": %s: first active interface up!\n", 1265 bond->dev->name);
1273 bond->dev->name);
1274 } else { 1266 } else {
1275 pr_info(DRV_NAME ": %s: " 1267 pr_info("%s: now running without any active interface !\n",
1276 "now running without any active interface !\n", 1268 bond->dev->name);
1277 bond->dev->name);
1278 } 1269 }
1279 } 1270 }
1280} 1271}
@@ -1423,16 +1414,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1423 1414
1424 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && 1415 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
1425 slave_ops->ndo_do_ioctl == NULL) { 1416 slave_ops->ndo_do_ioctl == NULL) {
1426 pr_warning(DRV_NAME 1417 pr_warning("%s: Warning: no link monitoring support for %s\n",
1427 ": %s: Warning: no link monitoring support for %s\n", 1418 bond_dev->name, slave_dev->name);
1428 bond_dev->name, slave_dev->name);
1429 } 1419 }
1430 1420
1431 /* bond must be initialized by bond_open() before enslaving */ 1421 /* bond must be initialized by bond_open() before enslaving */
1432 if (!(bond_dev->flags & IFF_UP)) { 1422 if (!(bond_dev->flags & IFF_UP)) {
1433 pr_warning(DRV_NAME 1423 pr_warning("%s: master_dev is not up in bond_enslave\n",
1434 " %s: master_dev is not up in bond_enslave\n", 1424 bond_dev->name);
1435 bond_dev->name);
1436 } 1425 }
1437 1426
1438 /* already enslaved */ 1427 /* already enslaved */
@@ -1446,19 +1435,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1446 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { 1435 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1447 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1436 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1448 if (!list_empty(&bond->vlan_list)) { 1437 if (!list_empty(&bond->vlan_list)) {
1449 pr_err(DRV_NAME 1438 pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
1450 ": %s: Error: cannot enslave VLAN " 1439 bond_dev->name, slave_dev->name, bond_dev->name);
1451 "challenged slave %s on VLAN enabled "
1452 "bond %s\n", bond_dev->name, slave_dev->name,
1453 bond_dev->name);
1454 return -EPERM; 1440 return -EPERM;
1455 } else { 1441 } else {
1456 pr_warning(DRV_NAME 1442 pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
1457 ": %s: Warning: enslaved VLAN challenged " 1443 bond_dev->name, slave_dev->name,
1458 "slave %s. Adding VLANs will be blocked as " 1444 slave_dev->name, bond_dev->name);
1459 "long as %s is part of bond %s\n",
1460 bond_dev->name, slave_dev->name, slave_dev->name,
1461 bond_dev->name);
1462 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1445 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
1463 } 1446 }
1464 } else { 1447 } else {
@@ -1478,8 +1461,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1478 * enslaving it; the old ifenslave will not. 1461 * enslaving it; the old ifenslave will not.
1479 */ 1462 */
1480 if ((slave_dev->flags & IFF_UP)) { 1463 if ((slave_dev->flags & IFF_UP)) {
1481 pr_err(DRV_NAME ": %s is up. " 1464 pr_err("%s is up. This may be due to an out of date ifenslave.\n",
1482 "This may be due to an out of date ifenslave.\n",
1483 slave_dev->name); 1465 slave_dev->name);
1484 res = -EPERM; 1466 res = -EPERM;
1485 goto err_undo_flags; 1467 goto err_undo_flags;
@@ -1495,7 +1477,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1495 if (bond->slave_cnt == 0) { 1477 if (bond->slave_cnt == 0) {
1496 if (bond_dev->type != slave_dev->type) { 1478 if (bond_dev->type != slave_dev->type) {
1497 pr_debug("%s: change device type from %d to %d\n", 1479 pr_debug("%s: change device type from %d to %d\n",
1498 bond_dev->name, bond_dev->type, slave_dev->type); 1480 bond_dev->name,
1481 bond_dev->type, slave_dev->type);
1499 1482
1500 netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE); 1483 netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
1501 1484
@@ -1507,28 +1490,21 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1507 netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE); 1490 netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
1508 } 1491 }
1509 } else if (bond_dev->type != slave_dev->type) { 1492 } else if (bond_dev->type != slave_dev->type) {
1510 pr_err(DRV_NAME ": %s ether type (%d) is different " 1493 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
1511 "from other slaves (%d), can not enslave it.\n", 1494 slave_dev->name,
1512 slave_dev->name, 1495 slave_dev->type, bond_dev->type);
1513 slave_dev->type, bond_dev->type); 1496 res = -EINVAL;
1514 res = -EINVAL; 1497 goto err_undo_flags;
1515 goto err_undo_flags;
1516 } 1498 }
1517 1499
1518 if (slave_ops->ndo_set_mac_address == NULL) { 1500 if (slave_ops->ndo_set_mac_address == NULL) {
1519 if (bond->slave_cnt == 0) { 1501 if (bond->slave_cnt == 0) {
1520 pr_warning(DRV_NAME 1502 pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
1521 ": %s: Warning: The first slave device " 1503 bond_dev->name);
1522 "specified does not support setting the MAC "
1523 "address. Setting fail_over_mac to active.",
1524 bond_dev->name);
1525 bond->params.fail_over_mac = BOND_FOM_ACTIVE; 1504 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1526 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { 1505 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1527 pr_err(DRV_NAME 1506 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
1528 ": %s: Error: The slave device specified " 1507 bond_dev->name);
1529 "does not support setting the MAC address, "
1530 "but fail_over_mac is not set to active.\n"
1531 , bond_dev->name);
1532 res = -EOPNOTSUPP; 1508 res = -EOPNOTSUPP;
1533 goto err_undo_flags; 1509 goto err_undo_flags;
1534 } 1510 }
@@ -1655,22 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1655 * supported); thus, we don't need to change 1631 * supported); thus, we don't need to change
1656 * the messages for netif_carrier. 1632 * the messages for netif_carrier.
1657 */ 1633 */
1658 pr_warning(DRV_NAME 1634 pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
1659 ": %s: Warning: MII and ETHTOOL support not "
1660 "available for interface %s, and "
1661 "arp_interval/arp_ip_target module parameters "
1662 "not specified, thus bonding will not detect "
1663 "link failures! see bonding.txt for details.\n",
1664 bond_dev->name, slave_dev->name); 1635 bond_dev->name, slave_dev->name);
1665 } else if (link_reporting == -1) { 1636 } else if (link_reporting == -1) {
1666 /* unable get link status using mii/ethtool */ 1637 /* unable get link status using mii/ethtool */
1667 pr_warning(DRV_NAME 1638 pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
1668 ": %s: Warning: can't get link status from " 1639 bond_dev->name, slave_dev->name);
1669 "interface %s; the network driver associated "
1670 "with this interface does not support MII or "
1671 "ETHTOOL link status reporting, thus miimon "
1672 "has no effect on this interface.\n",
1673 bond_dev->name, slave_dev->name);
1674 } 1640 }
1675 } 1641 }
1676 1642
@@ -1678,34 +1644,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1678 if (!bond->params.miimon || 1644 if (!bond->params.miimon ||
1679 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) { 1645 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
1680 if (bond->params.updelay) { 1646 if (bond->params.updelay) {
1681 pr_debug("Initial state of slave_dev is " 1647 pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n");
1682 "BOND_LINK_BACK\n");
1683 new_slave->link = BOND_LINK_BACK; 1648 new_slave->link = BOND_LINK_BACK;
1684 new_slave->delay = bond->params.updelay; 1649 new_slave->delay = bond->params.updelay;
1685 } else { 1650 } else {
1686 pr_debug("Initial state of slave_dev is " 1651 pr_debug("Initial state of slave_dev is BOND_LINK_UP\n");
1687 "BOND_LINK_UP\n");
1688 new_slave->link = BOND_LINK_UP; 1652 new_slave->link = BOND_LINK_UP;
1689 } 1653 }
1690 new_slave->jiffies = jiffies; 1654 new_slave->jiffies = jiffies;
1691 } else { 1655 } else {
1692 pr_debug("Initial state of slave_dev is " 1656 pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n");
1693 "BOND_LINK_DOWN\n");
1694 new_slave->link = BOND_LINK_DOWN; 1657 new_slave->link = BOND_LINK_DOWN;
1695 } 1658 }
1696 1659
1697 if (bond_update_speed_duplex(new_slave) && 1660 if (bond_update_speed_duplex(new_slave) &&
1698 (new_slave->link != BOND_LINK_DOWN)) { 1661 (new_slave->link != BOND_LINK_DOWN)) {
1699 pr_warning(DRV_NAME 1662 pr_warning("%s: Warning: failed to get speed and duplex from %s, assumed to be 100Mb/sec and Full.\n",
1700 ": %s: Warning: failed to get speed and duplex from %s, " 1663 bond_dev->name, new_slave->dev->name);
1701 "assumed to be 100Mb/sec and Full.\n",
1702 bond_dev->name, new_slave->dev->name);
1703 1664
1704 if (bond->params.mode == BOND_MODE_8023AD) { 1665 if (bond->params.mode == BOND_MODE_8023AD) {
1705 pr_warning(DRV_NAME 1666 pr_warning("%s: Warning: Operation of 802.3ad mode requires ETHTOOL support in base driver for proper aggregator selection.\n",
1706 ": %s: Warning: Operation of 802.3ad mode requires ETHTOOL " 1667 bond_dev->name);
1707 "support in base driver for proper aggregator "
1708 "selection.\n", bond_dev->name);
1709 } 1668 }
1710 } 1669 }
1711 1670
@@ -1777,11 +1736,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1777 if (res) 1736 if (res)
1778 goto err_close; 1737 goto err_close;
1779 1738
1780 pr_info(DRV_NAME 1739 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
1781 ": %s: enslaving %s as a%s interface with a%s link.\n", 1740 bond_dev->name, slave_dev->name,
1782 bond_dev->name, slave_dev->name, 1741 new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
1783 new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup", 1742 new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
1784 new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
1785 1743
1786 /* enslave is successful */ 1744 /* enslave is successful */
1787 return 0; 1745 return 0;
@@ -1833,8 +1791,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1833 /* slave is not a slave or master is not master of this slave */ 1791 /* slave is not a slave or master is not master of this slave */
1834 if (!(slave_dev->flags & IFF_SLAVE) || 1792 if (!(slave_dev->flags & IFF_SLAVE) ||
1835 (slave_dev->master != bond_dev)) { 1793 (slave_dev->master != bond_dev)) {
1836 pr_err(DRV_NAME 1794 pr_err("%s: Error: cannot release %s.\n",
1837 ": %s: Error: cannot release %s.\n",
1838 bond_dev->name, slave_dev->name); 1795 bond_dev->name, slave_dev->name);
1839 return -EINVAL; 1796 return -EINVAL;
1840 } 1797 }
@@ -1844,9 +1801,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1844 slave = bond_get_slave_by_dev(bond, slave_dev); 1801 slave = bond_get_slave_by_dev(bond, slave_dev);
1845 if (!slave) { 1802 if (!slave) {
1846 /* not a slave of this bond */ 1803 /* not a slave of this bond */
1847 pr_info(DRV_NAME 1804 pr_info("%s: %s not enslaved\n",
1848 ": %s: %s not enslaved\n", 1805 bond_dev->name, slave_dev->name);
1849 bond_dev->name, slave_dev->name);
1850 write_unlock_bh(&bond->lock); 1806 write_unlock_bh(&bond->lock);
1851 return -EINVAL; 1807 return -EINVAL;
1852 } 1808 }
@@ -1854,14 +1810,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1854 if (!bond->params.fail_over_mac) { 1810 if (!bond->params.fail_over_mac) {
1855 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && 1811 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
1856 bond->slave_cnt > 1) 1812 bond->slave_cnt > 1)
1857 pr_warning(DRV_NAME 1813 pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
1858 ": %s: Warning: the permanent HWaddr of %s - " 1814 bond_dev->name, slave_dev->name,
1859 "%pM - is still in use by %s. " 1815 slave->perm_hwaddr,
1860 "Set the HWaddr of %s to a different address " 1816 bond_dev->name, slave_dev->name);
1861 "to avoid conflicts.\n",
1862 bond_dev->name, slave_dev->name,
1863 slave->perm_hwaddr,
1864 bond_dev->name, slave_dev->name);
1865 } 1817 }
1866 1818
1867 /* Inform AD package of unbinding of slave. */ 1819 /* Inform AD package of unbinding of slave. */
@@ -1872,12 +1824,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1872 bond_3ad_unbind_slave(slave); 1824 bond_3ad_unbind_slave(slave);
1873 } 1825 }
1874 1826
1875 pr_info(DRV_NAME 1827 pr_info("%s: releasing %s interface %s\n",
1876 ": %s: releasing %s interface %s\n", 1828 bond_dev->name,
1877 bond_dev->name, 1829 (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup",
1878 (slave->state == BOND_STATE_ACTIVE) 1830 slave_dev->name);
1879 ? "active" : "backup",
1880 slave_dev->name);
1881 1831
1882 oldcurrent = bond->curr_active_slave; 1832 oldcurrent = bond->curr_active_slave;
1883 1833
@@ -1934,21 +1884,15 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1934 if (list_empty(&bond->vlan_list)) { 1884 if (list_empty(&bond->vlan_list)) {
1935 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1885 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
1936 } else { 1886 } else {
1937 pr_warning(DRV_NAME 1887 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
1938 ": %s: Warning: clearing HW address of %s while it " 1888 bond_dev->name, bond_dev->name);
1939 "still has VLANs.\n", 1889 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
1940 bond_dev->name, bond_dev->name); 1890 bond_dev->name);
1941 pr_warning(DRV_NAME
1942 ": %s: When re-adding slaves, make sure the bond's "
1943 "HW address matches its VLANs'.\n",
1944 bond_dev->name);
1945 } 1891 }
1946 } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 1892 } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
1947 !bond_has_challenged_slaves(bond)) { 1893 !bond_has_challenged_slaves(bond)) {
1948 pr_info(DRV_NAME 1894 pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
1949 ": %s: last VLAN challenged slave %s " 1895 bond_dev->name, slave_dev->name, bond_dev->name);
1950 "left bond %s. VLAN blocking is removed\n",
1951 bond_dev->name, slave_dev->name, bond_dev->name);
1952 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED; 1896 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
1953 } 1897 }
1954 1898
@@ -2011,8 +1955,8 @@ int bond_release_and_destroy(struct net_device *bond_dev,
2011 1955
2012 ret = bond_release(bond_dev, slave_dev); 1956 ret = bond_release(bond_dev, slave_dev);
2013 if ((ret == 0) && (bond->slave_cnt == 0)) { 1957 if ((ret == 0) && (bond->slave_cnt == 0)) {
2014 pr_info(DRV_NAME ": %s: destroying bond %s.\n", 1958 pr_info("%s: destroying bond %s.\n",
2015 bond_dev->name, bond_dev->name); 1959 bond_dev->name, bond_dev->name);
2016 unregister_netdevice(bond_dev); 1960 unregister_netdevice(bond_dev);
2017 } 1961 }
2018 return ret; 1962 return ret;
@@ -2116,19 +2060,13 @@ static int bond_release_all(struct net_device *bond_dev)
2116 if (list_empty(&bond->vlan_list)) 2060 if (list_empty(&bond->vlan_list))
2117 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 2061 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2118 else { 2062 else {
2119 pr_warning(DRV_NAME 2063 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
2120 ": %s: Warning: clearing HW address of %s while it " 2064 bond_dev->name, bond_dev->name);
2121 "still has VLANs.\n", 2065 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
2122 bond_dev->name, bond_dev->name); 2066 bond_dev->name);
2123 pr_warning(DRV_NAME
2124 ": %s: When re-adding slaves, make sure the bond's "
2125 "HW address matches its VLANs'.\n",
2126 bond_dev->name);
2127 } 2067 }
2128 2068
2129 pr_info(DRV_NAME 2069 pr_info("%s: released all slaves\n", bond_dev->name);
2130 ": %s: released all slaves\n",
2131 bond_dev->name);
2132 2070
2133out: 2071out:
2134 write_unlock_bh(&bond->lock); 2072 write_unlock_bh(&bond->lock);
@@ -2254,16 +2192,14 @@ static int bond_miimon_inspect(struct bonding *bond)
2254 slave->link = BOND_LINK_FAIL; 2192 slave->link = BOND_LINK_FAIL;
2255 slave->delay = bond->params.downdelay; 2193 slave->delay = bond->params.downdelay;
2256 if (slave->delay) { 2194 if (slave->delay) {
2257 pr_info(DRV_NAME 2195 pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
2258 ": %s: link status down for %s" 2196 bond->dev->name,
2259 "interface %s, disabling it in %d ms.\n", 2197 (bond->params.mode ==
2260 bond->dev->name, 2198 BOND_MODE_ACTIVEBACKUP) ?
2261 (bond->params.mode == 2199 ((slave->state == BOND_STATE_ACTIVE) ?
2262 BOND_MODE_ACTIVEBACKUP) ? 2200 "active " : "backup ") : "",
2263 ((slave->state == BOND_STATE_ACTIVE) ? 2201 slave->dev->name,
2264 "active " : "backup ") : "", 2202 bond->params.downdelay * bond->params.miimon);
2265 slave->dev->name,
2266 bond->params.downdelay * bond->params.miimon);
2267 } 2203 }
2268 /*FALLTHRU*/ 2204 /*FALLTHRU*/
2269 case BOND_LINK_FAIL: 2205 case BOND_LINK_FAIL:
@@ -2273,13 +2209,11 @@ static int bond_miimon_inspect(struct bonding *bond)
2273 */ 2209 */
2274 slave->link = BOND_LINK_UP; 2210 slave->link = BOND_LINK_UP;
2275 slave->jiffies = jiffies; 2211 slave->jiffies = jiffies;
2276 pr_info(DRV_NAME 2212 pr_info("%s: link status up again after %d ms for interface %s.\n",
2277 ": %s: link status up again after %d " 2213 bond->dev->name,
2278 "ms for interface %s.\n", 2214 (bond->params.downdelay - slave->delay) *
2279 bond->dev->name, 2215 bond->params.miimon,
2280 (bond->params.downdelay - slave->delay) * 2216 slave->dev->name);
2281 bond->params.miimon,
2282 slave->dev->name);
2283 continue; 2217 continue;
2284 } 2218 }
2285 2219
@@ -2300,25 +2234,21 @@ static int bond_miimon_inspect(struct bonding *bond)
2300 slave->delay = bond->params.updelay; 2234 slave->delay = bond->params.updelay;
2301 2235
2302 if (slave->delay) { 2236 if (slave->delay) {
2303 pr_info(DRV_NAME 2237 pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
2304 ": %s: link status up for " 2238 bond->dev->name, slave->dev->name,
2305 "interface %s, enabling it in %d ms.\n", 2239 ignore_updelay ? 0 :
2306 bond->dev->name, slave->dev->name, 2240 bond->params.updelay *
2307 ignore_updelay ? 0 : 2241 bond->params.miimon);
2308 bond->params.updelay *
2309 bond->params.miimon);
2310 } 2242 }
2311 /*FALLTHRU*/ 2243 /*FALLTHRU*/
2312 case BOND_LINK_BACK: 2244 case BOND_LINK_BACK:
2313 if (!link_state) { 2245 if (!link_state) {
2314 slave->link = BOND_LINK_DOWN; 2246 slave->link = BOND_LINK_DOWN;
2315 pr_info(DRV_NAME 2247 pr_info("%s: link status down again after %d ms for interface %s.\n",
2316 ": %s: link status down again after %d " 2248 bond->dev->name,
2317 "ms for interface %s.\n", 2249 (bond->params.updelay - slave->delay) *
2318 bond->dev->name, 2250 bond->params.miimon,
2319 (bond->params.updelay - slave->delay) * 2251 slave->dev->name);
2320 bond->params.miimon,
2321 slave->dev->name);
2322 2252
2323 continue; 2253 continue;
2324 } 2254 }
@@ -2366,10 +2296,8 @@ static void bond_miimon_commit(struct bonding *bond)
2366 slave->state = BOND_STATE_BACKUP; 2296 slave->state = BOND_STATE_BACKUP;
2367 } 2297 }
2368 2298
2369 pr_info(DRV_NAME 2299 pr_info("%s: link status definitely up for interface %s.\n",
2370 ": %s: link status definitely " 2300 bond->dev->name, slave->dev->name);
2371 "up for interface %s.\n",
2372 bond->dev->name, slave->dev->name);
2373 2301
2374 /* notify ad that the link status has changed */ 2302 /* notify ad that the link status has changed */
2375 if (bond->params.mode == BOND_MODE_8023AD) 2303 if (bond->params.mode == BOND_MODE_8023AD)
@@ -2395,10 +2323,8 @@ static void bond_miimon_commit(struct bonding *bond)
2395 bond->params.mode == BOND_MODE_8023AD) 2323 bond->params.mode == BOND_MODE_8023AD)
2396 bond_set_slave_inactive_flags(slave); 2324 bond_set_slave_inactive_flags(slave);
2397 2325
2398 pr_info(DRV_NAME 2326 pr_info("%s: link status definitely down for interface %s, disabling it\n",
2399 ": %s: link status definitely down for " 2327 bond->dev->name, slave->dev->name);
2400 "interface %s, disabling it\n",
2401 bond->dev->name, slave->dev->name);
2402 2328
2403 if (bond->params.mode == BOND_MODE_8023AD) 2329 if (bond->params.mode == BOND_MODE_8023AD)
2404 bond_3ad_handle_link_change(slave, 2330 bond_3ad_handle_link_change(slave,
@@ -2414,8 +2340,7 @@ static void bond_miimon_commit(struct bonding *bond)
2414 continue; 2340 continue;
2415 2341
2416 default: 2342 default:
2417 pr_err(DRV_NAME 2343 pr_err("%s: invalid new link %d on slave %s\n",
2418 ": %s: invalid new link %d on slave %s\n",
2419 bond->dev->name, slave->new_link, 2344 bond->dev->name, slave->new_link,
2420 slave->dev->name); 2345 slave->dev->name);
2421 slave->new_link = BOND_LINK_NOCHANGE; 2346 slave->new_link = BOND_LINK_NOCHANGE;
@@ -2534,19 +2459,19 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2534 struct sk_buff *skb; 2459 struct sk_buff *skb;
2535 2460
2536 pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op, 2461 pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
2537 slave_dev->name, dest_ip, src_ip, vlan_id); 2462 slave_dev->name, dest_ip, src_ip, vlan_id);
2538 2463
2539 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, 2464 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2540 NULL, slave_dev->dev_addr, NULL); 2465 NULL, slave_dev->dev_addr, NULL);
2541 2466
2542 if (!skb) { 2467 if (!skb) {
2543 pr_err(DRV_NAME ": ARP packet allocation failed\n"); 2468 pr_err("ARP packet allocation failed\n");
2544 return; 2469 return;
2545 } 2470 }
2546 if (vlan_id) { 2471 if (vlan_id) {
2547 skb = vlan_put_tag(skb, vlan_id); 2472 skb = vlan_put_tag(skb, vlan_id);
2548 if (!skb) { 2473 if (!skb) {
2549 pr_err(DRV_NAME ": failed to insert VLAN tag\n"); 2474 pr_err("failed to insert VLAN tag\n");
2550 return; 2475 return;
2551 } 2476 }
2552 } 2477 }
@@ -2586,9 +2511,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2586 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl); 2511 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
2587 if (rv) { 2512 if (rv) {
2588 if (net_ratelimit()) { 2513 if (net_ratelimit()) {
2589 pr_warning(DRV_NAME 2514 pr_warning("%s: no route to arp_ip_target %pI4\n",
2590 ": %s: no route to arp_ip_target %pI4\n", 2515 bond->dev->name, &fl.fl4_dst);
2591 bond->dev->name, &fl.fl4_dst);
2592 } 2516 }
2593 continue; 2517 continue;
2594 } 2518 }
@@ -2623,10 +2547,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2623 } 2547 }
2624 2548
2625 if (net_ratelimit()) { 2549 if (net_ratelimit()) {
2626 pr_warning(DRV_NAME 2550 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2627 ": %s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2551 bond->dev->name, &fl.fl4_dst,
2628 bond->dev->name, &fl.fl4_dst, 2552 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
2629 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
2630 } 2553 }
2631 ip_rt_put(rt); 2554 ip_rt_put(rt);
2632 } 2555 }
@@ -2644,8 +2567,8 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
2644 struct vlan_entry *vlan; 2567 struct vlan_entry *vlan;
2645 struct net_device *vlan_dev; 2568 struct net_device *vlan_dev;
2646 2569
2647 pr_debug("bond_send_grat_arp: bond %s slave %s\n", bond->dev->name, 2570 pr_debug("bond_send_grat_arp: bond %s slave %s\n",
2648 slave ? slave->dev->name : "NULL"); 2571 bond->dev->name, slave ? slave->dev->name : "NULL");
2649 2572
2650 if (!slave || !bond->send_grat_arp || 2573 if (!slave || !bond->send_grat_arp ||
2651 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) 2574 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
@@ -2674,7 +2597,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2674 2597
2675 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { 2598 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
2676 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", 2599 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
2677 &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); 2600 &sip, &tip, i, &targets[i],
2601 bond_has_this_ip(bond, tip));
2678 if (sip == targets[i]) { 2602 if (sip == targets[i]) {
2679 if (bond_has_this_ip(bond, tip)) 2603 if (bond_has_this_ip(bond, tip))
2680 slave->last_arp_rx = jiffies; 2604 slave->last_arp_rx = jiffies;
@@ -2698,8 +2622,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2698 read_lock(&bond->lock); 2622 read_lock(&bond->lock);
2699 2623
2700 pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n", 2624 pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n",
2701 bond->dev->name, skb->dev ? skb->dev->name : "NULL", 2625 bond->dev->name, skb->dev ? skb->dev->name : "NULL",
2702 orig_dev ? orig_dev->name : "NULL"); 2626 orig_dev ? orig_dev->name : "NULL");
2703 2627
2704 slave = bond_get_slave_by_dev(bond, orig_dev); 2628 slave = bond_get_slave_by_dev(bond, orig_dev);
2705 if (!slave || !slave_do_arp_validate(bond, slave)) 2629 if (!slave || !slave_do_arp_validate(bond, slave))
@@ -2724,9 +2648,9 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2724 memcpy(&tip, arp_ptr, 4); 2648 memcpy(&tip, arp_ptr, 4);
2725 2649
2726 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", 2650 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
2727 bond->dev->name, slave->dev->name, slave->state, 2651 bond->dev->name, slave->dev->name, slave->state,
2728 bond->params.arp_validate, slave_do_arp_validate(bond, slave), 2652 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2729 &sip, &tip); 2653 &sip, &tip);
2730 2654
2731 /* 2655 /*
2732 * Backup slaves won't see the ARP reply, but do come through 2656 * Backup slaves won't see the ARP reply, but do come through
@@ -2800,17 +2724,14 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2800 * is closed. 2724 * is closed.
2801 */ 2725 */
2802 if (!oldcurrent) { 2726 if (!oldcurrent) {
2803 pr_info(DRV_NAME 2727 pr_info("%s: link status definitely up for interface %s, ",
2804 ": %s: link status definitely " 2728 bond->dev->name,
2805 "up for interface %s, ", 2729 slave->dev->name);
2806 bond->dev->name,
2807 slave->dev->name);
2808 do_failover = 1; 2730 do_failover = 1;
2809 } else { 2731 } else {
2810 pr_info(DRV_NAME 2732 pr_info("%s: interface %s is now up\n",
2811 ": %s: interface %s is now up\n", 2733 bond->dev->name,
2812 bond->dev->name, 2734 slave->dev->name);
2813 slave->dev->name);
2814 } 2735 }
2815 } 2736 }
2816 } else { 2737 } else {
@@ -2829,10 +2750,9 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2829 if (slave->link_failure_count < UINT_MAX) 2750 if (slave->link_failure_count < UINT_MAX)
2830 slave->link_failure_count++; 2751 slave->link_failure_count++;
2831 2752
2832 pr_info(DRV_NAME 2753 pr_info("%s: interface %s is now down.\n",
2833 ": %s: interface %s is now down.\n", 2754 bond->dev->name,
2834 bond->dev->name, 2755 slave->dev->name);
2835 slave->dev->name);
2836 2756
2837 if (slave == oldcurrent) 2757 if (slave == oldcurrent)
2838 do_failover = 1; 2758 do_failover = 1;
@@ -2965,9 +2885,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2965 slave->link = BOND_LINK_UP; 2885 slave->link = BOND_LINK_UP;
2966 bond->current_arp_slave = NULL; 2886 bond->current_arp_slave = NULL;
2967 2887
2968 pr_info(DRV_NAME 2888 pr_info("%s: link status definitely up for interface %s.\n",
2969 ": %s: link status definitely "
2970 "up for interface %s.\n",
2971 bond->dev->name, slave->dev->name); 2889 bond->dev->name, slave->dev->name);
2972 2890
2973 if (!bond->curr_active_slave || 2891 if (!bond->curr_active_slave ||
@@ -2985,9 +2903,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2985 slave->link = BOND_LINK_DOWN; 2903 slave->link = BOND_LINK_DOWN;
2986 bond_set_slave_inactive_flags(slave); 2904 bond_set_slave_inactive_flags(slave);
2987 2905
2988 pr_info(DRV_NAME 2906 pr_info("%s: link status definitely down for interface %s, disabling it\n",
2989 ": %s: link status definitely down for "
2990 "interface %s, disabling it\n",
2991 bond->dev->name, slave->dev->name); 2907 bond->dev->name, slave->dev->name);
2992 2908
2993 if (slave == bond->curr_active_slave) { 2909 if (slave == bond->curr_active_slave) {
@@ -2998,8 +2914,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2998 continue; 2914 continue;
2999 2915
3000 default: 2916 default:
3001 pr_err(DRV_NAME 2917 pr_err("%s: impossible: new_link %d on slave %s\n",
3002 ": %s: impossible: new_link %d on slave %s\n",
3003 bond->dev->name, slave->new_link, 2918 bond->dev->name, slave->new_link,
3004 slave->dev->name); 2919 slave->dev->name);
3005 continue; 2920 continue;
@@ -3028,9 +2943,9 @@ static void bond_ab_arp_probe(struct bonding *bond)
3028 read_lock(&bond->curr_slave_lock); 2943 read_lock(&bond->curr_slave_lock);
3029 2944
3030 if (bond->current_arp_slave && bond->curr_active_slave) 2945 if (bond->current_arp_slave && bond->curr_active_slave)
3031 pr_info(DRV_NAME "PROBE: c_arp %s && cas %s BAD\n", 2946 pr_info("PROBE: c_arp %s && cas %s BAD\n",
3032 bond->current_arp_slave->dev->name, 2947 bond->current_arp_slave->dev->name,
3033 bond->curr_active_slave->dev->name); 2948 bond->curr_active_slave->dev->name);
3034 2949
3035 if (bond->curr_active_slave) { 2950 if (bond->curr_active_slave) {
3036 bond_arp_send_all(bond, bond->curr_active_slave); 2951 bond_arp_send_all(bond, bond->curr_active_slave);
@@ -3078,9 +2993,8 @@ static void bond_ab_arp_probe(struct bonding *bond)
3078 2993
3079 bond_set_slave_inactive_flags(slave); 2994 bond_set_slave_inactive_flags(slave);
3080 2995
3081 pr_info(DRV_NAME 2996 pr_info("%s: backup interface %s is now down.\n",
3082 ": %s: backup interface %s is now down.\n", 2997 bond->dev->name, slave->dev->name);
3083 bond->dev->name, slave->dev->name);
3084 } 2998 }
3085 } 2999 }
3086} 3000}
@@ -3360,9 +3274,8 @@ static void bond_create_proc_entry(struct bonding *bond)
3360 S_IRUGO, bn->proc_dir, 3274 S_IRUGO, bn->proc_dir,
3361 &bond_info_fops, bond); 3275 &bond_info_fops, bond);
3362 if (bond->proc_entry == NULL) 3276 if (bond->proc_entry == NULL)
3363 pr_warning(DRV_NAME 3277 pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
3364 ": Warning: Cannot create /proc/net/%s/%s\n", 3278 DRV_NAME, bond_dev->name);
3365 DRV_NAME, bond_dev->name);
3366 else 3279 else
3367 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); 3280 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
3368 } 3281 }
@@ -3388,9 +3301,8 @@ static void bond_create_proc_dir(struct bond_net *bn)
3388 if (!bn->proc_dir) { 3301 if (!bn->proc_dir) {
3389 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); 3302 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
3390 if (!bn->proc_dir) 3303 if (!bn->proc_dir)
3391 pr_warning(DRV_NAME 3304 pr_warning("Warning: cannot create /proc/net/%s\n",
3392 ": Warning: cannot create /proc/net/%s\n", 3305 DRV_NAME);
3393 DRV_NAME);
3394 } 3306 }
3395} 3307}
3396 3308
@@ -3539,8 +3451,8 @@ static int bond_netdev_event(struct notifier_block *this,
3539 struct net_device *event_dev = (struct net_device *)ptr; 3451 struct net_device *event_dev = (struct net_device *)ptr;
3540 3452
3541 pr_debug("event_dev: %s, event: %lx\n", 3453 pr_debug("event_dev: %s, event: %lx\n",
3542 (event_dev ? event_dev->name : "None"), 3454 event_dev ? event_dev->name : "None",
3543 event); 3455 event);
3544 3456
3545 if (!(event_dev->priv_flags & IFF_BONDING)) 3457 if (!(event_dev->priv_flags & IFF_BONDING))
3546 return NOTIFY_DONE; 3458 return NOTIFY_DONE;
@@ -3875,8 +3787,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3875 struct mii_ioctl_data *mii = NULL; 3787 struct mii_ioctl_data *mii = NULL;
3876 int res = 0; 3788 int res = 0;
3877 3789
3878 pr_debug("bond_ioctl: master=%s, cmd=%d\n", 3790 pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
3879 bond_dev->name, cmd);
3880 3791
3881 switch (cmd) { 3792 switch (cmd) {
3882 case SIOCGMIIPHY: 3793 case SIOCGMIIPHY:
@@ -3945,12 +3856,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3945 3856
3946 slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave); 3857 slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
3947 3858
3948 pr_debug("slave_dev=%p: \n", slave_dev); 3859 pr_debug("slave_dev=%p:\n", slave_dev);
3949 3860
3950 if (!slave_dev) 3861 if (!slave_dev)
3951 res = -ENODEV; 3862 res = -ENODEV;
3952 else { 3863 else {
3953 pr_debug("slave_dev->name=%s: \n", slave_dev->name); 3864 pr_debug("slave_dev->name=%s:\n", slave_dev->name);
3954 switch (cmd) { 3865 switch (cmd) {
3955 case BOND_ENSLAVE_OLD: 3866 case BOND_ENSLAVE_OLD:
3956 case SIOCBONDENSLAVE: 3867 case SIOCBONDENSLAVE:
@@ -4059,7 +3970,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4059 int i; 3970 int i;
4060 3971
4061 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond, 3972 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
4062 (bond_dev ? bond_dev->name : "None"), new_mtu); 3973 (bond_dev ? bond_dev->name : "None"), new_mtu);
4063 3974
4064 /* Can't hold bond->lock with bh disabled here since 3975 /* Can't hold bond->lock with bh disabled here since
4065 * some base drivers panic. On the other hand we can't 3976 * some base drivers panic. On the other hand we can't
@@ -4077,8 +3988,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4077 */ 3988 */
4078 3989
4079 bond_for_each_slave(bond, slave, i) { 3990 bond_for_each_slave(bond, slave, i) {
4080 pr_debug("s %p s->p %p c_m %p\n", slave, 3991 pr_debug("s %p s->p %p c_m %p\n",
4081 slave->prev, slave->dev->netdev_ops->ndo_change_mtu); 3992 slave,
3993 slave->prev,
3994 slave->dev->netdev_ops->ndo_change_mtu);
4082 3995
4083 res = dev_set_mtu(slave->dev, new_mtu); 3996 res = dev_set_mtu(slave->dev, new_mtu);
4084 3997
@@ -4108,8 +4021,8 @@ unwind:
4108 4021
4109 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu); 4022 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
4110 if (tmp_res) { 4023 if (tmp_res) {
4111 pr_debug("unwind err %d dev %s\n", tmp_res, 4024 pr_debug("unwind err %d dev %s\n",
4112 slave->dev->name); 4025 tmp_res, slave->dev->name);
4113 } 4026 }
4114 } 4027 }
4115 4028
@@ -4135,7 +4048,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4135 return bond_alb_set_mac_address(bond_dev, addr); 4048 return bond_alb_set_mac_address(bond_dev, addr);
4136 4049
4137 4050
4138 pr_debug("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None")); 4051 pr_debug("bond=%p, name=%s\n",
4052 bond, bond_dev ? bond_dev->name : "None");
4139 4053
4140 /* 4054 /*
4141 * If fail_over_mac is set to active, do nothing and return 4055 * If fail_over_mac is set to active, do nothing and return
@@ -4200,8 +4114,8 @@ unwind:
4200 4114
4201 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa); 4115 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
4202 if (tmp_res) { 4116 if (tmp_res) {
4203 pr_debug("unwind err %d dev %s\n", tmp_res, 4117 pr_debug("unwind err %d dev %s\n",
4204 slave->dev->name); 4118 tmp_res, slave->dev->name);
4205 } 4119 }
4206 } 4120 }
4207 4121
@@ -4357,9 +4271,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4357 if (tx_dev) { 4271 if (tx_dev) {
4358 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 4272 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4359 if (!skb2) { 4273 if (!skb2) {
4360 pr_err(DRV_NAME 4274 pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
4361 ": %s: Error: bond_xmit_broadcast(): "
4362 "skb_clone() failed\n",
4363 bond_dev->name); 4275 bond_dev->name);
4364 continue; 4276 continue;
4365 } 4277 }
@@ -4425,8 +4337,8 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4425 return bond_alb_xmit(skb, dev); 4337 return bond_alb_xmit(skb, dev);
4426 default: 4338 default:
4427 /* Should never happen, mode already checked */ 4339 /* Should never happen, mode already checked */
4428 pr_err(DRV_NAME ": %s: Error: Unknown bonding mode %d\n", 4340 pr_err("%s: Error: Unknown bonding mode %d\n",
4429 dev->name, bond->params.mode); 4341 dev->name, bond->params.mode);
4430 WARN_ON_ONCE(1); 4342 WARN_ON_ONCE(1);
4431 dev_kfree_skb(skb); 4343 dev_kfree_skb(skb);
4432 return NETDEV_TX_OK; 4344 return NETDEV_TX_OK;
@@ -4462,10 +4374,8 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
4462 break; 4374 break;
4463 default: 4375 default:
4464 /* Should never happen, mode already checked */ 4376 /* Should never happen, mode already checked */
4465 pr_err(DRV_NAME 4377 pr_err("%s: Error: Unknown bonding mode %d\n",
4466 ": %s: Error: Unknown bonding mode %d\n", 4378 bond_dev->name, mode);
4467 bond_dev->name,
4468 mode);
4469 break; 4379 break;
4470 } 4380 }
4471} 4381}
@@ -4650,8 +4560,7 @@ static int bond_check_params(struct bond_params *params)
4650 if (mode) { 4560 if (mode) {
4651 bond_mode = bond_parse_parm(mode, bond_mode_tbl); 4561 bond_mode = bond_parse_parm(mode, bond_mode_tbl);
4652 if (bond_mode == -1) { 4562 if (bond_mode == -1) {
4653 pr_err(DRV_NAME 4563 pr_err("Error: Invalid bonding mode \"%s\"\n",
4654 ": Error: Invalid bonding mode \"%s\"\n",
4655 mode == NULL ? "NULL" : mode); 4564 mode == NULL ? "NULL" : mode);
4656 return -EINVAL; 4565 return -EINVAL;
4657 } 4566 }
@@ -4660,16 +4569,13 @@ static int bond_check_params(struct bond_params *params)
4660 if (xmit_hash_policy) { 4569 if (xmit_hash_policy) {
4661 if ((bond_mode != BOND_MODE_XOR) && 4570 if ((bond_mode != BOND_MODE_XOR) &&
4662 (bond_mode != BOND_MODE_8023AD)) { 4571 (bond_mode != BOND_MODE_8023AD)) {
4663 pr_info(DRV_NAME 4572 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
4664 ": xmit_hash_policy param is irrelevant in"
4665 " mode %s\n",
4666 bond_mode_name(bond_mode)); 4573 bond_mode_name(bond_mode));
4667 } else { 4574 } else {
4668 xmit_hashtype = bond_parse_parm(xmit_hash_policy, 4575 xmit_hashtype = bond_parse_parm(xmit_hash_policy,
4669 xmit_hashtype_tbl); 4576 xmit_hashtype_tbl);
4670 if (xmit_hashtype == -1) { 4577 if (xmit_hashtype == -1) {
4671 pr_err(DRV_NAME 4578 pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4672 ": Error: Invalid xmit_hash_policy \"%s\"\n",
4673 xmit_hash_policy == NULL ? "NULL" : 4579 xmit_hash_policy == NULL ? "NULL" :
4674 xmit_hash_policy); 4580 xmit_hash_policy);
4675 return -EINVAL; 4581 return -EINVAL;
@@ -4679,14 +4585,12 @@ static int bond_check_params(struct bond_params *params)
4679 4585
4680 if (lacp_rate) { 4586 if (lacp_rate) {
4681 if (bond_mode != BOND_MODE_8023AD) { 4587 if (bond_mode != BOND_MODE_8023AD) {
4682 pr_info(DRV_NAME 4588 pr_info("lacp_rate param is irrelevant in mode %s\n",
4683 ": lacp_rate param is irrelevant in mode %s\n", 4589 bond_mode_name(bond_mode));
4684 bond_mode_name(bond_mode));
4685 } else { 4590 } else {
4686 lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl); 4591 lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
4687 if (lacp_fast == -1) { 4592 if (lacp_fast == -1) {
4688 pr_err(DRV_NAME 4593 pr_err("Error: Invalid lacp rate \"%s\"\n",
4689 ": Error: Invalid lacp rate \"%s\"\n",
4690 lacp_rate == NULL ? "NULL" : lacp_rate); 4594 lacp_rate == NULL ? "NULL" : lacp_rate);
4691 return -EINVAL; 4595 return -EINVAL;
4692 } 4596 }
@@ -4696,82 +4600,64 @@ static int bond_check_params(struct bond_params *params)
4696 if (ad_select) { 4600 if (ad_select) {
4697 params->ad_select = bond_parse_parm(ad_select, ad_select_tbl); 4601 params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
4698 if (params->ad_select == -1) { 4602 if (params->ad_select == -1) {
4699 pr_err(DRV_NAME 4603 pr_err("Error: Invalid ad_select \"%s\"\n",
4700 ": Error: Invalid ad_select \"%s\"\n",
4701 ad_select == NULL ? "NULL" : ad_select); 4604 ad_select == NULL ? "NULL" : ad_select);
4702 return -EINVAL; 4605 return -EINVAL;
4703 } 4606 }
4704 4607
4705 if (bond_mode != BOND_MODE_8023AD) { 4608 if (bond_mode != BOND_MODE_8023AD) {
4706 pr_warning(DRV_NAME 4609 pr_warning("ad_select param only affects 802.3ad mode\n");
4707 ": ad_select param only affects 802.3ad mode\n");
4708 } 4610 }
4709 } else { 4611 } else {
4710 params->ad_select = BOND_AD_STABLE; 4612 params->ad_select = BOND_AD_STABLE;
4711 } 4613 }
4712 4614
4713 if (max_bonds < 0) { 4615 if (max_bonds < 0) {
4714 pr_warning(DRV_NAME 4616 pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4715 ": Warning: max_bonds (%d) not in range %d-%d, so it " 4617 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4716 "was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4717 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4718 max_bonds = BOND_DEFAULT_MAX_BONDS; 4618 max_bonds = BOND_DEFAULT_MAX_BONDS;
4719 } 4619 }
4720 4620
4721 if (miimon < 0) { 4621 if (miimon < 0) {
4722 pr_warning(DRV_NAME 4622 pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n",
4723 ": Warning: miimon module parameter (%d), " 4623 miimon, INT_MAX, BOND_LINK_MON_INTERV);
4724 "not in range 0-%d, so it was reset to %d\n",
4725 miimon, INT_MAX, BOND_LINK_MON_INTERV);
4726 miimon = BOND_LINK_MON_INTERV; 4624 miimon = BOND_LINK_MON_INTERV;
4727 } 4625 }
4728 4626
4729 if (updelay < 0) { 4627 if (updelay < 0) {
4730 pr_warning(DRV_NAME 4628 pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4731 ": Warning: updelay module parameter (%d), " 4629 updelay, INT_MAX);
4732 "not in range 0-%d, so it was reset to 0\n",
4733 updelay, INT_MAX);
4734 updelay = 0; 4630 updelay = 0;
4735 } 4631 }
4736 4632
4737 if (downdelay < 0) { 4633 if (downdelay < 0) {
4738 pr_warning(DRV_NAME 4634 pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4739 ": Warning: downdelay module parameter (%d), " 4635 downdelay, INT_MAX);
4740 "not in range 0-%d, so it was reset to 0\n",
4741 downdelay, INT_MAX);
4742 downdelay = 0; 4636 downdelay = 0;
4743 } 4637 }
4744 4638
4745 if ((use_carrier != 0) && (use_carrier != 1)) { 4639 if ((use_carrier != 0) && (use_carrier != 1)) {
4746 pr_warning(DRV_NAME 4640 pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
4747 ": Warning: use_carrier module parameter (%d), " 4641 use_carrier);
4748 "not of valid value (0/1), so it was set to 1\n",
4749 use_carrier);
4750 use_carrier = 1; 4642 use_carrier = 1;
4751 } 4643 }
4752 4644
4753 if (num_grat_arp < 0 || num_grat_arp > 255) { 4645 if (num_grat_arp < 0 || num_grat_arp > 255) {
4754 pr_warning(DRV_NAME 4646 pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
4755 ": Warning: num_grat_arp (%d) not in range 0-255 so it " 4647 num_grat_arp);
4756 "was reset to 1 \n", num_grat_arp);
4757 num_grat_arp = 1; 4648 num_grat_arp = 1;
4758 } 4649 }
4759 4650
4760 if (num_unsol_na < 0 || num_unsol_na > 255) { 4651 if (num_unsol_na < 0 || num_unsol_na > 255) {
4761 pr_warning(DRV_NAME 4652 pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
4762 ": Warning: num_unsol_na (%d) not in range 0-255 so it " 4653 num_unsol_na);
4763 "was reset to 1 \n", num_unsol_na);
4764 num_unsol_na = 1; 4654 num_unsol_na = 1;
4765 } 4655 }
4766 4656
4767 /* reset values for 802.3ad */ 4657 /* reset values for 802.3ad */
4768 if (bond_mode == BOND_MODE_8023AD) { 4658 if (bond_mode == BOND_MODE_8023AD) {
4769 if (!miimon) { 4659 if (!miimon) {
4770 pr_warning(DRV_NAME 4660 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4771 ": Warning: miimon must be specified, "
4772 "otherwise bonding will not detect link "
4773 "failure, speed and duplex which are "
4774 "essential for 802.3ad operation\n");
4775 pr_warning("Forcing miimon to 100msec\n"); 4661 pr_warning("Forcing miimon to 100msec\n");
4776 miimon = 100; 4662 miimon = 100;
4777 } 4663 }
@@ -4781,24 +4667,15 @@ static int bond_check_params(struct bond_params *params)
4781 if ((bond_mode == BOND_MODE_TLB) || 4667 if ((bond_mode == BOND_MODE_TLB) ||
4782 (bond_mode == BOND_MODE_ALB)) { 4668 (bond_mode == BOND_MODE_ALB)) {
4783 if (!miimon) { 4669 if (!miimon) {
4784 pr_warning(DRV_NAME 4670 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
4785 ": Warning: miimon must be specified, "
4786 "otherwise bonding will not detect link "
4787 "failure and link speed which are essential "
4788 "for TLB/ALB load balancing\n");
4789 pr_warning("Forcing miimon to 100msec\n"); 4671 pr_warning("Forcing miimon to 100msec\n");
4790 miimon = 100; 4672 miimon = 100;
4791 } 4673 }
4792 } 4674 }
4793 4675
4794 if (bond_mode == BOND_MODE_ALB) { 4676 if (bond_mode == BOND_MODE_ALB) {
4795 pr_notice(DRV_NAME 4677 pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
4796 ": In ALB mode you might experience client " 4678 updelay);
4797 "disconnections upon reconnection of a link if the "
4798 "bonding module updelay parameter (%d msec) is "
4799 "incompatible with the forwarding delay time of the "
4800 "switch\n",
4801 updelay);
4802 } 4679 }
4803 4680
4804 if (!miimon) { 4681 if (!miimon) {
@@ -4806,49 +4683,37 @@ static int bond_check_params(struct bond_params *params)
4806 /* just warn the user the up/down delay will have 4683 /* just warn the user the up/down delay will have
4807 * no effect since miimon is zero... 4684 * no effect since miimon is zero...
4808 */ 4685 */
4809 pr_warning(DRV_NAME 4686 pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
4810 ": Warning: miimon module parameter not set " 4687 updelay, downdelay);
4811 "and updelay (%d) or downdelay (%d) module "
4812 "parameter is set; updelay and downdelay have "
4813 "no effect unless miimon is set\n",
4814 updelay, downdelay);
4815 } 4688 }
4816 } else { 4689 } else {
4817 /* don't allow arp monitoring */ 4690 /* don't allow arp monitoring */
4818 if (arp_interval) { 4691 if (arp_interval) {
4819 pr_warning(DRV_NAME 4692 pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
4820 ": Warning: miimon (%d) and arp_interval (%d) " 4693 miimon, arp_interval);
4821 "can't be used simultaneously, disabling ARP "
4822 "monitoring\n",
4823 miimon, arp_interval);
4824 arp_interval = 0; 4694 arp_interval = 0;
4825 } 4695 }
4826 4696
4827 if ((updelay % miimon) != 0) { 4697 if ((updelay % miimon) != 0) {
4828 pr_warning(DRV_NAME 4698 pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
4829 ": Warning: updelay (%d) is not a multiple " 4699 updelay, miimon,
4830 "of miimon (%d), updelay rounded to %d ms\n", 4700 (updelay / miimon) * miimon);
4831 updelay, miimon, (updelay / miimon) * miimon);
4832 } 4701 }
4833 4702
4834 updelay /= miimon; 4703 updelay /= miimon;
4835 4704
4836 if ((downdelay % miimon) != 0) { 4705 if ((downdelay % miimon) != 0) {
4837 pr_warning(DRV_NAME 4706 pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
4838 ": Warning: downdelay (%d) is not a multiple " 4707 downdelay, miimon,
4839 "of miimon (%d), downdelay rounded to %d ms\n", 4708 (downdelay / miimon) * miimon);
4840 downdelay, miimon,
4841 (downdelay / miimon) * miimon);
4842 } 4709 }
4843 4710
4844 downdelay /= miimon; 4711 downdelay /= miimon;
4845 } 4712 }
4846 4713
4847 if (arp_interval < 0) { 4714 if (arp_interval < 0) {
4848 pr_warning(DRV_NAME 4715 pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n",
4849 ": Warning: arp_interval module parameter (%d) " 4716 arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
4850 ", not in range 0-%d, so it was reset to %d\n",
4851 arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
4852 arp_interval = BOND_LINK_ARP_INTERV; 4717 arp_interval = BOND_LINK_ARP_INTERV;
4853 } 4718 }
4854 4719
@@ -4858,10 +4723,8 @@ static int bond_check_params(struct bond_params *params)
4858 /* not complete check, but should be good enough to 4723 /* not complete check, but should be good enough to
4859 catch mistakes */ 4724 catch mistakes */
4860 if (!isdigit(arp_ip_target[arp_ip_count][0])) { 4725 if (!isdigit(arp_ip_target[arp_ip_count][0])) {
4861 pr_warning(DRV_NAME 4726 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4862 ": Warning: bad arp_ip_target module parameter " 4727 arp_ip_target[arp_ip_count]);
4863 "(%s), ARP monitoring will not be performed\n",
4864 arp_ip_target[arp_ip_count]);
4865 arp_interval = 0; 4728 arp_interval = 0;
4866 } else { 4729 } else {
4867 __be32 ip = in_aton(arp_ip_target[arp_ip_count]); 4730 __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
@@ -4871,31 +4734,25 @@ static int bond_check_params(struct bond_params *params)
4871 4734
4872 if (arp_interval && !arp_ip_count) { 4735 if (arp_interval && !arp_ip_count) {
4873 /* don't allow arping if no arp_ip_target given... */ 4736 /* don't allow arping if no arp_ip_target given... */
4874 pr_warning(DRV_NAME 4737 pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
4875 ": Warning: arp_interval module parameter (%d) " 4738 arp_interval);
4876 "specified without providing an arp_ip_target "
4877 "parameter, arp_interval was reset to 0\n",
4878 arp_interval);
4879 arp_interval = 0; 4739 arp_interval = 0;
4880 } 4740 }
4881 4741
4882 if (arp_validate) { 4742 if (arp_validate) {
4883 if (bond_mode != BOND_MODE_ACTIVEBACKUP) { 4743 if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
4884 pr_err(DRV_NAME 4744 pr_err("arp_validate only supported in active-backup mode\n");
4885 ": arp_validate only supported in active-backup mode\n");
4886 return -EINVAL; 4745 return -EINVAL;
4887 } 4746 }
4888 if (!arp_interval) { 4747 if (!arp_interval) {
4889 pr_err(DRV_NAME 4748 pr_err("arp_validate requires arp_interval\n");
4890 ": arp_validate requires arp_interval\n");
4891 return -EINVAL; 4749 return -EINVAL;
4892 } 4750 }
4893 4751
4894 arp_validate_value = bond_parse_parm(arp_validate, 4752 arp_validate_value = bond_parse_parm(arp_validate,
4895 arp_validate_tbl); 4753 arp_validate_tbl);
4896 if (arp_validate_value == -1) { 4754 if (arp_validate_value == -1) {
4897 pr_err(DRV_NAME 4755 pr_err("Error: invalid arp_validate \"%s\"\n",
4898 ": Error: invalid arp_validate \"%s\"\n",
4899 arp_validate == NULL ? "NULL" : arp_validate); 4756 arp_validate == NULL ? "NULL" : arp_validate);
4900 return -EINVAL; 4757 return -EINVAL;
4901 } 4758 }
@@ -4903,17 +4760,14 @@ static int bond_check_params(struct bond_params *params)
4903 arp_validate_value = 0; 4760 arp_validate_value = 0;
4904 4761
4905 if (miimon) { 4762 if (miimon) {
4906 pr_info(DRV_NAME 4763 pr_info("MII link monitoring set to %d ms\n", miimon);
4907 ": MII link monitoring set to %d ms\n",
4908 miimon);
4909 } else if (arp_interval) { 4764 } else if (arp_interval) {
4910 int i; 4765 int i;
4911 4766
4912 pr_info(DRV_NAME ": ARP monitoring set to %d ms," 4767 pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4913 " validate %s, with %d target(s):", 4768 arp_interval,
4914 arp_interval, 4769 arp_validate_tbl[arp_validate_value].modename,
4915 arp_validate_tbl[arp_validate_value].modename, 4770 arp_ip_count);
4916 arp_ip_count);
4917 4771
4918 for (i = 0; i < arp_ip_count; i++) 4772 for (i = 0; i < arp_ip_count; i++)
4919 pr_info(" %s", arp_ip_target[i]); 4773 pr_info(" %s", arp_ip_target[i]);
@@ -4924,21 +4778,15 @@ static int bond_check_params(struct bond_params *params)
4924 /* miimon and arp_interval not set, we need one so things 4778 /* miimon and arp_interval not set, we need one so things
4925 * work as expected, see bonding.txt for details 4779 * work as expected, see bonding.txt for details
4926 */ 4780 */
4927 pr_warning(DRV_NAME 4781 pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
4928 ": Warning: either miimon or arp_interval and "
4929 "arp_ip_target module parameters must be specified, "
4930 "otherwise bonding will not detect link failures! see "
4931 "bonding.txt for details.\n");
4932 } 4782 }
4933 4783
4934 if (primary && !USES_PRIMARY(bond_mode)) { 4784 if (primary && !USES_PRIMARY(bond_mode)) {
4935 /* currently, using a primary only makes sense 4785 /* currently, using a primary only makes sense
4936 * in active backup, TLB or ALB modes 4786 * in active backup, TLB or ALB modes
4937 */ 4787 */
4938 pr_warning(DRV_NAME 4788 pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
4939 ": Warning: %s primary device specified but has no " 4789 primary, bond_mode_name(bond_mode));
4940 "effect in %s mode\n",
4941 primary, bond_mode_name(bond_mode));
4942 primary = NULL; 4790 primary = NULL;
4943 } 4791 }
4944 4792
@@ -4946,8 +4794,7 @@ static int bond_check_params(struct bond_params *params)
4946 primary_reselect_value = bond_parse_parm(primary_reselect, 4794 primary_reselect_value = bond_parse_parm(primary_reselect,
4947 pri_reselect_tbl); 4795 pri_reselect_tbl);
4948 if (primary_reselect_value == -1) { 4796 if (primary_reselect_value == -1) {
4949 pr_err(DRV_NAME 4797 pr_err("Error: Invalid primary_reselect \"%s\"\n",
4950 ": Error: Invalid primary_reselect \"%s\"\n",
4951 primary_reselect == 4798 primary_reselect ==
4952 NULL ? "NULL" : primary_reselect); 4799 NULL ? "NULL" : primary_reselect);
4953 return -EINVAL; 4800 return -EINVAL;
@@ -4960,16 +4807,13 @@ static int bond_check_params(struct bond_params *params)
4960 fail_over_mac_value = bond_parse_parm(fail_over_mac, 4807 fail_over_mac_value = bond_parse_parm(fail_over_mac,
4961 fail_over_mac_tbl); 4808 fail_over_mac_tbl);
4962 if (fail_over_mac_value == -1) { 4809 if (fail_over_mac_value == -1) {
4963 pr_err(DRV_NAME 4810 pr_err("Error: invalid fail_over_mac \"%s\"\n",
4964 ": Error: invalid fail_over_mac \"%s\"\n",
4965 arp_validate == NULL ? "NULL" : arp_validate); 4811 arp_validate == NULL ? "NULL" : arp_validate);
4966 return -EINVAL; 4812 return -EINVAL;
4967 } 4813 }
4968 4814
4969 if (bond_mode != BOND_MODE_ACTIVEBACKUP) 4815 if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4970 pr_warning(DRV_NAME 4816 pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
4971 ": Warning: fail_over_mac only affects "
4972 "active-backup mode.\n");
4973 } else { 4817 } else {
4974 fail_over_mac_value = BOND_FOM_NONE; 4818 fail_over_mac_value = BOND_FOM_NONE;
4975 } 4819 }
@@ -5076,8 +4920,7 @@ int bond_create(struct net *net, const char *name)
5076 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", 4920 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
5077 bond_setup); 4921 bond_setup);
5078 if (!bond_dev) { 4922 if (!bond_dev) {
5079 pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n", 4923 pr_err("%s: eek! can't alloc netdev!\n", name);
5080 name);
5081 res = -ENOMEM; 4924 res = -ENOMEM;
5082 goto out; 4925 goto out;
5083 } 4926 }
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4e00b4f83641..5acd557cea9b 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -19,6 +19,9 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 */ 21 */
22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
22#include <linux/kernel.h> 25#include <linux/kernel.h>
23#include <linux/module.h> 26#include <linux/module.h>
24#include <linux/device.h> 27#include <linux/device.h>
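The hunk above defines pr_fmt() ahead of the first include, which is what lets every later hunk drop the explicit DRV_NAME prefix from its pr_err()/pr_info()/pr_warning() calls: the kernel's pr_* helpers expand to printk(KERN_<LEVEL> pr_fmt(fmt), ...), so KBUILD_MODNAME ": " is pasted onto each format string at compile time. A minimal user-space sketch of the same pattern follows; the printf stand-in and the bond0/eth0 names are illustrative only and are not part of the patch.

#include <stdio.h>

/* Stand-in for KBUILD_MODNAME; in the kernel this comes from the build system. */
#define KBUILD_MODNAME "bonding"

/* Same shape as the pr_fmt() hook added above: prepend the module name once. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Simplified pr_info(): the real macro wraps printk(KERN_INFO pr_fmt(fmt), ...). */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	const char *bond_name = "bond0";
	const char *slave_name = "eth0";

	/* Prints "bonding: bond0: Adding slave eth0." without spelling out
	 * the "bonding: " prefix at the call site. */
	pr_info("%s: Adding slave %s.\n", bond_name, slave_name);
	return 0;
}

Because the prefix now comes from the macro, each format string can stay on a single line at the call site, which is why the replacement strings throughout this diff are long one-line literals instead of being split across several quoted fragments.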
@@ -109,11 +112,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
109 goto err_no_cmd; 112 goto err_no_cmd;
110 113
111 if (command[0] == '+') { 114 if (command[0] == '+') {
112 pr_info(DRV_NAME 115 pr_info("%s is being created...\n", ifname);
113 ": %s is being created...\n", ifname);
114 rv = bond_create(net, ifname); 116 rv = bond_create(net, ifname);
115 if (rv) { 117 if (rv) {
116 pr_info(DRV_NAME ": Bond creation failed.\n"); 118 pr_info("Bond creation failed.\n");
117 res = rv; 119 res = rv;
118 } 120 }
119 } else if (command[0] == '-') { 121 } else if (command[0] == '-') {
@@ -122,12 +124,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
122 rtnl_lock(); 124 rtnl_lock();
123 bond_dev = bond_get_by_name(net, ifname); 125 bond_dev = bond_get_by_name(net, ifname);
124 if (bond_dev) { 126 if (bond_dev) {
125 pr_info(DRV_NAME ": %s is being deleted...\n", 127 pr_info("%s is being deleted...\n", ifname);
126 ifname);
127 unregister_netdevice(bond_dev); 128 unregister_netdevice(bond_dev);
128 } else { 129 } else {
129 pr_err(DRV_NAME ": unable to delete non-existent %s\n", 130 pr_err("unable to delete non-existent %s\n", ifname);
130 ifname);
131 res = -ENODEV; 131 res = -ENODEV;
132 } 132 }
133 rtnl_unlock(); 133 rtnl_unlock();
@@ -140,8 +140,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
140 return res; 140 return res;
141 141
142err_no_cmd: 142err_no_cmd:
143 pr_err(DRV_NAME ": no command found in bonding_masters." 143 pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
144 " Use +ifname or -ifname.\n");
145 return -EPERM; 144 return -EPERM;
146} 145}
147 146
@@ -225,8 +224,8 @@ static ssize_t bonding_store_slaves(struct device *d,
225 224
226 /* Quick sanity check -- is the bond interface up? */ 225 /* Quick sanity check -- is the bond interface up? */
227 if (!(bond->dev->flags & IFF_UP)) { 226 if (!(bond->dev->flags & IFF_UP)) {
228 pr_warning(DRV_NAME ": %s: doing slave updates when " 227 pr_warning("%s: doing slave updates when interface is down.\n",
229 "interface is down.\n", bond->dev->name); 228 bond->dev->name);
230 } 229 }
231 230
232 /* Note: We can't hold bond->lock here, as bond_create grabs it. */ 231 /* Note: We can't hold bond->lock here, as bond_create grabs it. */
@@ -247,17 +246,14 @@ static ssize_t bonding_store_slaves(struct device *d,
247 246
248 dev = __dev_get_by_name(dev_net(bond->dev), ifname); 247 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
249 if (!dev) { 248 if (!dev) {
250 pr_info(DRV_NAME 249 pr_info("%s: Interface %s does not exist!\n",
251 ": %s: Interface %s does not exist!\n", 250 bond->dev->name, ifname);
252 bond->dev->name, ifname);
253 ret = -ENODEV; 251 ret = -ENODEV;
254 goto out; 252 goto out;
255 } 253 }
256 254
257 if (dev->flags & IFF_UP) { 255 if (dev->flags & IFF_UP) {
258 pr_err(DRV_NAME 256 pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
259 ": %s: Error: Unable to enslave %s "
260 "because it is already up.\n",
261 bond->dev->name, dev->name); 257 bond->dev->name, dev->name);
262 ret = -EPERM; 258 ret = -EPERM;
263 goto out; 259 goto out;
@@ -266,8 +262,7 @@ static ssize_t bonding_store_slaves(struct device *d,
266 read_lock(&bond->lock); 262 read_lock(&bond->lock);
267 bond_for_each_slave(bond, slave, i) 263 bond_for_each_slave(bond, slave, i)
268 if (slave->dev == dev) { 264 if (slave->dev == dev) {
269 pr_err(DRV_NAME 265 pr_err("%s: Interface %s is already enslaved!\n",
270 ": %s: Interface %s is already enslaved!\n",
271 bond->dev->name, ifname); 266 bond->dev->name, ifname);
272 ret = -EPERM; 267 ret = -EPERM;
273 read_unlock(&bond->lock); 268 read_unlock(&bond->lock);
@@ -275,8 +270,7 @@ static ssize_t bonding_store_slaves(struct device *d,
275 } 270 }
276 read_unlock(&bond->lock); 271 read_unlock(&bond->lock);
277 272
278 pr_info(DRV_NAME ": %s: Adding slave %s.\n", 273 pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
279 bond->dev->name, ifname);
280 274
281 /* If this is the first slave, then we need to set 275 /* If this is the first slave, then we need to set
282 the master's hardware address to be the same as the 276 the master's hardware address to be the same as the
@@ -313,7 +307,7 @@ static ssize_t bonding_store_slaves(struct device *d,
313 break; 307 break;
314 } 308 }
315 if (dev) { 309 if (dev) {
316 pr_info(DRV_NAME ": %s: Removing slave %s\n", 310 pr_info("%s: Removing slave %s\n",
317 bond->dev->name, dev->name); 311 bond->dev->name, dev->name);
318 res = bond_release(bond->dev, dev); 312 res = bond_release(bond->dev, dev);
319 if (res) { 313 if (res) {
@@ -323,16 +317,16 @@ static ssize_t bonding_store_slaves(struct device *d,
323 /* set the slave MTU to the default */ 317 /* set the slave MTU to the default */
324 dev_set_mtu(dev, original_mtu); 318 dev_set_mtu(dev, original_mtu);
325 } else { 319 } else {
326 pr_err(DRV_NAME ": unable to remove non-existent" 320 pr_err("unable to remove non-existent slave %s for bond %s.\n",
327 " slave %s for bond %s.\n", 321 ifname, bond->dev->name);
328 ifname, bond->dev->name);
329 ret = -ENODEV; 322 ret = -ENODEV;
330 } 323 }
331 goto out; 324 goto out;
332 } 325 }
333 326
334err_no_cmd: 327err_no_cmd:
335 pr_err(DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name); 328 pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
329 bond->dev->name);
336 ret = -EPERM; 330 ret = -EPERM;
337 331
338out: 332out:
@@ -365,18 +359,16 @@ static ssize_t bonding_store_mode(struct device *d,
365 struct bonding *bond = to_bond(d); 359 struct bonding *bond = to_bond(d);
366 360
367 if (bond->dev->flags & IFF_UP) { 361 if (bond->dev->flags & IFF_UP) {
368 pr_err(DRV_NAME ": unable to update mode of %s" 362 pr_err("unable to update mode of %s because interface is up.\n",
369 " because interface is up.\n", bond->dev->name); 363 bond->dev->name);
370 ret = -EPERM; 364 ret = -EPERM;
371 goto out; 365 goto out;
372 } 366 }
373 367
374 new_value = bond_parse_parm(buf, bond_mode_tbl); 368 new_value = bond_parse_parm(buf, bond_mode_tbl);
375 if (new_value < 0) { 369 if (new_value < 0) {
376 pr_err(DRV_NAME 370 pr_err("%s: Ignoring invalid mode value %.*s.\n",
377 ": %s: Ignoring invalid mode value %.*s.\n", 371 bond->dev->name, (int)strlen(buf) - 1, buf);
378 bond->dev->name,
379 (int)strlen(buf) - 1, buf);
380 ret = -EINVAL; 372 ret = -EINVAL;
381 goto out; 373 goto out;
382 } else { 374 } else {
@@ -388,8 +380,8 @@ static ssize_t bonding_store_mode(struct device *d,
388 380
389 bond->params.mode = new_value; 381 bond->params.mode = new_value;
390 bond_set_mode_ops(bond, bond->params.mode); 382 bond_set_mode_ops(bond, bond->params.mode);
391 pr_info(DRV_NAME ": %s: setting mode to %s (%d).\n", 383 pr_info("%s: setting mode to %s (%d).\n",
392 bond->dev->name, bond_mode_tbl[new_value].modename, 384 bond->dev->name, bond_mode_tbl[new_value].modename,
393 new_value); 385 new_value);
394 } 386 }
395out: 387out:
@@ -421,8 +413,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
421 struct bonding *bond = to_bond(d); 413 struct bonding *bond = to_bond(d);
422 414
423 if (bond->dev->flags & IFF_UP) { 415 if (bond->dev->flags & IFF_UP) {
424 pr_err(DRV_NAME 416 pr_err("%s: Interface is up. Unable to update xmit policy.\n",
425 "%s: Interface is up. Unable to update xmit policy.\n",
426 bond->dev->name); 417 bond->dev->name);
427 ret = -EPERM; 418 ret = -EPERM;
428 goto out; 419 goto out;
@@ -430,8 +421,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
430 421
431 new_value = bond_parse_parm(buf, xmit_hashtype_tbl); 422 new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
432 if (new_value < 0) { 423 if (new_value < 0) {
433 pr_err(DRV_NAME 424 pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
434 ": %s: Ignoring invalid xmit hash policy value %.*s.\n",
435 bond->dev->name, 425 bond->dev->name,
436 (int)strlen(buf) - 1, buf); 426 (int)strlen(buf) - 1, buf);
437 ret = -EINVAL; 427 ret = -EINVAL;
@@ -439,7 +429,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
439 } else { 429 } else {
440 bond->params.xmit_policy = new_value; 430 bond->params.xmit_policy = new_value;
441 bond_set_mode_ops(bond, bond->params.mode); 431 bond_set_mode_ops(bond, bond->params.mode);
442 pr_info(DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n", 432 pr_info("%s: setting xmit hash policy to %s (%d).\n",
443 bond->dev->name, 433 bond->dev->name,
444 xmit_hashtype_tbl[new_value].modename, new_value); 434 xmit_hashtype_tbl[new_value].modename, new_value);
445 } 435 }
@@ -472,20 +462,18 @@ static ssize_t bonding_store_arp_validate(struct device *d,
472 462
473 new_value = bond_parse_parm(buf, arp_validate_tbl); 463 new_value = bond_parse_parm(buf, arp_validate_tbl);
474 if (new_value < 0) { 464 if (new_value < 0) {
475 pr_err(DRV_NAME 465 pr_err("%s: Ignoring invalid arp_validate value %s\n",
476 ": %s: Ignoring invalid arp_validate value %s\n",
477 bond->dev->name, buf); 466 bond->dev->name, buf);
478 return -EINVAL; 467 return -EINVAL;
479 } 468 }
480 if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) { 469 if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
481 pr_err(DRV_NAME 470 pr_err("%s: arp_validate only supported in active-backup mode.\n",
482 ": %s: arp_validate only supported in active-backup mode.\n",
483 bond->dev->name); 471 bond->dev->name);
484 return -EINVAL; 472 return -EINVAL;
485 } 473 }
486 pr_info(DRV_NAME ": %s: setting arp_validate to %s (%d).\n", 474 pr_info("%s: setting arp_validate to %s (%d).\n",
487 bond->dev->name, arp_validate_tbl[new_value].modename, 475 bond->dev->name, arp_validate_tbl[new_value].modename,
488 new_value); 476 new_value);
489 477
490 if (!bond->params.arp_validate && new_value) 478 if (!bond->params.arp_validate && new_value)
491 bond_register_arp(bond); 479 bond_register_arp(bond);
@@ -523,24 +511,22 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
523 struct bonding *bond = to_bond(d); 511 struct bonding *bond = to_bond(d);
524 512
525 if (bond->slave_cnt != 0) { 513 if (bond->slave_cnt != 0) {
526 pr_err(DRV_NAME 514 pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
527 ": %s: Can't alter fail_over_mac with slaves in bond.\n",
528 bond->dev->name); 515 bond->dev->name);
529 return -EPERM; 516 return -EPERM;
530 } 517 }
531 518
532 new_value = bond_parse_parm(buf, fail_over_mac_tbl); 519 new_value = bond_parse_parm(buf, fail_over_mac_tbl);
533 if (new_value < 0) { 520 if (new_value < 0) {
534 pr_err(DRV_NAME 521 pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
535 ": %s: Ignoring invalid fail_over_mac value %s.\n",
536 bond->dev->name, buf); 522 bond->dev->name, buf);
537 return -EINVAL; 523 return -EINVAL;
538 } 524 }
539 525
540 bond->params.fail_over_mac = new_value; 526 bond->params.fail_over_mac = new_value;
541 pr_info(DRV_NAME ": %s: Setting fail_over_mac to %s (%d).\n", 527 pr_info("%s: Setting fail_over_mac to %s (%d).\n",
542 bond->dev->name, fail_over_mac_tbl[new_value].modename, 528 bond->dev->name, fail_over_mac_tbl[new_value].modename,
543 new_value); 529 new_value);
544 530
545 return count; 531 return count;
546} 532}
@@ -571,31 +557,26 @@ static ssize_t bonding_store_arp_interval(struct device *d,
571 struct bonding *bond = to_bond(d); 557 struct bonding *bond = to_bond(d);
572 558
573 if (sscanf(buf, "%d", &new_value) != 1) { 559 if (sscanf(buf, "%d", &new_value) != 1) {
574 pr_err(DRV_NAME 560 pr_err("%s: no arp_interval value specified.\n",
575 ": %s: no arp_interval value specified.\n",
576 bond->dev->name); 561 bond->dev->name);
577 ret = -EINVAL; 562 ret = -EINVAL;
578 goto out; 563 goto out;
579 } 564 }
580 if (new_value < 0) { 565 if (new_value < 0) {
581 pr_err(DRV_NAME 566 pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
582 ": %s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
583 bond->dev->name, new_value, INT_MAX); 567 bond->dev->name, new_value, INT_MAX);
584 ret = -EINVAL; 568 ret = -EINVAL;
585 goto out; 569 goto out;
586 } 570 }
587 571
588 pr_info(DRV_NAME 572 pr_info("%s: Setting ARP monitoring interval to %d.\n",
589 ": %s: Setting ARP monitoring interval to %d.\n", 573 bond->dev->name, new_value);
590 bond->dev->name, new_value);
591 bond->params.arp_interval = new_value; 574 bond->params.arp_interval = new_value;
592 if (bond->params.arp_interval) 575 if (bond->params.arp_interval)
593 bond->dev->priv_flags |= IFF_MASTER_ARPMON; 576 bond->dev->priv_flags |= IFF_MASTER_ARPMON;
594 if (bond->params.miimon) { 577 if (bond->params.miimon) {
595 pr_info(DRV_NAME 578 pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
596 ": %s: ARP monitoring cannot be used with MII monitoring. " 579 bond->dev->name, bond->dev->name);
597 "%s Disabling MII monitoring.\n",
598 bond->dev->name, bond->dev->name);
599 bond->params.miimon = 0; 580 bond->params.miimon = 0;
600 if (delayed_work_pending(&bond->mii_work)) { 581 if (delayed_work_pending(&bond->mii_work)) {
601 cancel_delayed_work(&bond->mii_work); 582 cancel_delayed_work(&bond->mii_work);
@@ -603,10 +584,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
603 } 584 }
604 } 585 }
605 if (!bond->params.arp_targets[0]) { 586 if (!bond->params.arp_targets[0]) {
606 pr_info(DRV_NAME 587 pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
607 ": %s: ARP monitoring has been set up, " 588 bond->dev->name);
608 "but no ARP targets have been specified.\n",
609 bond->dev->name);
610 } 589 }
611 if (bond->dev->flags & IFF_UP) { 590 if (bond->dev->flags & IFF_UP) {
612 /* If the interface is up, we may need to fire off 591 /* If the interface is up, we may need to fire off
@@ -666,8 +645,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
666 /* look for adds */ 645 /* look for adds */
667 if (buf[0] == '+') { 646 if (buf[0] == '+') {
668 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { 647 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
669 pr_err(DRV_NAME 648 pr_err("%s: invalid ARP target %pI4 specified for addition\n",
670 ": %s: invalid ARP target %pI4 specified for addition\n",
671 bond->dev->name, &newtarget); 649 bond->dev->name, &newtarget);
672 ret = -EINVAL; 650 ret = -EINVAL;
673 goto out; 651 goto out;
@@ -675,23 +653,20 @@ static ssize_t bonding_store_arp_targets(struct device *d,
675 /* look for an empty slot to put the target in, and check for dupes */ 653 /* look for an empty slot to put the target in, and check for dupes */
676 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { 654 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
677 if (targets[i] == newtarget) { /* duplicate */ 655 if (targets[i] == newtarget) { /* duplicate */
678 pr_err(DRV_NAME 656 pr_err("%s: ARP target %pI4 is already present\n",
679 ": %s: ARP target %pI4 is already present\n",
680 bond->dev->name, &newtarget); 657 bond->dev->name, &newtarget);
681 ret = -EINVAL; 658 ret = -EINVAL;
682 goto out; 659 goto out;
683 } 660 }
684 if (targets[i] == 0) { 661 if (targets[i] == 0) {
685 pr_info(DRV_NAME 662 pr_info("%s: adding ARP target %pI4.\n",
686 ": %s: adding ARP target %pI4.\n", 663 bond->dev->name, &newtarget);
687 bond->dev->name, &newtarget);
688 done = 1; 664 done = 1;
689 targets[i] = newtarget; 665 targets[i] = newtarget;
690 } 666 }
691 } 667 }
692 if (!done) { 668 if (!done) {
693 pr_err(DRV_NAME 669 pr_err("%s: ARP target table is full!\n",
694 ": %s: ARP target table is full!\n",
695 bond->dev->name); 670 bond->dev->name);
696 ret = -EINVAL; 671 ret = -EINVAL;
697 goto out; 672 goto out;
@@ -699,8 +674,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
699 674
700 } else if (buf[0] == '-') { 675 } else if (buf[0] == '-') {
701 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { 676 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
702 pr_err(DRV_NAME 677 pr_err("%s: invalid ARP target %pI4 specified for removal\n",
703 ": %s: invalid ARP target %pI4 specified for removal\n",
704 bond->dev->name, &newtarget); 678 bond->dev->name, &newtarget);
705 ret = -EINVAL; 679 ret = -EINVAL;
706 goto out; 680 goto out;
@@ -709,9 +683,8 @@ static ssize_t bonding_store_arp_targets(struct device *d,
709 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { 683 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
710 if (targets[i] == newtarget) { 684 if (targets[i] == newtarget) {
711 int j; 685 int j;
712 pr_info(DRV_NAME 686 pr_info("%s: removing ARP target %pI4.\n",
713 ": %s: removing ARP target %pI4.\n", 687 bond->dev->name, &newtarget);
714 bond->dev->name, &newtarget);
715 for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++) 688 for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
716 targets[j] = targets[j+1]; 689 targets[j] = targets[j+1];
717 690
@@ -720,16 +693,14 @@ static ssize_t bonding_store_arp_targets(struct device *d,
720 } 693 }
721 } 694 }
722 if (!done) { 695 if (!done) {
723 pr_info(DRV_NAME 696 pr_info("%s: unable to remove nonexistent ARP target %pI4.\n",
724 ": %s: unable to remove nonexistent ARP target %pI4.\n", 697 bond->dev->name, &newtarget);
725 bond->dev->name, &newtarget);
726 ret = -EINVAL; 698 ret = -EINVAL;
727 goto out; 699 goto out;
728 } 700 }
729 } else { 701 } else {
730 pr_err(DRV_NAME ": no command found in arp_ip_targets file" 702 pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
731 " for bond %s. Use +<addr> or -<addr>.\n", 703 bond->dev->name);
732 bond->dev->name);
733 ret = -EPERM; 704 ret = -EPERM;
734 goto out; 705 goto out;
735 } 706 }
@@ -761,41 +732,34 @@ static ssize_t bonding_store_downdelay(struct device *d,
761 struct bonding *bond = to_bond(d); 732 struct bonding *bond = to_bond(d);
762 733
763 if (!(bond->params.miimon)) { 734 if (!(bond->params.miimon)) {
764 pr_err(DRV_NAME 735 pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
765 ": %s: Unable to set down delay as MII monitoring is disabled\n",
766 bond->dev->name); 736 bond->dev->name);
767 ret = -EPERM; 737 ret = -EPERM;
768 goto out; 738 goto out;
769 } 739 }
770 740
771 if (sscanf(buf, "%d", &new_value) != 1) { 741 if (sscanf(buf, "%d", &new_value) != 1) {
772 pr_err(DRV_NAME 742 pr_err("%s: no down delay value specified.\n", bond->dev->name);
773 ": %s: no down delay value specified.\n",
774 bond->dev->name);
775 ret = -EINVAL; 743 ret = -EINVAL;
776 goto out; 744 goto out;
777 } 745 }
778 if (new_value < 0) { 746 if (new_value < 0) {
779 pr_err(DRV_NAME 747 pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
780 ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
781 bond->dev->name, new_value, 1, INT_MAX); 748 bond->dev->name, new_value, 1, INT_MAX);
782 ret = -EINVAL; 749 ret = -EINVAL;
783 goto out; 750 goto out;
784 } else { 751 } else {
785 if ((new_value % bond->params.miimon) != 0) { 752 if ((new_value % bond->params.miimon) != 0) {
786 pr_warning(DRV_NAME 753 pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
787 ": %s: Warning: down delay (%d) is not a "
788 "multiple of miimon (%d), delay rounded "
789 "to %d ms\n",
790 bond->dev->name, new_value, 754 bond->dev->name, new_value,
791 bond->params.miimon, 755 bond->params.miimon,
792 (new_value / bond->params.miimon) * 756 (new_value / bond->params.miimon) *
793 bond->params.miimon); 757 bond->params.miimon);
794 } 758 }
795 bond->params.downdelay = new_value / bond->params.miimon; 759 bond->params.downdelay = new_value / bond->params.miimon;
796 pr_info(DRV_NAME ": %s: Setting down delay to %d.\n", 760 pr_info("%s: Setting down delay to %d.\n",
797 bond->dev->name, 761 bond->dev->name,
798 bond->params.downdelay * bond->params.miimon); 762 bond->params.downdelay * bond->params.miimon);
799 763
800 } 764 }
801 765
@@ -823,41 +787,35 @@ static ssize_t bonding_store_updelay(struct device *d,
823 struct bonding *bond = to_bond(d); 787 struct bonding *bond = to_bond(d);
824 788
825 if (!(bond->params.miimon)) { 789 if (!(bond->params.miimon)) {
826 pr_err(DRV_NAME 790 pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
827 ": %s: Unable to set up delay as MII monitoring is disabled\n",
828 bond->dev->name); 791 bond->dev->name);
829 ret = -EPERM; 792 ret = -EPERM;
830 goto out; 793 goto out;
831 } 794 }
832 795
833 if (sscanf(buf, "%d", &new_value) != 1) { 796 if (sscanf(buf, "%d", &new_value) != 1) {
834 pr_err(DRV_NAME 797 pr_err("%s: no up delay value specified.\n",
835 ": %s: no up delay value specified.\n",
836 bond->dev->name); 798 bond->dev->name);
837 ret = -EINVAL; 799 ret = -EINVAL;
838 goto out; 800 goto out;
839 } 801 }
840 if (new_value < 0) { 802 if (new_value < 0) {
841 pr_err(DRV_NAME 803 pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
842 ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
843 bond->dev->name, new_value, 1, INT_MAX); 804 bond->dev->name, new_value, 1, INT_MAX);
844 ret = -EINVAL; 805 ret = -EINVAL;
845 goto out; 806 goto out;
846 } else { 807 } else {
847 if ((new_value % bond->params.miimon) != 0) { 808 if ((new_value % bond->params.miimon) != 0) {
848 pr_warning(DRV_NAME 809 pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
849 ": %s: Warning: up delay (%d) is not a "
850 "multiple of miimon (%d), updelay rounded "
851 "to %d ms\n",
852 bond->dev->name, new_value, 810 bond->dev->name, new_value,
853 bond->params.miimon, 811 bond->params.miimon,
854 (new_value / bond->params.miimon) * 812 (new_value / bond->params.miimon) *
855 bond->params.miimon); 813 bond->params.miimon);
856 } 814 }
857 bond->params.updelay = new_value / bond->params.miimon; 815 bond->params.updelay = new_value / bond->params.miimon;
858 pr_info(DRV_NAME ": %s: Setting up delay to %d.\n", 816 pr_info("%s: Setting up delay to %d.\n",
859 bond->dev->name, bond->params.updelay * bond->params.miimon); 817 bond->dev->name,
860 818 bond->params.updelay * bond->params.miimon);
861 } 819 }
862 820
863out: 821out:
@@ -889,16 +847,14 @@ static ssize_t bonding_store_lacp(struct device *d,
889 struct bonding *bond = to_bond(d); 847 struct bonding *bond = to_bond(d);
890 848
891 if (bond->dev->flags & IFF_UP) { 849 if (bond->dev->flags & IFF_UP) {
892 pr_err(DRV_NAME 850 pr_err("%s: Unable to update LACP rate because interface is up.\n",
893 ": %s: Unable to update LACP rate because interface is up.\n",
894 bond->dev->name); 851 bond->dev->name);
895 ret = -EPERM; 852 ret = -EPERM;
896 goto out; 853 goto out;
897 } 854 }
898 855
899 if (bond->params.mode != BOND_MODE_8023AD) { 856 if (bond->params.mode != BOND_MODE_8023AD) {
900 pr_err(DRV_NAME 857 pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
901 ": %s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
902 bond->dev->name); 858 bond->dev->name);
903 ret = -EPERM; 859 ret = -EPERM;
904 goto out; 860 goto out;
@@ -908,12 +864,11 @@ static ssize_t bonding_store_lacp(struct device *d,
908 864
909 if ((new_value == 1) || (new_value == 0)) { 865 if ((new_value == 1) || (new_value == 0)) {
910 bond->params.lacp_fast = new_value; 866 bond->params.lacp_fast = new_value;
911 pr_info(DRV_NAME ": %s: Setting LACP rate to %s (%d).\n", 867 pr_info("%s: Setting LACP rate to %s (%d).\n",
912 bond->dev->name, bond_lacp_tbl[new_value].modename, 868 bond->dev->name, bond_lacp_tbl[new_value].modename,
913 new_value); 869 new_value);
914 } else { 870 } else {
915 pr_err(DRV_NAME 871 pr_err("%s: Ignoring invalid LACP rate value %.*s.\n",
916 ": %s: Ignoring invalid LACP rate value %.*s.\n",
917 bond->dev->name, (int)strlen(buf) - 1, buf); 872 bond->dev->name, (int)strlen(buf) - 1, buf);
918 ret = -EINVAL; 873 ret = -EINVAL;
919 } 874 }
@@ -943,9 +898,8 @@ static ssize_t bonding_store_ad_select(struct device *d,
943 struct bonding *bond = to_bond(d); 898 struct bonding *bond = to_bond(d);
944 899
945 if (bond->dev->flags & IFF_UP) { 900 if (bond->dev->flags & IFF_UP) {
946 pr_err(DRV_NAME 901 pr_err("%s: Unable to update ad_select because interface is up.\n",
947 ": %s: Unable to update ad_select because interface " 902 bond->dev->name);
948 "is up.\n", bond->dev->name);
949 ret = -EPERM; 903 ret = -EPERM;
950 goto out; 904 goto out;
951 } 905 }
@@ -954,13 +908,11 @@ static ssize_t bonding_store_ad_select(struct device *d,
954 908
955 if (new_value != -1) { 909 if (new_value != -1) {
956 bond->params.ad_select = new_value; 910 bond->params.ad_select = new_value;
957 pr_info(DRV_NAME 911 pr_info("%s: Setting ad_select to %s (%d).\n",
958 ": %s: Setting ad_select to %s (%d).\n", 912 bond->dev->name, ad_select_tbl[new_value].modename,
959 bond->dev->name, ad_select_tbl[new_value].modename, 913 new_value);
960 new_value);
961 } else { 914 } else {
962 pr_err(DRV_NAME 915 pr_err("%s: Ignoring invalid ad_select value %.*s.\n",
963 ": %s: Ignoring invalid ad_select value %.*s.\n",
964 bond->dev->name, (int)strlen(buf) - 1, buf); 916 bond->dev->name, (int)strlen(buf) - 1, buf);
965 ret = -EINVAL; 917 ret = -EINVAL;
966 } 918 }
@@ -990,15 +942,13 @@ static ssize_t bonding_store_n_grat_arp(struct device *d,
990 struct bonding *bond = to_bond(d); 942 struct bonding *bond = to_bond(d);
991 943
992 if (sscanf(buf, "%d", &new_value) != 1) { 944 if (sscanf(buf, "%d", &new_value) != 1) {
993 pr_err(DRV_NAME 945 pr_err("%s: no num_grat_arp value specified.\n",
994 ": %s: no num_grat_arp value specified.\n",
995 bond->dev->name); 946 bond->dev->name);
996 ret = -EINVAL; 947 ret = -EINVAL;
997 goto out; 948 goto out;
998 } 949 }
999 if (new_value < 0 || new_value > 255) { 950 if (new_value < 0 || new_value > 255) {
1000 pr_err(DRV_NAME 951 pr_err("%s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
1001 ": %s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
1002 bond->dev->name, new_value); 952 bond->dev->name, new_value);
1003 ret = -EINVAL; 953 ret = -EINVAL;
1004 goto out; 954 goto out;
@@ -1031,16 +981,14 @@ static ssize_t bonding_store_n_unsol_na(struct device *d,
1031 struct bonding *bond = to_bond(d); 981 struct bonding *bond = to_bond(d);
1032 982
1033 if (sscanf(buf, "%d", &new_value) != 1) { 983 if (sscanf(buf, "%d", &new_value) != 1) {
1034 pr_err(DRV_NAME 984 pr_err("%s: no num_unsol_na value specified.\n",
1035 ": %s: no num_unsol_na value specified.\n",
1036 bond->dev->name); 985 bond->dev->name);
1037 ret = -EINVAL; 986 ret = -EINVAL;
1038 goto out; 987 goto out;
1039 } 988 }
1040 989
1041 if (new_value < 0 || new_value > 255) { 990 if (new_value < 0 || new_value > 255) {
1042 pr_err(DRV_NAME 991 pr_err("%s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
1043 ": %s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
1044 bond->dev->name, new_value); 992 bond->dev->name, new_value);
1045 ret = -EINVAL; 993 ret = -EINVAL;
1046 goto out; 994 goto out;
@@ -1075,40 +1023,31 @@ static ssize_t bonding_store_miimon(struct device *d,
1075 struct bonding *bond = to_bond(d); 1023 struct bonding *bond = to_bond(d);
1076 1024
1077 if (sscanf(buf, "%d", &new_value) != 1) { 1025 if (sscanf(buf, "%d", &new_value) != 1) {
1078 pr_err(DRV_NAME 1026 pr_err("%s: no miimon value specified.\n",
1079 ": %s: no miimon value specified.\n",
1080 bond->dev->name); 1027 bond->dev->name);
1081 ret = -EINVAL; 1028 ret = -EINVAL;
1082 goto out; 1029 goto out;
1083 } 1030 }
1084 if (new_value < 0) { 1031 if (new_value < 0) {
1085 pr_err(DRV_NAME 1032 pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
1086 ": %s: Invalid miimon value %d not in range %d-%d; rejected.\n",
1087 bond->dev->name, new_value, 1, INT_MAX); 1033 bond->dev->name, new_value, 1, INT_MAX);
1088 ret = -EINVAL; 1034 ret = -EINVAL;
1089 goto out; 1035 goto out;
1090 } else { 1036 } else {
1091 pr_info(DRV_NAME 1037 pr_info("%s: Setting MII monitoring interval to %d.\n",
1092 ": %s: Setting MII monitoring interval to %d.\n", 1038 bond->dev->name, new_value);
1093 bond->dev->name, new_value);
1094 bond->params.miimon = new_value; 1039 bond->params.miimon = new_value;
1095 if (bond->params.updelay) 1040 if (bond->params.updelay)
1096 pr_info(DRV_NAME 1041 pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
1097 ": %s: Note: Updating updelay (to %d) " 1042 bond->dev->name,
1098 "since it is a multiple of the miimon value.\n", 1043 bond->params.updelay * bond->params.miimon);
1099 bond->dev->name,
1100 bond->params.updelay * bond->params.miimon);
1101 if (bond->params.downdelay) 1044 if (bond->params.downdelay)
1102 pr_info(DRV_NAME 1045 pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
1103 ": %s: Note: Updating downdelay (to %d) " 1046 bond->dev->name,
1104 "since it is a multiple of the miimon value.\n", 1047 bond->params.downdelay * bond->params.miimon);
1105 bond->dev->name,
1106 bond->params.downdelay * bond->params.miimon);
1107 if (bond->params.arp_interval) { 1048 if (bond->params.arp_interval) {
1108 pr_info(DRV_NAME 1049 pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
1109 ": %s: MII monitoring cannot be used with " 1050 bond->dev->name);
1110 "ARP monitoring. Disabling ARP monitoring...\n",
1111 bond->dev->name);
1112 bond->params.arp_interval = 0; 1051 bond->params.arp_interval = 0;
1113 bond->dev->priv_flags &= ~IFF_MASTER_ARPMON; 1052 bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
1114 if (bond->params.arp_validate) { 1053 if (bond->params.arp_validate) {
@@ -1176,17 +1115,15 @@ static ssize_t bonding_store_primary(struct device *d,
1176 write_lock_bh(&bond->curr_slave_lock); 1115 write_lock_bh(&bond->curr_slave_lock);
1177 1116
1178 if (!USES_PRIMARY(bond->params.mode)) { 1117 if (!USES_PRIMARY(bond->params.mode)) {
1179 pr_info(DRV_NAME 1118 pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
1180 ": %s: Unable to set primary slave; %s is in mode %d\n", 1119 bond->dev->name, bond->dev->name, bond->params.mode);
1181 bond->dev->name, bond->dev->name, bond->params.mode);
1182 } else { 1120 } else {
1183 bond_for_each_slave(bond, slave, i) { 1121 bond_for_each_slave(bond, slave, i) {
1184 if (strnicmp 1122 if (strnicmp
1185 (slave->dev->name, buf, 1123 (slave->dev->name, buf,
1186 strlen(slave->dev->name)) == 0) { 1124 strlen(slave->dev->name)) == 0) {
1187 pr_info(DRV_NAME 1125 pr_info("%s: Setting %s as primary slave.\n",
1188 ": %s: Setting %s as primary slave.\n", 1126 bond->dev->name, slave->dev->name);
1189 bond->dev->name, slave->dev->name);
1190 bond->primary_slave = slave; 1127 bond->primary_slave = slave;
1191 strcpy(bond->params.primary, slave->dev->name); 1128 strcpy(bond->params.primary, slave->dev->name);
1192 bond_select_active_slave(bond); 1129 bond_select_active_slave(bond);
@@ -1197,15 +1134,13 @@ static ssize_t bonding_store_primary(struct device *d,
1197 /* if we got here, then we didn't match the name of any slave */ 1134 /* if we got here, then we didn't match the name of any slave */
1198 1135
1199 if (strlen(buf) == 0 || buf[0] == '\n') { 1136 if (strlen(buf) == 0 || buf[0] == '\n') {
1200 pr_info(DRV_NAME 1137 pr_info("%s: Setting primary slave to None.\n",
1201 ": %s: Setting primary slave to None.\n", 1138 bond->dev->name);
1202 bond->dev->name);
1203 bond->primary_slave = NULL; 1139 bond->primary_slave = NULL;
1204 bond_select_active_slave(bond); 1140 bond_select_active_slave(bond);
1205 } else { 1141 } else {
1206 pr_info(DRV_NAME 1142 pr_info("%s: Unable to set %.*s as primary slave as it is not a slave.\n",
1207 ": %s: Unable to set %.*s as primary slave as it is not a slave.\n", 1143 bond->dev->name, (int)strlen(buf) - 1, buf);
1208 bond->dev->name, (int)strlen(buf) - 1, buf);
1209 } 1144 }
1210 } 1145 }
1211out: 1146out:
@@ -1244,8 +1179,7 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
1244 1179
1245 new_value = bond_parse_parm(buf, pri_reselect_tbl); 1180 new_value = bond_parse_parm(buf, pri_reselect_tbl);
1246 if (new_value < 0) { 1181 if (new_value < 0) {
1247 pr_err(DRV_NAME 1182 pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n",
1248 ": %s: Ignoring invalid primary_reselect value %.*s.\n",
1249 bond->dev->name, 1183 bond->dev->name,
1250 (int) strlen(buf) - 1, buf); 1184 (int) strlen(buf) - 1, buf);
1251 ret = -EINVAL; 1185 ret = -EINVAL;
@@ -1253,7 +1187,7 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
1253 } 1187 }
1254 1188
1255 bond->params.primary_reselect = new_value; 1189 bond->params.primary_reselect = new_value;
1256 pr_info(DRV_NAME ": %s: setting primary_reselect to %s (%d).\n", 1190 pr_info("%s: setting primary_reselect to %s (%d).\n",
1257 bond->dev->name, pri_reselect_tbl[new_value].modename, 1191 bond->dev->name, pri_reselect_tbl[new_value].modename,
1258 new_value); 1192 new_value);
1259 1193
@@ -1291,20 +1225,18 @@ static ssize_t bonding_store_carrier(struct device *d,
1291 1225
1292 1226
1293 if (sscanf(buf, "%d", &new_value) != 1) { 1227 if (sscanf(buf, "%d", &new_value) != 1) {
1294 pr_err(DRV_NAME 1228 pr_err("%s: no use_carrier value specified.\n",
1295 ": %s: no use_carrier value specified.\n",
1296 bond->dev->name); 1229 bond->dev->name);
1297 ret = -EINVAL; 1230 ret = -EINVAL;
1298 goto out; 1231 goto out;
1299 } 1232 }
1300 if ((new_value == 0) || (new_value == 1)) { 1233 if ((new_value == 0) || (new_value == 1)) {
1301 bond->params.use_carrier = new_value; 1234 bond->params.use_carrier = new_value;
1302 pr_info(DRV_NAME ": %s: Setting use_carrier to %d.\n", 1235 pr_info("%s: Setting use_carrier to %d.\n",
1303 bond->dev->name, new_value); 1236 bond->dev->name, new_value);
1304 } else { 1237 } else {
1305 pr_info(DRV_NAME 1238 pr_info("%s: Ignoring invalid use_carrier value %d.\n",
1306 ": %s: Ignoring invalid use_carrier value %d.\n", 1239 bond->dev->name, new_value);
1307 bond->dev->name, new_value);
1308 } 1240 }
1309out: 1241out:
1310 return count; 1242 return count;
@@ -1349,8 +1281,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
1349 write_lock_bh(&bond->curr_slave_lock); 1281 write_lock_bh(&bond->curr_slave_lock);
1350 1282
1351 if (!USES_PRIMARY(bond->params.mode)) 1283 if (!USES_PRIMARY(bond->params.mode))
1352 pr_info(DRV_NAME ": %s: Unable to change active slave;" 1284 pr_info("%s: Unable to change active slave; %s is in mode %d\n",
1353 " %s is in mode %d\n",
1354 bond->dev->name, bond->dev->name, bond->params.mode); 1285 bond->dev->name, bond->dev->name, bond->params.mode);
1355 else { 1286 else {
1356 bond_for_each_slave(bond, slave, i) { 1287 bond_for_each_slave(bond, slave, i) {
@@ -1361,9 +1292,9 @@ static ssize_t bonding_store_active_slave(struct device *d,
1361 new_active = slave; 1292 new_active = slave;
1362 if (new_active == old_active) { 1293 if (new_active == old_active) {
1363 /* do nothing */ 1294 /* do nothing */
1364 pr_info(DRV_NAME 1295 pr_info("%s: %s is already the current active slave.\n",
1365 ": %s: %s is already the current active slave.\n", 1296 bond->dev->name,
1366 bond->dev->name, slave->dev->name); 1297 slave->dev->name);
1367 goto out; 1298 goto out;
1368 } 1299 }
1369 else { 1300 else {
@@ -1371,16 +1302,15 @@ static ssize_t bonding_store_active_slave(struct device *d,
1371 (old_active) && 1302 (old_active) &&
1372 (new_active->link == BOND_LINK_UP) && 1303 (new_active->link == BOND_LINK_UP) &&
1373 IS_UP(new_active->dev)) { 1304 IS_UP(new_active->dev)) {
1374 pr_info(DRV_NAME 1305 pr_info("%s: Setting %s as active slave.\n",
1375 ": %s: Setting %s as active slave.\n", 1306 bond->dev->name,
1376 bond->dev->name, slave->dev->name); 1307 slave->dev->name);
1377 bond_change_active_slave(bond, new_active); 1308 bond_change_active_slave(bond, new_active);
1378 } 1309 }
1379 else { 1310 else {
1380 pr_info(DRV_NAME 1311 pr_info("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
1381 ": %s: Could not set %s as active slave; " 1312 bond->dev->name,
1382 "either %s is down or the link is down.\n", 1313 slave->dev->name,
1383 bond->dev->name, slave->dev->name,
1384 slave->dev->name); 1314 slave->dev->name);
1385 } 1315 }
1386 goto out; 1316 goto out;
@@ -1391,14 +1321,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
1391 /* if we got here, then we didn't match the name of any slave */ 1321 /* if we got here, then we didn't match the name of any slave */
1392 1322
1393 if (strlen(buf) == 0 || buf[0] == '\n') { 1323 if (strlen(buf) == 0 || buf[0] == '\n') {
1394 pr_info(DRV_NAME 1324 pr_info("%s: Setting active slave to None.\n",
1395 ": %s: Setting active slave to None.\n",
1396 bond->dev->name); 1325 bond->dev->name);
1397 bond->primary_slave = NULL; 1326 bond->primary_slave = NULL;
1398 bond_select_active_slave(bond); 1327 bond_select_active_slave(bond);
1399 } else { 1328 } else {
1400 pr_info(DRV_NAME ": %s: Unable to set %.*s" 1329 pr_info("%s: Unable to set %.*s as active slave as it is not a slave.\n",
1401 " as active slave as it is not a slave.\n",
1402 bond->dev->name, (int)strlen(buf) - 1, buf); 1330 bond->dev->name, (int)strlen(buf) - 1, buf);
1403 } 1331 }
1404 } 1332 }
@@ -1600,8 +1528,7 @@ int bond_create_sysfs(void)
1600 /* Is someone being kinky and naming a device bonding_master? */ 1528 /* Is someone being kinky and naming a device bonding_master? */
1601 if (__dev_get_by_name(&init_net, 1529 if (__dev_get_by_name(&init_net,
1602 class_attr_bonding_masters.attr.name)) 1530 class_attr_bonding_masters.attr.name))
1603 pr_err("network device named %s already " 1531 pr_err("network device named %s already exists in sysfs",
1604 "exists in sysfs",
1605 class_attr_bonding_masters.attr.name); 1532 class_attr_bonding_masters.attr.name);
1606 ret = 0; 1533 ret = 0;
1607 } 1534 }
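
The bond_main.c and bond_sysfs.c hunks above all follow the same pattern: a single pr_fmt() definition before the first include makes pr_err()/pr_info()/pr_warning() prepend the module name automatically, so each call site can drop the explicit DRV_NAME prefix and keep its format string on one line (which also makes the messages greppable). A minimal user-space sketch of the same macro trick follows; mini_pr_err(), mini_pr_fmt() and MINI_MODNAME are illustrative stand-ins, not the kernel's printk machinery.

/* Minimal sketch of the pr_fmt() prefixing idiom used above. */
#include <stdio.h>

#define MINI_MODNAME "bonding"

/* Every log macro routes its format through mini_pr_fmt() first,
 * so the module prefix is added exactly once, at compile time. */
#define mini_pr_fmt(fmt) MINI_MODNAME ": " fmt
#define mini_pr_err(fmt, ...) \
	fprintf(stderr, mini_pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "bonding: bond0: Interface eth0 does not exist!" */
	mini_pr_err("%s: Interface %s does not exist!\n", "bond0", "eth0");
	return 0;
}
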
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 8c485aad1b94..05b751719bd5 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -50,7 +50,7 @@ config CAN_TI_HECC
50 50
51config CAN_MCP251X 51config CAN_MCP251X
52 tristate "Microchip MCP251x SPI CAN controllers" 52 tristate "Microchip MCP251x SPI CAN controllers"
53 depends on CAN_DEV && SPI 53 depends on CAN_DEV && SPI && HAS_DMA
54 ---help--- 54 ---help---
55 Driver for the Microchip MCP251x SPI CAN controllers. 55 Driver for the Microchip MCP251x SPI CAN controllers.
56 56
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index cbe3fce53e3b..d0ec17878ffc 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -474,7 +474,7 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
474 reg_msr = at91_read(priv, AT91_MSR(mb)); 474 reg_msr = at91_read(priv, AT91_MSR(mb));
475 if (reg_msr & AT91_MSR_MRTR) 475 if (reg_msr & AT91_MSR_MRTR)
476 cf->can_id |= CAN_RTR_FLAG; 476 cf->can_id |= CAN_RTR_FLAG;
477 cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8); 477 cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
478 478
479 *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb)); 479 *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
480 *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb)); 480 *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index c7fc1de28173..0ec1524523cc 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -392,7 +392,7 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
392 cf->can_id |= CAN_RTR_FLAG; 392 cf->can_id |= CAN_RTR_FLAG;
393 393
394 /* get data length code */ 394 /* get data length code */
395 cf->can_dlc = bfin_read16(&reg->chl[obj].dlc); 395 cf->can_dlc = get_can_dlc(bfin_read16(&reg->chl[obj].dlc) & 0xF);
396 396
397 /* get payload */ 397 /* get payload */
398 for (i = 0; i < 8; i += 2) { 398 for (i = 0; i < 8; i += 2) {
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 78b1b69b2921..9c5a1537939c 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -403,9 +403,8 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
403 403
404 for (i = 1; i < RXBDAT_OFF; i++) 404 for (i = 1; i < RXBDAT_OFF; i++)
405 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); 405 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
406 len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK; 406
407 if (len > 8) 407 len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
408 len = 8;
409 for (; i < (RXBDAT_OFF + len); i++) 408 for (; i < (RXBDAT_OFF + len); i++)
410 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); 409 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
411 } else { 410 } else {
@@ -455,13 +454,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
455 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT); 454 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
456 } 455 }
457 /* Data length */ 456 /* Data length */
458 frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK; 457 frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
459 if (frame->can_dlc > 8) {
460 dev_warn(&spi->dev, "invalid frame recevied\n");
461 priv->net->stats.rx_errors++;
462 dev_kfree_skb(skb);
463 return;
464 }
465 memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc); 458 memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
466 459
467 priv->net->stats.rx_packets++; 460 priv->net->stats.rx_packets++;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index bb06dfb58f25..07346f880ca6 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -297,7 +297,8 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
297 frame->can_id |= can_id >> 1; 297 frame->can_id |= can_id >> 1;
298 if (can_id & 1) 298 if (can_id & 1)
299 frame->can_id |= CAN_RTR_FLAG; 299 frame->can_id |= CAN_RTR_FLAG;
300 frame->can_dlc = in_8(&regs->rx.dlr) & 0xf; 300
301 frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf);
301 302
302 if (!(frame->can_id & CAN_RTR_FLAG)) { 303 if (!(frame->can_id & CAN_RTR_FLAG)) {
303 void __iomem *data = &regs->rx.dsr1_0; 304 void __iomem *data = &regs->rx.dsr1_0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b4ba88a31075..542a4f7255b4 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -293,15 +293,14 @@ static void sja1000_rx(struct net_device *dev)
293 uint8_t fi; 293 uint8_t fi;
294 uint8_t dreg; 294 uint8_t dreg;
295 canid_t id; 295 canid_t id;
296 uint8_t dlc;
297 int i; 296 int i;
298 297
298 /* create zero'ed CAN frame buffer */
299 skb = alloc_can_skb(dev, &cf); 299 skb = alloc_can_skb(dev, &cf);
300 if (skb == NULL) 300 if (skb == NULL)
301 return; 301 return;
302 302
303 fi = priv->read_reg(priv, REG_FI); 303 fi = priv->read_reg(priv, REG_FI);
304 dlc = fi & 0x0F;
305 304
306 if (fi & FI_FF) { 305 if (fi & FI_FF) {
307 /* extended frame format (EFF) */ 306 /* extended frame format (EFF) */
@@ -318,16 +317,15 @@ static void sja1000_rx(struct net_device *dev)
318 | (priv->read_reg(priv, REG_ID2) >> 5); 317 | (priv->read_reg(priv, REG_ID2) >> 5);
319 } 318 }
320 319
321 if (fi & FI_RTR) 320 if (fi & FI_RTR) {
322 id |= CAN_RTR_FLAG; 321 id |= CAN_RTR_FLAG;
322 } else {
323 cf->can_dlc = get_can_dlc(fi & 0x0F);
324 for (i = 0; i < cf->can_dlc; i++)
325 cf->data[i] = priv->read_reg(priv, dreg++);
326 }
323 327
324 cf->can_id = id; 328 cf->can_id = id;
325 cf->can_dlc = dlc;
326 for (i = 0; i < dlc; i++)
327 cf->data[i] = priv->read_reg(priv, dreg++);
328
329 while (i < 8)
330 cf->data[i++] = 0;
331 329
332 /* release receive buffer */ 330 /* release receive buffer */
333 priv->write_reg(priv, REG_CMR, CMD_RRB); 331 priv->write_reg(priv, REG_CMR, CMD_RRB);
@@ -335,7 +333,7 @@ static void sja1000_rx(struct net_device *dev)
335 netif_rx(skb); 333 netif_rx(skb);
336 334
337 stats->rx_packets++; 335 stats->rx_packets++;
338 stats->rx_bytes += dlc; 336 stats->rx_bytes += cf->can_dlc;
339} 337}
340 338
341static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) 339static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 07e8016b17ec..5c993c2da528 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -552,7 +552,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
552 data = hecc_read_mbx(priv, mbxno, HECC_CANMCF); 552 data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
553 if (data & HECC_CANMCF_RTR) 553 if (data & HECC_CANMCF_RTR)
554 cf->can_id |= CAN_RTR_FLAG; 554 cf->can_id |= CAN_RTR_FLAG;
555 cf->can_dlc = data & 0xF; 555 cf->can_dlc = get_can_dlc(data & 0xF);
556 data = hecc_read_mbx(priv, mbxno, HECC_CANMDL); 556 data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
557 *(u32 *)(cf->data) = cpu_to_be32(data); 557 *(u32 *)(cf->data) = cpu_to_be32(data);
558 if (cf->can_dlc > 4) { 558 if (cf->can_dlc > 4) {
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 591eb0eb1c2b..efbb05c71bf4 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -316,7 +316,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
316 return; 316 return;
317 317
318 cf->can_id = le32_to_cpu(msg->msg.can_msg.id); 318 cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
319 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); 319 cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF);
320 320
321 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME || 321 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME ||
322 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) 322 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
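
The CAN driver hunks above (at91, bfin, mcp251x, mscan, sja1000, ti_hecc, ems_usb) replace per-driver min_t()/if checks on the received data length code with the shared get_can_dlc() helper, which clamps a raw DLC field to the CAN maximum of 8 data bytes before it is used as a copy length. A short user-space sketch of that clamping follows; clamp_dlc() is an illustrative stand-in for the helper in include/linux/can/dev.h.

/* Sketch: clamp a raw 4-bit DLC register value to the 8-byte CAN limit. */
#include <stdio.h>
#include <stdint.h>

#define CAN_MAX_DLC 8

static uint8_t clamp_dlc(uint8_t raw_dlc)
{
	return raw_dlc > CAN_MAX_DLC ? CAN_MAX_DLC : raw_dlc;
}

int main(void)
{
	/* A 4-bit DLC field can hold 0..15; anything above 8 must not
	 * be used as a length when copying into cf->data[8]. */
	for (uint8_t reg = 0; reg < 16; reg++)
		printf("raw %2u -> dlc %u\n", reg, clamp_dlc(reg));
	return 0;
}
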
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 678222389407..8d0be26f94e3 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1163,7 +1163,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1163 priv->dev = dev; 1163 priv->dev = dev;
1164 priv->ring_size = 64; 1164 priv->ring_size = 64;
1165 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1165 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1166 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1166 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
1167 1167
1168 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1168 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
1169 1169
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0cbe3c0e7c06..b37730065688 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1646,7 +1646,7 @@ dm9000_drv_resume(struct device *dev)
1646 return 0; 1646 return 0;
1647} 1647}
1648 1648
1649static struct dev_pm_ops dm9000_drv_pm_ops = { 1649static const struct dev_pm_ops dm9000_drv_pm_ops = {
1650 .suspend = dm9000_drv_suspend, 1650 .suspend = dm9000_drv_suspend,
1651 .resume = dm9000_drv_resume, 1651 .resume = dm9000_drv_resume,
1652}; 1652};
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index e2d5343f1275..204177d78cec 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -510,6 +510,40 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
510} 510}
511 511
512/** 512/**
513 * ixgbe_validate_link_ready - Function looks for phy link
514 * @hw: pointer to hardware structure
515 *
516 * Function indicates success when phy link is available. If phy is not ready
517 * within 5 seconds of MAC indicating link, the function returns error.
518 **/
519static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
520{
521 u32 timeout;
522 u16 an_reg;
523
524 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
525 return 0;
526
527 for (timeout = 0;
528 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
529 hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
530
531 if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
532 (an_reg & MDIO_STAT1_LSTATUS))
533 break;
534
535 msleep(100);
536 }
537
538 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
539 hw_dbg(hw, "Link was indicated but link is down\n");
540 return IXGBE_ERR_LINK_SETUP;
541 }
542
543 return 0;
544}
545
546/**
513 * ixgbe_check_mac_link_82598 - Get link/speed status 547 * ixgbe_check_mac_link_82598 - Get link/speed status
514 * @hw: pointer to hardware structure 548 * @hw: pointer to hardware structure
515 * @speed: pointer to link speed 549 * @speed: pointer to link speed
@@ -589,6 +623,10 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
589 else 623 else
590 *speed = IXGBE_LINK_SPEED_1GB_FULL; 624 *speed = IXGBE_LINK_SPEED_1GB_FULL;
591 625
626 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
627 (ixgbe_validate_link_ready(hw) != 0))
628 *link_up = false;
629
592 /* if link is down, zero out the current_mode */ 630 /* if link is down, zero out the current_mode */
593 if (*link_up == false) { 631 if (*link_up == false) {
594 hw->fc.current_mode = ixgbe_fc_none; 632 hw->fc.current_mode = ixgbe_fc_none;
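
The new ixgbe_validate_link_ready() above is a plain poll-with-timeout: on 82598AT2 parts it re-reads the PHY autonegotiation/link-status register up to 50 times, 100 ms apart, and if the PHY never reports both bits set it overrides the MAC's *link_up indication. A generic user-space sketch of that pattern follows; read_phy_status() and the bit masks are illustrative stand-ins, not the ixgbe register layout.

/* Sketch: poll a status word until two bits are set, with a hard limit. */
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#define AN_COMPLETE	(1u << 5)
#define LINK_UP		(1u << 2)
#define POLL_LIMIT	50

static unsigned read_phy_status(void)
{
	static int calls;
	/* Pretend the PHY becomes ready on the third poll. */
	return ++calls >= 3 ? (AN_COMPLETE | LINK_UP) : 0;
}

static bool phy_link_ready(void)
{
	for (int i = 0; i < POLL_LIMIT; i++) {
		unsigned status = read_phy_status();

		if ((status & AN_COMPLETE) && (status & LINK_UP))
			return true;
		usleep(100 * 1000);	/* 100 ms between polls */
	}
	return false;	/* MAC reported link, but the PHY never came ready */
}

int main(void)
{
	printf("phy ready: %s\n", phy_link_ready() ? "yes" : "no");
	return 0;
}
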
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 35ea8c93fd80..bd64387563f0 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4511,6 +4511,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4511 struct ixgbe_hw *hw = &adapter->hw; 4511 struct ixgbe_hw *hw = &adapter->hw;
4512 u64 total_mpc = 0; 4512 u64 total_mpc = 0;
4513 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 4513 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
4514 u64 non_eop_descs = 0, restart_queue = 0;
4514 4515
4515 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 4516 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
4516 u64 rsc_count = 0; 4517 u64 rsc_count = 0;
@@ -4528,10 +4529,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4528 4529
4529 /* gather some stats to the adapter struct that are per queue */ 4530 /* gather some stats to the adapter struct that are per queue */
4530 for (i = 0; i < adapter->num_tx_queues; i++) 4531 for (i = 0; i < adapter->num_tx_queues; i++)
4531 adapter->restart_queue += adapter->tx_ring[i].restart_queue; 4532 restart_queue += adapter->tx_ring[i].restart_queue;
4533 adapter->restart_queue = restart_queue;
4532 4534
4533 for (i = 0; i < adapter->num_rx_queues; i++) 4535 for (i = 0; i < adapter->num_rx_queues; i++)
4534 adapter->non_eop_descs += adapter->tx_ring[i].non_eop_descs; 4536 non_eop_descs += adapter->rx_ring[i].non_eop_descs;
4537 adapter->non_eop_descs = non_eop_descs;
4535 4538
4536 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 4539 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4537 for (i = 0; i < 8; i++) { 4540 for (i = 0; i < 8; i++) {
@@ -5003,7 +5006,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5003 IXGBE_ADVTXD_DTYP_CTXT); 5006 IXGBE_ADVTXD_DTYP_CTXT);
5004 5007
5005 if (skb->ip_summed == CHECKSUM_PARTIAL) { 5008 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5006 switch (skb->protocol) { 5009 __be16 protocol;
5010
5011 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
5012 const struct vlan_ethhdr *vhdr =
5013 (const struct vlan_ethhdr *)skb->data;
5014
5015 protocol = vhdr->h_vlan_encapsulated_proto;
5016 } else {
5017 protocol = skb->protocol;
5018 }
5019
5020 switch (protocol) {
5007 case cpu_to_be16(ETH_P_IP): 5021 case cpu_to_be16(ETH_P_IP):
5008 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5022 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5009 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 5023 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index f3e8d52610b7..84650c6ebe03 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -841,6 +841,8 @@
841#define IXGBE_MPVC 0x04318 841#define IXGBE_MPVC 0x04318
842#define IXGBE_SGMIIC 0x04314 842#define IXGBE_SGMIIC 0x04314
843 843
844#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
845
844/* Omer CORECTL */ 846/* Omer CORECTL */
845#define IXGBE_CORECTL 0x014F00 847#define IXGBE_CORECTL 0x014F00
846/* BARCTRL */ 848/* BARCTRL */
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index ad95d5f7b630..8c8515619b8e 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -72,35 +72,6 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
72 mlx4_bitmap_free_range(bitmap, obj, 1); 72 mlx4_bitmap_free_range(bitmap, obj, 1);
73} 73}
74 74
75static unsigned long find_aligned_range(unsigned long *bitmap,
76 u32 start, u32 nbits,
77 int len, int align)
78{
79 unsigned long end, i;
80
81again:
82 start = ALIGN(start, align);
83
84 while ((start < nbits) && test_bit(start, bitmap))
85 start += align;
86
87 if (start >= nbits)
88 return -1;
89
90 end = start+len;
91 if (end > nbits)
92 return -1;
93
94 for (i = start + 1; i < end; i++) {
95 if (test_bit(i, bitmap)) {
96 start = i + 1;
97 goto again;
98 }
99 }
100
101 return start;
102}
103
104u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) 75u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
105{ 76{
106 u32 obj, i; 77 u32 obj, i;
@@ -110,13 +81,13 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
110 81
111 spin_lock(&bitmap->lock); 82 spin_lock(&bitmap->lock);
112 83
113 obj = find_aligned_range(bitmap->table, bitmap->last, 84 obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
114 bitmap->max, cnt, align); 85 bitmap->last, cnt, align - 1);
115 if (obj >= bitmap->max) { 86 if (obj >= bitmap->max) {
116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 87 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
117 & bitmap->mask; 88 & bitmap->mask;
118 obj = find_aligned_range(bitmap->table, 0, bitmap->max, 89 obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
119 cnt, align); 90 0, cnt, align - 1);
120 } 91 }
121 92
122 if (obj < bitmap->max) { 93 if (obj < bitmap->max) {
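
The mlx4 hunk above drops the driver's open-coded find_aligned_range() in favour of the generic bitmap_find_next_zero_area() helper; note that the helper takes an alignment mask rather than an alignment, hence the "align - 1" in the call. A small user-space model of the aligned zero-area search follows; find_zero_area() is an illustrative stand-in, not the kernel implementation.

/* Sketch: find an aligned run of `len` free bits in a small bitmap. */
#include <stdio.h>
#include <stdbool.h>

#define NBITS 32

static bool bits[NBITS];	/* true = allocated */

static int find_zero_area(unsigned start, unsigned len, unsigned align_mask)
{
	for (unsigned i = (start + align_mask) & ~align_mask;
	     i + len <= NBITS;
	     i = (i + align_mask + 1) & ~align_mask) {
		unsigned j;

		for (j = 0; j < len && !bits[i + j]; j++)
			;
		if (j == len)
			return (int)i;	/* free, aligned run found */
	}
	return -1;
}

int main(void)
{
	bits[0] = bits[1] = bits[4] = true;	/* a few busy bits */
	/* Ask for 4 free bits aligned to 4 (mask = 3), as the driver does. */
	printf("aligned run starts at bit %d\n", find_zero_area(0, 4, 3));
	return 0;
}
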
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 3c16602172fc..04f42ae1eda0 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -90,6 +90,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
90 [ 9] = "Q_Key violation counter", 90 [ 9] = "Q_Key violation counter",
91 [10] = "VMM", 91 [10] = "VMM",
92 [12] = "DPDP", 92 [12] = "DPDP",
93 [15] = "Big LSO headers",
93 [16] = "MW support", 94 [16] = "MW support",
94 [17] = "APM support", 95 [17] = "APM support",
95 [18] = "Atomic ops support", 96 [18] = "Atomic ops support",
@@ -235,7 +236,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
235 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); 236 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
236 dev_cap->max_mpts = 1 << (field & 0x3f); 237 dev_cap->max_mpts = 1 << (field & 0x3f);
237 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); 238 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
238 dev_cap->reserved_eqs = 1 << (field & 0xf); 239 dev_cap->reserved_eqs = field & 0xf;
239 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); 240 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
240 dev_cap->max_eqs = 1 << (field & 0xf); 241 dev_cap->max_eqs = 1 << (field & 0xf);
241 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); 242 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index f36ae691cab3..015fbe785c13 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -53,7 +53,7 @@ static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
53 53
54 if (out_param > 2) { 54 if (out_param > 2) {
55 mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param); 55 mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
56 return EINVAL; 56 return -EINVAL;
57 } 57 }
58 58
59 *type = out_param; 59 *type = out_param;
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 17a27225cc98..98938ea9e0bd 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -912,7 +912,11 @@ static void media_check(unsigned long arg)
912 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) { 912 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
913 if (!lp->fast_poll) 913 if (!lp->fast_poll)
914 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 914 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
915
916 local_irq_save(flags);
915 el3_interrupt(dev->irq, dev); 917 el3_interrupt(dev->irq, dev);
918 local_irq_restore(flags);
919
916 lp->fast_poll = HZ; 920 lp->fast_poll = HZ;
917 } 921 }
918 if (lp->fast_poll) { 922 if (lp->fast_poll) {
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 6f8d7e2e5922..322e11df0097 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -711,7 +711,11 @@ static void media_check(unsigned long arg)
711 (inb(ioaddr + EL3_TIMER) == 0xff)) { 711 (inb(ioaddr + EL3_TIMER) == 0xff)) {
712 if (!lp->fast_poll) 712 if (!lp->fast_poll)
713 printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name); 713 printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
714
715 local_irq_save(flags);
714 el3_interrupt(dev->irq, dev); 716 el3_interrupt(dev->irq, dev);
717 local_irq_restore(flags);
718
715 lp->fast_poll = HZ; 719 lp->fast_poll = HZ;
716 } 720 }
717 if (lp->fast_poll) { 721 if (lp->fast_poll) {
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index acfc5a3aa490..60f96c468a24 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -4859,7 +4859,7 @@ out:
4859 return 0; 4859 return 0;
4860} 4860}
4861 4861
4862static struct dev_pm_ops rtl8169_pm_ops = { 4862static const struct dev_pm_ops rtl8169_pm_ops = {
4863 .suspend = rtl8169_suspend, 4863 .suspend = rtl8169_suspend,
4864 .resume = rtl8169_resume, 4864 .resume = rtl8169_resume,
4865 .freeze = rtl8169_suspend, 4865 .freeze = rtl8169_suspend,
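This r8169 hunk, and matching ones later for smsc911x, vmxnet3, pcie portdrv, pxa2xx pcmcia, yenta, acerhdf, eeepc-laptop and hp-wmi, add const to static dev_pm_ops tables: the PM core only reads the callbacks, so the table can live in read-only data. A minimal sketch with hypothetical callbacks:

#include <linux/pm.h>
#include <linux/device.h>

/* Hypothetical suspend/resume callbacks for some driver. */
static int my_suspend(struct device *dev)
{
	return 0;
}

static int my_resume(struct device *dev)
{
	return 0;
}

/* const: the ops table is never modified at run time. */
static const struct dev_pm_ops my_pm_ops = {
	.suspend = my_suspend,
	.resume  = my_resume,
	.freeze  = my_suspend,
	.restore = my_resume,
};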
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 14949bb303a0..af3933579790 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -47,7 +47,7 @@ static const unsigned char payload_source[ETH_ALEN] = {
47 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, 47 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
48}; 48};
49 49
50static const char *payload_msg = 50static const char payload_msg[] =
51 "Hello world! This is an Efx loopback test in progress!"; 51 "Hello world! This is an Efx loopback test in progress!";
52 52
53/** 53/**
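The sfc change turns payload_msg from a pointer into an array: static const char msg[] stores only the string bytes in read-only data, while static const char *msg additionally emits a pointer object that costs space and a relocation and could in principle be repointed. A plain-C illustration with hypothetical names:

#include <stdio.h>

static const char msg_array[]  = "Efx loopback test payload";	/* just the bytes */
static const char *msg_pointer = "Efx loopback test payload";	/* bytes plus a pointer object */

int main(void)
{
	/* The array also knows its own length; the pointer does not. */
	printf("sizeof(msg_array) = %zu, sizeof(msg_pointer) = %zu\n",
	       sizeof(msg_array), sizeof(msg_pointer));
	return 0;
}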
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index c88bc1013047..ca6285016dfd 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -84,6 +84,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
84 .mpr = 1, 84 .mpr = 1,
85 .tpauser = 1, 85 .tpauser = 1,
86 .hw_swap = 1, 86 .hw_swap = 1,
87 .rpadir = 1,
88 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
87}; 89};
88 90
89#elif defined(CONFIG_CPU_SUBTYPE_SH7763) 91#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
@@ -175,7 +177,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
175 .tpauser = 1, 177 .tpauser = 1,
176 .bculr = 1, 178 .bculr = 1,
177 .hw_swap = 1, 179 .hw_swap = 1,
178 .rpadir = 1,
179 .no_trimd = 1, 180 .no_trimd = 1,
180 .no_ade = 1, 181 .no_ade = 1,
181}; 182};
@@ -501,6 +502,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
501 */ 502 */
502 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : 503 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
503 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); 504 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
505 if (mdp->cd->rpadir)
506 mdp->rx_buf_sz += NET_IP_ALIGN;
504 507
505 /* Allocate RX and TX skb rings */ 508 /* Allocate RX and TX skb rings */
506 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, 509 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
@@ -715,6 +718,8 @@ static int sh_eth_rx(struct net_device *ndev)
715 pkt_len + 2); 718 pkt_len + 2);
716 skb = mdp->rx_skbuff[entry]; 719 skb = mdp->rx_skbuff[entry];
717 mdp->rx_skbuff[entry] = NULL; 720 mdp->rx_skbuff[entry] = NULL;
721 if (mdp->cd->rpadir)
722 skb_reserve(skb, NET_IP_ALIGN);
718 skb_put(skb, pkt_len); 723 skb_put(skb, pkt_len);
719 skb->protocol = eth_type_trans(skb, ndev); 724 skb->protocol = eth_type_trans(skb, ndev);
720 netif_rx(skb); 725 netif_rx(skb);
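The sh_eth hunks enable hardware RX padding (rpadir, with rpadir_value chosen for a 2-byte pad) on one CPU variant and compensate for it in software: the receive buffer is sized NET_IP_ALIGN bytes larger, and skb_reserve() skips the pad before the frame is handed up, so the IP header lands on a 4-byte boundary. A sketch of the receive-side pattern, with hypothetical names around the kernel helpers:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* hw_pads_rx is true when the MAC inserts NET_IP_ALIGN pad bytes before the frame. */
static void deliver_rx_frame(struct net_device *ndev, struct sk_buff *skb,
			     int pkt_len, bool hw_pads_rx)
{
	if (hw_pads_rx)
		skb_reserve(skb, NET_IP_ALIGN);	/* skip the hardware pad */
	skb_put(skb, pkt_len);			/* expose the received bytes */
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);
}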
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 89a05d674ddc..1c01b96c9611 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -644,7 +644,6 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
644{ 644{
645 u32 reg1; 645 u32 reg1;
646 646
647 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
648 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 647 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
649 reg1 &= ~phy_power[port]; 648 reg1 &= ~phy_power[port];
650 649
@@ -652,7 +651,6 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
652 reg1 |= coma_mode[port]; 651 reg1 |= coma_mode[port];
653 652
654 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 653 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
655 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
656 sky2_pci_read32(hw, PCI_DEV_REG1); 654 sky2_pci_read32(hw, PCI_DEV_REG1);
657 655
658 if (hw->chip_id == CHIP_ID_YUKON_FE) 656 if (hw->chip_id == CHIP_ID_YUKON_FE)
@@ -709,11 +707,9 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
709 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); 707 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
710 } 708 }
711 709
712 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
713 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 710 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
714 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ 711 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
715 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 712 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
716 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
717} 713}
718 714
719/* Force a renegotiation */ 715/* Force a renegotiation */
@@ -2152,9 +2148,7 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
2152 2148
2153 /* reset PHY Link Detect */ 2149 /* reset PHY Link Detect */
2154 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); 2150 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2155 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2156 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); 2151 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2157 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2158 2152
2159 sky2_link_up(sky2); 2153 sky2_link_up(sky2);
2160} 2154}
@@ -2645,7 +2639,6 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2645 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 2639 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2646 u16 pci_err; 2640 u16 pci_err;
2647 2641
2648 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2649 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2642 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2650 if (net_ratelimit()) 2643 if (net_ratelimit())
2651 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", 2644 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2653,14 +2646,12 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2653 2646
2654 sky2_pci_write16(hw, PCI_STATUS, 2647 sky2_pci_write16(hw, PCI_STATUS,
2655 pci_err | PCI_STATUS_ERROR_BITS); 2648 pci_err | PCI_STATUS_ERROR_BITS);
2656 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2657 } 2649 }
2658 2650
2659 if (status & Y2_IS_PCI_EXP) { 2651 if (status & Y2_IS_PCI_EXP) {
2660 /* PCI-Express uncorrectable Error occurred */ 2652 /* PCI-Express uncorrectable Error occurred */
2661 u32 err; 2653 u32 err;
2662 2654
2663 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2664 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2655 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2665 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 2656 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2666 0xfffffffful); 2657 0xfffffffful);
@@ -2668,7 +2659,6 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2668 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); 2659 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2669 2660
2670 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2661 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2671 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2672 } 2662 }
2673 2663
2674 if (status & Y2_HWE_L1_MASK) 2664 if (status & Y2_HWE_L1_MASK)
@@ -3047,7 +3037,6 @@ static void sky2_reset(struct sky2_hw *hw)
3047 } 3037 }
3048 3038
3049 sky2_power_on(hw); 3039 sky2_power_on(hw);
3050 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3051 3040
3052 for (i = 0; i < hw->ports; i++) { 3041 for (i = 0; i < hw->ports; i++) {
3053 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 3042 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -3084,7 +3073,6 @@ static void sky2_reset(struct sky2_hw *hw)
3084 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; 3073 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3085 3074
3086 /* reset PHY Link Detect */ 3075 /* reset PHY Link Detect */
3087 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3088 sky2_pci_write16(hw, PSM_CONFIG_REG4, 3076 sky2_pci_write16(hw, PSM_CONFIG_REG4,
3089 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); 3077 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
3090 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); 3078 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
@@ -3102,7 +3090,6 @@ static void sky2_reset(struct sky2_hw *hw)
3102 /* restore the PCIe Link Control register */ 3090 /* restore the PCIe Link Control register */
3103 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); 3091 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3104 } 3092 }
3105 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3106 3093
3107 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ 3094 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3108 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); 3095 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
@@ -4530,7 +4517,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
4530 "Optima", /* 0xbc */ 4517 "Optima", /* 0xbc */
4531 }; 4518 };
4532 4519
4533 if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT) 4520 if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OPT)
4534 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); 4521 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
4535 else 4522 else
4536 snprintf(buf, sz, "(chip %#x)", chipid); 4523 snprintf(buf, sz, "(chip %#x)", chipid);
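The last sky2 hunk fixes an off-by-one in the chip-name lookup: the name[] table ends with an entry for CHIP_ID_YUKON_OPT ("Optima", 0xbc), so the upper bound has to be inclusive (<=); with < the newest chip fell through to the generic "(chip %#x)" branch. A generic sketch of an inclusive table-bounds check (the IDs and names here are hypothetical stand-ins):

#include <stdio.h>

enum { ID_FIRST = 0xb3, ID_LAST = 0xbc };

static const char *const names[] = {
	"XL", "EC Ultra", "Extreme", "EC", "FE",
	"FE+", "Supreme", "UL 2", "Unknown", "Optima",
};

static void chip_name(unsigned int id, char *buf, size_t sz)
{
	/* Inclusive upper bound: the last table entry is itself a valid ID. */
	if (id >= ID_FIRST && id <= ID_LAST)
		snprintf(buf, sz, "%s", names[id - ID_FIRST]);
	else
		snprintf(buf, sz, "(chip %#x)", id);
}

int main(void)
{
	char buf[32];

	chip_name(ID_LAST, buf, sizeof(buf));
	puts(buf);	/* prints "Optima" only with the inclusive check */
	return 0;
}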
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 20d6095cf411..494cd91ea39c 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2154,7 +2154,7 @@ static int smsc911x_resume(struct device *dev)
2154 return (to == 0) ? -EIO : 0; 2154 return (to == 0) ? -EIO : 0;
2155} 2155}
2156 2156
2157static struct dev_pm_ops smsc911x_pm_ops = { 2157static const struct dev_pm_ops smsc911x_pm_ops = {
2158 .suspend = smsc911x_suspend, 2158 .suspend = smsc911x_suspend,
2159 .resume = smsc911x_resume, 2159 .resume = smsc911x_resume,
2160}; 2160};
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index b091e20ca167..f14d225404da 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
324 dbg("%02X:", netdev->dev_addr[i]); 324 dbg("%02X:", netdev->dev_addr[i]);
325 dbg("%02X\n", netdev->dev_addr[i]); 325 dbg("%02X\n", netdev->dev_addr[i]);
326 /* Set the IDR registers. */ 326 /* Set the IDR registers. */
327 set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr); 327 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
328#ifdef EEPROM_WRITE 328#ifdef EEPROM_WRITE
329 { 329 {
330 u8 cr; 330 u8 cr;
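The rtl8150 fix stops using sizeof(netdev->dev_addr) as the number of bytes to write into the IDR registers: sizeof() only reflects how the field happens to be declared (an array's capacity, or merely the size of a pointer), neither of which is the device's address length; that is what netdev->addr_len holds (6 for Ethernet). A plain-C reminder with hypothetical struct layouts:

#include <stdio.h>

struct with_array   { unsigned char dev_addr[32]; unsigned char addr_len; };
struct with_pointer { unsigned char *dev_addr;    unsigned char addr_len; };

int main(void)
{
	struct with_array   a = { .addr_len = 6 };
	struct with_pointer p = { .dev_addr = NULL, .addr_len = 6 };

	/* Neither sizeof value is the 6-byte Ethernet address length. */
	printf("array field: %zu, pointer field: %zu, addr_len: %u/%u\n",
	       sizeof(a.dev_addr), sizeof(p.dev_addr), a.addr_len, p.addr_len);
	return 0;
}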
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 1ceb9d0f8b97..9cc438282d77 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2689,7 +2689,7 @@ vmxnet3_resume(struct device *device)
2689 return 0; 2689 return 0;
2690} 2690}
2691 2691
2692static struct dev_pm_ops vmxnet3_pm_ops = { 2692static const struct dev_pm_ops vmxnet3_pm_ops = {
2693 .suspend = vmxnet3_suspend, 2693 .suspend = vmxnet3_suspend,
2694 .resume = vmxnet3_resume, 2694 .resume = vmxnet3_resume,
2695}; 2695};
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index d69bde6a2343..c542c7bb7454 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -354,7 +354,7 @@ static unsigned int dino_startup_irq(unsigned int irq)
354} 354}
355 355
356static struct irq_chip dino_interrupt_type = { 356static struct irq_chip dino_interrupt_type = {
357 .typename = "GSC-PCI", 357 .name = "GSC-PCI",
358 .startup = dino_startup_irq, 358 .startup = dino_startup_irq,
359 .shutdown = dino_disable_irq, 359 .shutdown = dino_disable_irq,
360 .enable = dino_enable_irq, 360 .enable = dino_enable_irq,
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 51220749cb65..46f503fb7fc5 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -189,7 +189,7 @@ static unsigned int eisa_startup_irq(unsigned int irq)
189} 189}
190 190
191static struct irq_chip eisa_interrupt_type = { 191static struct irq_chip eisa_interrupt_type = {
192 .typename = "EISA", 192 .name = "EISA",
193 .startup = eisa_startup_irq, 193 .startup = eisa_startup_irq,
194 .shutdown = eisa_disable_irq, 194 .shutdown = eisa_disable_irq,
195 .enable = eisa_enable_irq, 195 .enable = eisa_enable_irq,
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 647adc9f85ad..c4e1f3c3c2fa 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -149,7 +149,7 @@ static unsigned int gsc_asic_startup_irq(unsigned int irq)
149} 149}
150 150
151static struct irq_chip gsc_asic_interrupt_type = { 151static struct irq_chip gsc_asic_interrupt_type = {
152 .typename = "GSC-ASIC", 152 .name = "GSC-ASIC",
153 .startup = gsc_asic_startup_irq, 153 .startup = gsc_asic_startup_irq,
154 .shutdown = gsc_asic_disable_irq, 154 .shutdown = gsc_asic_disable_irq,
155 .enable = gsc_asic_enable_irq, 155 .enable = gsc_asic_enable_irq,
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 88e333553212..c76836727cae 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -730,7 +730,7 @@ static int iosapic_set_affinity_irq(unsigned int irq,
730#endif 730#endif
731 731
732static struct irq_chip iosapic_interrupt_type = { 732static struct irq_chip iosapic_interrupt_type = {
733 .typename = "IO-SAPIC-level", 733 .name = "IO-SAPIC-level",
734 .startup = iosapic_startup_irq, 734 .startup = iosapic_startup_irq,
735 .shutdown = iosapic_disable_irq, 735 .shutdown = iosapic_disable_irq,
736 .enable = iosapic_enable_irq, 736 .enable = iosapic_enable_irq,
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 79caf1ca4a29..188bc8496a26 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -38,6 +38,7 @@
38#include <linux/kernel_stat.h> 38#include <linux/kernel_stat.h>
39#include <linux/reboot.h> 39#include <linux/reboot.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
41#include <linux/ctype.h> 42#include <linux/ctype.h>
42#include <linux/blkdev.h> 43#include <linux/blkdev.h>
43#include <linux/workqueue.h> 44#include <linux/workqueue.h>
@@ -147,41 +148,34 @@ device_initcall(start_task);
147static void (*led_func_ptr) (unsigned char) __read_mostly; 148static void (*led_func_ptr) (unsigned char) __read_mostly;
148 149
149#ifdef CONFIG_PROC_FS 150#ifdef CONFIG_PROC_FS
150static int led_proc_read(char *page, char **start, off_t off, int count, 151static int led_proc_show(struct seq_file *m, void *v)
151 int *eof, void *data)
152{ 152{
153 char *out = page; 153 switch ((long)m->private)
154 int len;
155
156 switch ((long)data)
157 { 154 {
158 case LED_NOLCD: 155 case LED_NOLCD:
159 out += sprintf(out, "Heartbeat: %d\n", led_heartbeat); 156 seq_printf(m, "Heartbeat: %d\n", led_heartbeat);
160 out += sprintf(out, "Disk IO: %d\n", led_diskio); 157 seq_printf(m, "Disk IO: %d\n", led_diskio);
161 out += sprintf(out, "LAN Rx/Tx: %d\n", led_lanrxtx); 158 seq_printf(m, "LAN Rx/Tx: %d\n", led_lanrxtx);
162 break; 159 break;
163 case LED_HASLCD: 160 case LED_HASLCD:
164 out += sprintf(out, "%s\n", lcd_text); 161 seq_printf(m, "%s\n", lcd_text);
165 break; 162 break;
166 default: 163 default:
167 *eof = 1;
168 return 0; 164 return 0;
169 } 165 }
166 return 0;
167}
170 168
171 len = out - page - off; 169static int led_proc_open(struct inode *inode, struct file *file)
172 if (len < count) { 170{
173 *eof = 1; 171 return single_open(file, led_proc_show, PDE(inode)->data);
174 if (len <= 0) return 0;
175 } else {
176 len = count;
177 }
178 *start = page + off;
179 return len;
180} 172}
181 173
182static int led_proc_write(struct file *file, const char *buf, 174
183 unsigned long count, void *data) 175static ssize_t led_proc_write(struct file *file, const char *buf,
176 size_t count, loff_t *pos)
184{ 177{
178 void *data = PDE(file->f_path.dentry->d_inode)->data;
185 char *cur, lbuf[count + 1]; 179 char *cur, lbuf[count + 1];
186 int d; 180 int d;
187 181
@@ -234,6 +228,15 @@ parse_error:
234 return -EINVAL; 228 return -EINVAL;
235} 229}
236 230
231static const struct file_operations led_proc_fops = {
232 .owner = THIS_MODULE,
233 .open = led_proc_open,
234 .read = seq_read,
235 .llseek = seq_lseek,
236 .release = single_release,
237 .write = led_proc_write,
238};
239
237static int __init led_create_procfs(void) 240static int __init led_create_procfs(void)
238{ 241{
239 struct proc_dir_entry *proc_pdc_root = NULL; 242 struct proc_dir_entry *proc_pdc_root = NULL;
@@ -243,19 +246,15 @@ static int __init led_create_procfs(void)
243 246
244 proc_pdc_root = proc_mkdir("pdc", 0); 247 proc_pdc_root = proc_mkdir("pdc", 0);
245 if (!proc_pdc_root) return -1; 248 if (!proc_pdc_root) return -1;
246 ent = create_proc_entry("led", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root); 249 ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
250 &led_proc_fops, (void *)LED_NOLCD); /* LED */
247 if (!ent) return -1; 251 if (!ent) return -1;
248 ent->data = (void *)LED_NOLCD; /* LED */
249 ent->read_proc = led_proc_read;
250 ent->write_proc = led_proc_write;
251 252
252 if (led_type == LED_HASLCD) 253 if (led_type == LED_HASLCD)
253 { 254 {
254 ent = create_proc_entry("lcd", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root); 255 ent = proc_create_data("lcd", S_IRUGO|S_IWUSR, proc_pdc_root,
256 &led_proc_fops, (void *)LED_HASLCD); /* LCD */
255 if (!ent) return -1; 257 if (!ent) return -1;
256 ent->data = (void *)LED_HASLCD; /* LCD */
257 ent->read_proc = led_proc_read;
258 ent->write_proc = led_proc_write;
259 } 258 }
260 259
261 return 0; 260 return 0;
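The led.c rework is the standard migration away from the old read_proc/write_proc hooks: a show() routine prints into a seq_file, single_open() binds it to a file_operations table, and proc_create_data() replaces create_proc_entry() plus the manual ->data/->read_proc/->write_proc assignments; the per-entry pointer then comes back as PDE(inode)->data in open() and as m->private in show(). A condensed sketch of the same pattern with hypothetical names (the write handler is left out for brevity):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_proc_show(struct seq_file *m, void *v)
{
	/* m->private carries the pointer given to proc_create_data(). */
	seq_printf(m, "mode: %ld\n", (long)m->private);
	return 0;
}

static int my_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_proc_show, PDE(inode)->data);
}

static const struct file_operations my_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = my_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init my_proc_init(void)
{
	if (!proc_create_data("my_entry", S_IRUGO, NULL,
			      &my_proc_fops, (void *)1L))
		return -ENOMEM;
	return 0;
}
module_init(my_proc_init);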
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 13a64bc081b6..0bc5d474b168 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -779,12 +779,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
779 read_unlock(&pathentry->rw_lock); 779 read_unlock(&pathentry->rw_lock);
780 780
781 DPRINTK("%s: flags before: 0x%X\n", __func__, flags); 781 DPRINTK("%s: flags before: 0x%X\n", __func__, flags);
782 782
783 temp = in; 783 temp = skip_spaces(in);
784 784
785 while (*temp && isspace(*temp))
786 temp++;
787
788 c = *temp++ - '0'; 785 c = *temp++ - '0';
789 if ((c != 0) && (c != 1)) 786 if ((c != 0) && (c != 1))
790 goto parse_error; 787 goto parse_error;
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 675f04e6597a..a35c9c5b89e8 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -326,7 +326,7 @@ static unsigned int superio_startup_irq(unsigned int irq)
326} 326}
327 327
328static struct irq_chip superio_interrupt_type = { 328static struct irq_chip superio_interrupt_type = {
329 .typename = SUPERIO, 329 .name = SUPERIO,
330 .startup = superio_startup_irq, 330 .startup = superio_startup_irq,
331 .shutdown = superio_disable_irq, 331 .shutdown = superio_disable_irq,
332 .enable = superio_enable_irq, 332 .enable = superio_enable_irq,
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 2597145a066e..ad113b0f62db 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3403,7 +3403,7 @@ static int __init parport_parse_param(const char *s, int *val,
3403 *val = automatic; 3403 *val = automatic;
3404 else if (!strncmp(s, "none", 4)) 3404 else if (!strncmp(s, "none", 4))
3405 *val = none; 3405 *val = none;
3406 else if (nofifo && !strncmp(s, "nofifo", 4)) 3406 else if (nofifo && !strncmp(s, "nofifo", 6))
3407 *val = nofifo; 3407 *val = nofifo;
3408 else { 3408 else {
3409 char *ep; 3409 char *ep;
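The parport_pc fix is a classic strncmp length bug: comparing against "nofifo" with a length of 4 accepts any string that merely starts with "nofi". The length passed to strncmp() has to cover the whole literal being matched. A tiny plain-C illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *s = "nofit";	/* not a valid option name */

	/* Length 4 only compares "nofi" and wrongly reports a match. */
	printf("len 4: %s\n", strncmp(s, "nofifo", 4) ? "no match" : "match");
	/* Length 6 compares the full literal and correctly rejects it. */
	printf("len 6: %s\n", strncmp(s, "nofifo", 6) ? "no match" : "match");
	return 0;
}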
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 6cdc931f7c17..83aae4747594 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -339,6 +339,35 @@ found:
339} 339}
340#endif 340#endif
341 341
342#ifdef CONFIG_ACPI_NUMA
343static int __init
344dmar_parse_one_rhsa(struct acpi_dmar_header *header)
345{
346 struct acpi_dmar_rhsa *rhsa;
347 struct dmar_drhd_unit *drhd;
348
349 rhsa = (struct acpi_dmar_rhsa *)header;
350 for_each_drhd_unit(drhd) {
351 if (drhd->reg_base_addr == rhsa->base_address) {
352 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
353
354 if (!node_online(node))
355 node = -1;
356 drhd->iommu->node = node;
357 return 0;
358 }
359 }
360 WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
361 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
362 drhd->reg_base_addr,
363 dmi_get_system_info(DMI_BIOS_VENDOR),
364 dmi_get_system_info(DMI_BIOS_VERSION),
365 dmi_get_system_info(DMI_PRODUCT_VERSION));
366
367 return 0;
368}
369#endif
370
342static void __init 371static void __init
343dmar_table_print_dmar_entry(struct acpi_dmar_header *header) 372dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
344{ 373{
@@ -458,7 +487,9 @@ parse_dmar_table(void)
458#endif 487#endif
459 break; 488 break;
460 case ACPI_DMAR_HARDWARE_AFFINITY: 489 case ACPI_DMAR_HARDWARE_AFFINITY:
461 /* We don't do anything with RHSA (yet?) */ 490#ifdef CONFIG_ACPI_NUMA
491 ret = dmar_parse_one_rhsa(entry_header);
492#endif
462 break; 493 break;
463 default: 494 default:
464 printk(KERN_WARNING PREFIX 495 printk(KERN_WARNING PREFIX
@@ -582,6 +613,8 @@ int __init dmar_table_init(void)
582 return 0; 613 return 0;
583} 614}
584 615
616static int bios_warned;
617
585int __init check_zero_address(void) 618int __init check_zero_address(void)
586{ 619{
587 struct acpi_table_dmar *dmar; 620 struct acpi_table_dmar *dmar;
@@ -601,6 +634,9 @@ int __init check_zero_address(void)
601 } 634 }
602 635
603 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { 636 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
637 void __iomem *addr;
638 u64 cap, ecap;
639
604 drhd = (void *)entry_header; 640 drhd = (void *)entry_header;
605 if (!drhd->address) { 641 if (!drhd->address) {
606 /* Promote an attitude of violence to a BIOS engineer today */ 642 /* Promote an attitude of violence to a BIOS engineer today */
@@ -609,17 +645,40 @@ int __init check_zero_address(void)
609 dmi_get_system_info(DMI_BIOS_VENDOR), 645 dmi_get_system_info(DMI_BIOS_VENDOR),
610 dmi_get_system_info(DMI_BIOS_VERSION), 646 dmi_get_system_info(DMI_BIOS_VERSION),
611 dmi_get_system_info(DMI_PRODUCT_VERSION)); 647 dmi_get_system_info(DMI_PRODUCT_VERSION));
612#ifdef CONFIG_DMAR 648 bios_warned = 1;
613 dmar_disabled = 1; 649 goto failed;
614#endif 650 }
615 return 0; 651
652 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
653 if (!addr ) {
654 printk("IOMMU: can't validate: %llx\n", drhd->address);
655 goto failed;
656 }
657 cap = dmar_readq(addr + DMAR_CAP_REG);
658 ecap = dmar_readq(addr + DMAR_ECAP_REG);
659 early_iounmap(addr, VTD_PAGE_SIZE);
660 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
661 /* Promote an attitude of violence to a BIOS engineer today */
662 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
663 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
664 drhd->address,
665 dmi_get_system_info(DMI_BIOS_VENDOR),
666 dmi_get_system_info(DMI_BIOS_VERSION),
667 dmi_get_system_info(DMI_PRODUCT_VERSION));
668 bios_warned = 1;
669 goto failed;
616 } 670 }
617 break;
618 } 671 }
619 672
620 entry_header = ((void *)entry_header + entry_header->length); 673 entry_header = ((void *)entry_header + entry_header->length);
621 } 674 }
622 return 1; 675 return 1;
676
677failed:
678#ifdef CONFIG_DMAR
679 dmar_disabled = 1;
680#endif
681 return 0;
623} 682}
624 683
625void __init detect_intel_iommu(void) 684void __init detect_intel_iommu(void)
@@ -670,6 +729,18 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
670 int agaw = 0; 729 int agaw = 0;
671 int msagaw = 0; 730 int msagaw = 0;
672 731
732 if (!drhd->reg_base_addr) {
733 if (!bios_warned) {
734 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
735 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
736 dmi_get_system_info(DMI_BIOS_VENDOR),
737 dmi_get_system_info(DMI_BIOS_VERSION),
738 dmi_get_system_info(DMI_PRODUCT_VERSION));
739 bios_warned = 1;
740 }
741 return -EINVAL;
742 }
743
673 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); 744 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
674 if (!iommu) 745 if (!iommu)
675 return -ENOMEM; 746 return -ENOMEM;
@@ -686,13 +757,16 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
686 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); 757 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
687 758
688 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { 759 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
689 /* Promote an attitude of violence to a BIOS engineer today */ 760 if (!bios_warned) {
690 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" 761 /* Promote an attitude of violence to a BIOS engineer today */
691 "BIOS vendor: %s; Ver: %s; Product Version: %s\n", 762 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
692 drhd->reg_base_addr, 763 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
693 dmi_get_system_info(DMI_BIOS_VENDOR), 764 drhd->reg_base_addr,
694 dmi_get_system_info(DMI_BIOS_VERSION), 765 dmi_get_system_info(DMI_BIOS_VENDOR),
695 dmi_get_system_info(DMI_PRODUCT_VERSION)); 766 dmi_get_system_info(DMI_BIOS_VERSION),
767 dmi_get_system_info(DMI_PRODUCT_VERSION));
768 bios_warned = 1;
769 }
696 goto err_unmap; 770 goto err_unmap;
697 } 771 }
698 772
@@ -715,6 +789,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
715 iommu->agaw = agaw; 789 iommu->agaw = agaw;
716 iommu->msagaw = msagaw; 790 iommu->msagaw = msagaw;
717 791
792 iommu->node = -1;
793
718 /* the registers might be more than one page */ 794 /* the registers might be more than one page */
719 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), 795 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
720 cap_max_fault_reg_offset(iommu->cap)); 796 cap_max_fault_reg_offset(iommu->cap));
@@ -1056,6 +1132,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
1056int dmar_enable_qi(struct intel_iommu *iommu) 1132int dmar_enable_qi(struct intel_iommu *iommu)
1057{ 1133{
1058 struct q_inval *qi; 1134 struct q_inval *qi;
1135 struct page *desc_page;
1059 1136
1060 if (!ecap_qis(iommu->ecap)) 1137 if (!ecap_qis(iommu->ecap))
1061 return -ENOENT; 1138 return -ENOENT;
@@ -1072,13 +1149,16 @@ int dmar_enable_qi(struct intel_iommu *iommu)
1072 1149
1073 qi = iommu->qi; 1150 qi = iommu->qi;
1074 1151
1075 qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC)); 1152
1076 if (!qi->desc) { 1153 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1154 if (!desc_page) {
1077 kfree(qi); 1155 kfree(qi);
1078 iommu->qi = 0; 1156 iommu->qi = 0;
1079 return -ENOMEM; 1157 return -ENOMEM;
1080 } 1158 }
1081 1159
1160 qi->desc = page_address(desc_page);
1161
1082 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC); 1162 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1083 if (!qi->desc_status) { 1163 if (!qi->desc_status) {
1084 free_page((unsigned long) qi->desc); 1164 free_page((unsigned long) qi->desc);
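The dmar.c changes make the IOMMU data structures NUMA-aware: the new dmar_parse_one_rhsa() records, from the ACPI RHSA entry, which node each DRHD unit lives on, and dmar_enable_qi() then allocates the queued-invalidation descriptor page with alloc_pages_node() on iommu->node instead of get_zeroed_page(); the interrupt-remapping table in a later hunk gets the same treatment. A sketch of the allocation idiom (the wrapper name is hypothetical; the callers above pass -1 when the node is unknown):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate one zeroed page close to the given NUMA node. */
static void *alloc_local_page(int node)
{
	struct page *page;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!page)
		return NULL;
	return page_address(page);	/* kernel virtual address of that page */
}
/* The existing free path is unchanged: free_page((unsigned long)vaddr). */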
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8d6159426311..e56f9bed6f2b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
277 277
278struct dmar_domain { 278struct dmar_domain {
279 int id; /* domain id */ 279 int id; /* domain id */
280 int nid; /* node id */
280 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ 281 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
281 282
282 struct list_head devices; /* all devices' list */ 283 struct list_head devices; /* all devices' list */
@@ -386,30 +387,14 @@ static struct kmem_cache *iommu_domain_cache;
386static struct kmem_cache *iommu_devinfo_cache; 387static struct kmem_cache *iommu_devinfo_cache;
387static struct kmem_cache *iommu_iova_cache; 388static struct kmem_cache *iommu_iova_cache;
388 389
389static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep) 390static inline void *alloc_pgtable_page(int node)
390{ 391{
391 unsigned int flags; 392 struct page *page;
392 void *vaddr; 393 void *vaddr = NULL;
393
394 /* trying to avoid low memory issues */
395 flags = current->flags & PF_MEMALLOC;
396 current->flags |= PF_MEMALLOC;
397 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
398 current->flags &= (~PF_MEMALLOC | flags);
399 return vaddr;
400}
401
402 394
403static inline void *alloc_pgtable_page(void) 395 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
404{ 396 if (page)
405 unsigned int flags; 397 vaddr = page_address(page);
406 void *vaddr;
407
408 /* trying to avoid low memory issues */
409 flags = current->flags & PF_MEMALLOC;
410 current->flags |= PF_MEMALLOC;
411 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
412 current->flags &= (~PF_MEMALLOC | flags);
413 return vaddr; 398 return vaddr;
414} 399}
415 400
@@ -420,7 +405,7 @@ static inline void free_pgtable_page(void *vaddr)
420 405
421static inline void *alloc_domain_mem(void) 406static inline void *alloc_domain_mem(void)
422{ 407{
423 return iommu_kmem_cache_alloc(iommu_domain_cache); 408 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
424} 409}
425 410
426static void free_domain_mem(void *vaddr) 411static void free_domain_mem(void *vaddr)
@@ -430,7 +415,7 @@ static void free_domain_mem(void *vaddr)
430 415
431static inline void * alloc_devinfo_mem(void) 416static inline void * alloc_devinfo_mem(void)
432{ 417{
433 return iommu_kmem_cache_alloc(iommu_devinfo_cache); 418 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
434} 419}
435 420
436static inline void free_devinfo_mem(void *vaddr) 421static inline void free_devinfo_mem(void *vaddr)
@@ -440,7 +425,7 @@ static inline void free_devinfo_mem(void *vaddr)
440 425
441struct iova *alloc_iova_mem(void) 426struct iova *alloc_iova_mem(void)
442{ 427{
443 return iommu_kmem_cache_alloc(iommu_iova_cache); 428 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
444} 429}
445 430
446void free_iova_mem(struct iova *iova) 431void free_iova_mem(struct iova *iova)
@@ -589,7 +574,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
589 root = &iommu->root_entry[bus]; 574 root = &iommu->root_entry[bus];
590 context = get_context_addr_from_root(root); 575 context = get_context_addr_from_root(root);
591 if (!context) { 576 if (!context) {
592 context = (struct context_entry *)alloc_pgtable_page(); 577 context = (struct context_entry *)
578 alloc_pgtable_page(iommu->node);
593 if (!context) { 579 if (!context) {
594 spin_unlock_irqrestore(&iommu->lock, flags); 580 spin_unlock_irqrestore(&iommu->lock, flags);
595 return NULL; 581 return NULL;
@@ -732,7 +718,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
732 if (!dma_pte_present(pte)) { 718 if (!dma_pte_present(pte)) {
733 uint64_t pteval; 719 uint64_t pteval;
734 720
735 tmp_page = alloc_pgtable_page(); 721 tmp_page = alloc_pgtable_page(domain->nid);
736 722
737 if (!tmp_page) 723 if (!tmp_page)
738 return NULL; 724 return NULL;
@@ -868,7 +854,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
868 struct root_entry *root; 854 struct root_entry *root;
869 unsigned long flags; 855 unsigned long flags;
870 856
871 root = (struct root_entry *)alloc_pgtable_page(); 857 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
872 if (!root) 858 if (!root)
873 return -ENOMEM; 859 return -ENOMEM;
874 860
@@ -1263,6 +1249,7 @@ static struct dmar_domain *alloc_domain(void)
1263 if (!domain) 1249 if (!domain)
1264 return NULL; 1250 return NULL;
1265 1251
1252 domain->nid = -1;
1266 memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); 1253 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1267 domain->flags = 0; 1254 domain->flags = 0;
1268 1255
@@ -1420,9 +1407,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1420 domain->iommu_snooping = 0; 1407 domain->iommu_snooping = 0;
1421 1408
1422 domain->iommu_count = 1; 1409 domain->iommu_count = 1;
1410 domain->nid = iommu->node;
1423 1411
1424 /* always allocate the top pgd */ 1412 /* always allocate the top pgd */
1425 domain->pgd = (struct dma_pte *)alloc_pgtable_page(); 1413 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1426 if (!domain->pgd) 1414 if (!domain->pgd)
1427 return -ENOMEM; 1415 return -ENOMEM;
1428 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); 1416 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -1523,12 +1511,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1523 1511
1524 /* Skip top levels of page tables for 1512 /* Skip top levels of page tables for
1525 * iommu which has less agaw than default. 1513 * iommu which has less agaw than default.
1514 * Unnecessary for PT mode.
1526 */ 1515 */
1527 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { 1516 if (translation != CONTEXT_TT_PASS_THROUGH) {
1528 pgd = phys_to_virt(dma_pte_addr(pgd)); 1517 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1529 if (!dma_pte_present(pgd)) { 1518 pgd = phys_to_virt(dma_pte_addr(pgd));
1530 spin_unlock_irqrestore(&iommu->lock, flags); 1519 if (!dma_pte_present(pgd)) {
1531 return -ENOMEM; 1520 spin_unlock_irqrestore(&iommu->lock, flags);
1521 return -ENOMEM;
1522 }
1532 } 1523 }
1533 } 1524 }
1534 } 1525 }
@@ -1577,6 +1568,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1577 spin_lock_irqsave(&domain->iommu_lock, flags); 1568 spin_lock_irqsave(&domain->iommu_lock, flags);
1578 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { 1569 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1579 domain->iommu_count++; 1570 domain->iommu_count++;
1571 if (domain->iommu_count == 1)
1572 domain->nid = iommu->node;
1580 domain_update_iommu_cap(domain); 1573 domain_update_iommu_cap(domain);
1581 } 1574 }
1582 spin_unlock_irqrestore(&domain->iommu_lock, flags); 1575 spin_unlock_irqrestore(&domain->iommu_lock, flags);
@@ -1991,6 +1984,16 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1991 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 1984 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1992 pci_name(pdev), start, end); 1985 pci_name(pdev), start, end);
1993 1986
1987 if (end < start) {
1988 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
1989 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1990 dmi_get_system_info(DMI_BIOS_VENDOR),
1991 dmi_get_system_info(DMI_BIOS_VERSION),
1992 dmi_get_system_info(DMI_PRODUCT_VERSION));
1993 ret = -EIO;
1994 goto error;
1995 }
1996
1994 if (end >> agaw_to_width(domain->agaw)) { 1997 if (end >> agaw_to_width(domain->agaw)) {
1995 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" 1998 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1996 "BIOS vendor: %s; Ver: %s; Product Version: %s\n", 1999 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -3228,6 +3231,9 @@ static int device_notifier(struct notifier_block *nb,
3228 struct pci_dev *pdev = to_pci_dev(dev); 3231 struct pci_dev *pdev = to_pci_dev(dev);
3229 struct dmar_domain *domain; 3232 struct dmar_domain *domain;
3230 3233
3234 if (iommu_no_mapping(dev))
3235 return 0;
3236
3231 domain = find_domain(pdev); 3237 domain = find_domain(pdev);
3232 if (!domain) 3238 if (!domain)
3233 return 0; 3239 return 0;
@@ -3455,6 +3461,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
3455 return NULL; 3461 return NULL;
3456 3462
3457 domain->id = vm_domid++; 3463 domain->id = vm_domid++;
3464 domain->nid = -1;
3458 memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); 3465 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3459 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; 3466 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3460 3467
@@ -3481,9 +3488,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
3481 domain->iommu_coherency = 0; 3488 domain->iommu_coherency = 0;
3482 domain->iommu_snooping = 0; 3489 domain->iommu_snooping = 0;
3483 domain->max_addr = 0; 3490 domain->max_addr = 0;
3491 domain->nid = -1;
3484 3492
3485 /* always allocate the top pgd */ 3493 /* always allocate the top pgd */
3486 domain->pgd = (struct dma_pte *)alloc_pgtable_page(); 3494 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3487 if (!domain->pgd) 3495 if (!domain->pgd)
3488 return -ENOMEM; 3496 return -ENOMEM;
3489 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); 3497 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 1487bf2be863..8b65a489581b 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -590,7 +590,8 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
590 if (!iommu->ir_table) 590 if (!iommu->ir_table)
591 return -ENOMEM; 591 return -ENOMEM;
592 592
593 pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); 593 pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
594 INTR_REMAP_PAGE_ORDER);
594 595
595 if (!pages) { 596 if (!pages) {
596 printk(KERN_ERR "failed to allocate pages of order %d\n", 597 printk(KERN_ERR "failed to allocate pages of order %d\n",
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ce52ea34fee5..a49452e2aed9 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -43,7 +43,7 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
43} 43}
44 44
45#ifdef CONFIG_PM 45#ifdef CONFIG_PM
46static struct dev_pm_ops pcie_portdrv_pm_ops = { 46static const struct dev_pm_ops pcie_portdrv_pm_ops = {
47 .suspend = pcie_port_device_suspend, 47 .suspend = pcie_port_device_suspend,
48 .resume = pcie_port_device_resume, 48 .resume = pcie_port_device_resume,
49 .freeze = pcie_port_device_suspend, 49 .freeze = pcie_port_device_suspend,
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index da346eb7e77e..3aabf1e37988 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -336,7 +336,7 @@ static int pxa2xx_drv_pcmcia_resume(struct device *dev)
336 return pcmcia_socket_dev_resume(dev); 336 return pcmcia_socket_dev_resume(dev);
337} 337}
338 338
339static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { 339static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
340 .suspend = pxa2xx_drv_pcmcia_suspend, 340 .suspend = pxa2xx_drv_pcmcia_suspend,
341 .resume = pxa2xx_drv_pcmcia_resume, 341 .resume = pxa2xx_drv_pcmcia_resume,
342}; 342};
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index fe02cfd4b5e9..e4d12acdd525 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1330,7 +1330,7 @@ static int yenta_dev_resume(struct device *dev)
1330 return 0; 1330 return 0;
1331} 1331}
1332 1332
1333static struct dev_pm_ops yenta_pm_ops = { 1333static const struct dev_pm_ops yenta_pm_ops = {
1334 .suspend_noirq = yenta_dev_suspend_noirq, 1334 .suspend_noirq = yenta_dev_suspend_noirq,
1335 .resume_noirq = yenta_dev_resume_noirq, 1335 .resume_noirq = yenta_dev_resume_noirq,
1336 .resume = yenta_dev_resume, 1336 .resume = yenta_dev_resume,
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index ab64522aaa64..be27aa47e810 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -460,7 +460,7 @@ static int acerhdf_remove(struct platform_device *device)
460 return 0; 460 return 0;
461} 461}
462 462
463static struct dev_pm_ops acerhdf_pm_ops = { 463static const struct dev_pm_ops acerhdf_pm_ops = {
464 .suspend = acerhdf_suspend, 464 .suspend = acerhdf_suspend,
465 .freeze = acerhdf_suspend, 465 .freeze = acerhdf_suspend,
466}; 466};
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 4226e5352738..e647a856b9bf 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -154,7 +154,7 @@ static struct eeepc_hotk *ehotk;
154static int eeepc_hotk_thaw(struct device *device); 154static int eeepc_hotk_thaw(struct device *device);
155static int eeepc_hotk_restore(struct device *device); 155static int eeepc_hotk_restore(struct device *device);
156 156
157static struct dev_pm_ops eeepc_pm_ops = { 157static const struct dev_pm_ops eeepc_pm_ops = {
158 .thaw = eeepc_hotk_thaw, 158 .thaw = eeepc_hotk_thaw,
159 .restore = eeepc_hotk_restore, 159 .restore = eeepc_hotk_restore,
160}; 160};
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index c2842171cec6..f00a71c58e69 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -94,7 +94,7 @@ static struct rfkill *wifi_rfkill;
94static struct rfkill *bluetooth_rfkill; 94static struct rfkill *bluetooth_rfkill;
95static struct rfkill *wwan_rfkill; 95static struct rfkill *wwan_rfkill;
96 96
97static struct dev_pm_ops hp_wmi_pm_ops = { 97static const struct dev_pm_ops hp_wmi_pm_ops = {
98 .resume = hp_wmi_resume_handler, 98 .resume = hp_wmi_resume_handler,
99 .restore = hp_wmi_resume_handler, 99 .restore = hp_wmi_resume_handler,
100}; 100};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0ed84806f8ae..cf61d6a8ef6f 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1006,11 +1006,8 @@ static int parse_strtoul(const char *buf,
1006{ 1006{
1007 char *endp; 1007 char *endp;
1008 1008
1009 while (*buf && isspace(*buf)) 1009 *value = simple_strtoul(skip_spaces(buf), &endp, 0);
1010 buf++; 1010 endp = skip_spaces(endp);
1011 *value = simple_strtoul(buf, &endp, 0);
1012 while (*endp && isspace(*endp))
1013 endp++;
1014 if (*endp || *value > max) 1011 if (*endp || *value > max)
1015 return -EINVAL; 1012 return -EINVAL;
1016 1013
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index c3f1c8e9d254..68b0c04987e4 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -310,8 +310,7 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
310 goto done; 310 goto done;
311 } 311 }
312 312
313 while (isspace(*buf)) 313 buf = skip_spaces(buf);
314 ++buf;
315 if (!strnicmp(buf, "disable", 7)) { 314 if (!strnicmp(buf, "disable", 7)) {
316 retval = pnp_disable_dev(dev); 315 retval = pnp_disable_dev(dev);
317 goto done; 316 goto done;
@@ -353,19 +352,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
353 pnp_init_resources(dev); 352 pnp_init_resources(dev);
354 mutex_lock(&pnp_res_mutex); 353 mutex_lock(&pnp_res_mutex);
355 while (1) { 354 while (1) {
356 while (isspace(*buf)) 355 buf = skip_spaces(buf);
357 ++buf;
358 if (!strnicmp(buf, "io", 2)) { 356 if (!strnicmp(buf, "io", 2)) {
359 buf += 2; 357 buf = skip_spaces(buf + 2);
360 while (isspace(*buf))
361 ++buf;
362 start = simple_strtoul(buf, &buf, 0); 358 start = simple_strtoul(buf, &buf, 0);
363 while (isspace(*buf)) 359 buf = skip_spaces(buf);
364 ++buf;
365 if (*buf == '-') { 360 if (*buf == '-') {
366 buf += 1; 361 buf = skip_spaces(buf + 1);
367 while (isspace(*buf))
368 ++buf;
369 end = simple_strtoul(buf, &buf, 0); 362 end = simple_strtoul(buf, &buf, 0);
370 } else 363 } else
371 end = start; 364 end = start;
@@ -373,16 +366,11 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
373 continue; 366 continue;
374 } 367 }
375 if (!strnicmp(buf, "mem", 3)) { 368 if (!strnicmp(buf, "mem", 3)) {
376 buf += 3; 369 buf = skip_spaces(buf + 3);
377 while (isspace(*buf))
378 ++buf;
379 start = simple_strtoul(buf, &buf, 0); 370 start = simple_strtoul(buf, &buf, 0);
380 while (isspace(*buf)) 371 buf = skip_spaces(buf);
381 ++buf;
382 if (*buf == '-') { 372 if (*buf == '-') {
383 buf += 1; 373 buf = skip_spaces(buf + 1);
384 while (isspace(*buf))
385 ++buf;
386 end = simple_strtoul(buf, &buf, 0); 374 end = simple_strtoul(buf, &buf, 0);
387 } else 375 } else
388 end = start; 376 end = start;
@@ -390,17 +378,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
390 continue; 378 continue;
391 } 379 }
392 if (!strnicmp(buf, "irq", 3)) { 380 if (!strnicmp(buf, "irq", 3)) {
393 buf += 3; 381 buf = skip_spaces(buf + 3);
394 while (isspace(*buf))
395 ++buf;
396 start = simple_strtoul(buf, &buf, 0); 382 start = simple_strtoul(buf, &buf, 0);
397 pnp_add_irq_resource(dev, start, 0); 383 pnp_add_irq_resource(dev, start, 0);
398 continue; 384 continue;
399 } 385 }
400 if (!strnicmp(buf, "dma", 3)) { 386 if (!strnicmp(buf, "dma", 3)) {
401 buf += 3; 387 buf = skip_spaces(buf + 3);
402 while (isspace(*buf))
403 ++buf;
404 start = simple_strtoul(buf, &buf, 0); 388 start = simple_strtoul(buf, &buf, 0);
405 pnp_add_dma_resource(dev, start, 0); 389 pnp_add_dma_resource(dev, start, 0);
406 continue; 390 continue;
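pdc_stable.c, thinkpad_acpi.c and this pnp/interface.c hunk all replace open-coded "while (isspace(*buf)) buf++;" loops with skip_spaces(), which simply returns a pointer to the first non-whitespace character. A sketch of a parse loop after the conversion (the function and option names are hypothetical; skip_spaces(), strnicmp() and simple_strtoul() are the kernel helpers used above):

#include <linux/string.h>
#include <linux/kernel.h>

/* Parse "io <start>[-<end>]" out of a user-supplied buffer. */
static void parse_io_range(const char *buf,
			   unsigned long *start, unsigned long *end)
{
	char *p = skip_spaces(buf);		/* skip leading whitespace */

	if (!strnicmp(p, "io", 2)) {
		p = skip_spaces(p + 2);
		*start = simple_strtoul(p, &p, 0);
		p = skip_spaces(p);
		if (*p == '-') {
			p = skip_spaces(p + 1);
			*end = simple_strtoul(p, &p, 0);
		} else {
			*end = *start;
		}
	}
}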
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index b35d921bac6e..2d8ac43f78e8 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -24,6 +24,7 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
26#include <linux/pnp.h> 26#include <linux/pnp.h>
27#include <linux/seq_file.h>
27#include <linux/init.h> 28#include <linux/init.h>
28 29
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
@@ -33,42 +34,65 @@
33static struct proc_dir_entry *proc_pnp = NULL; 34static struct proc_dir_entry *proc_pnp = NULL;
34static struct proc_dir_entry *proc_pnp_boot = NULL; 35static struct proc_dir_entry *proc_pnp_boot = NULL;
35 36
36static int proc_read_pnpconfig(char *buf, char **start, off_t pos, 37static int pnpconfig_proc_show(struct seq_file *m, void *v)
37 int count, int *eof, void *data)
38{ 38{
39 struct pnp_isa_config_struc pnps; 39 struct pnp_isa_config_struc pnps;
40 40
41 if (pnp_bios_isapnp_config(&pnps)) 41 if (pnp_bios_isapnp_config(&pnps))
42 return -EIO; 42 return -EIO;
43 return snprintf(buf, count, 43 seq_printf(m, "structure_revision %d\n"
44 "structure_revision %d\n" 44 "number_of_CSNs %d\n"
45 "number_of_CSNs %d\n" 45 "ISA_read_data_port 0x%x\n",
46 "ISA_read_data_port 0x%x\n", 46 pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
47 pnps.revision, pnps.no_csns, pnps.isa_rd_data_port); 47 return 0;
48} 48}
49 49
50static int proc_read_escdinfo(char *buf, char **start, off_t pos, 50static int pnpconfig_proc_open(struct inode *inode, struct file *file)
51 int count, int *eof, void *data) 51{
52 return single_open(file, pnpconfig_proc_show, NULL);
53}
54
55static const struct file_operations pnpconfig_proc_fops = {
56 .owner = THIS_MODULE,
57 .open = pnpconfig_proc_open,
58 .read = seq_read,
59 .llseek = seq_lseek,
60 .release = single_release,
61};
62
63static int escd_info_proc_show(struct seq_file *m, void *v)
52{ 64{
53 struct escd_info_struc escd; 65 struct escd_info_struc escd;
54 66
55 if (pnp_bios_escd_info(&escd)) 67 if (pnp_bios_escd_info(&escd))
56 return -EIO; 68 return -EIO;
57 return snprintf(buf, count, 69 seq_printf(m, "min_ESCD_write_size %d\n"
58 "min_ESCD_write_size %d\n"
59 "ESCD_size %d\n" 70 "ESCD_size %d\n"
60 "NVRAM_base 0x%x\n", 71 "NVRAM_base 0x%x\n",
61 escd.min_escd_write_size, 72 escd.min_escd_write_size,
62 escd.escd_size, escd.nv_storage_base); 73 escd.escd_size, escd.nv_storage_base);
74 return 0;
63} 75}
64 76
77static int escd_info_proc_open(struct inode *inode, struct file *file)
78{
79 return single_open(file, escd_info_proc_show, NULL);
80}
81
82static const struct file_operations escd_info_proc_fops = {
83 .owner = THIS_MODULE,
84 .open = escd_info_proc_open,
85 .read = seq_read,
86 .llseek = seq_lseek,
87 .release = single_release,
88};
89
65#define MAX_SANE_ESCD_SIZE (32*1024) 90#define MAX_SANE_ESCD_SIZE (32*1024)
66static int proc_read_escd(char *buf, char **start, off_t pos, 91static int escd_proc_show(struct seq_file *m, void *v)
67 int count, int *eof, void *data)
68{ 92{
69 struct escd_info_struc escd; 93 struct escd_info_struc escd;
70 char *tmpbuf; 94 char *tmpbuf;
71 int escd_size, escd_left_to_read, n; 95 int escd_size;
72 96
73 if (pnp_bios_escd_info(&escd)) 97 if (pnp_bios_escd_info(&escd))
74 return -EIO; 98 return -EIO;
@@ -76,7 +100,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
76 /* sanity check */ 100 /* sanity check */
77 if (escd.escd_size > MAX_SANE_ESCD_SIZE) { 101 if (escd.escd_size > MAX_SANE_ESCD_SIZE) {
78 printk(KERN_ERR 102 printk(KERN_ERR
79 "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n"); 103 "PnPBIOS: %s: ESCD size reported by BIOS escd_info call is too great\n", __func__);
80 return -EFBIG; 104 return -EFBIG;
81 } 105 }
82 106
@@ -94,56 +118,75 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
94 118
95 /* sanity check */ 119 /* sanity check */
96 if (escd_size > MAX_SANE_ESCD_SIZE) { 120 if (escd_size > MAX_SANE_ESCD_SIZE) {
97 printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by" 121 printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by"
98 " BIOS read_escd call is too great\n"); 122 " BIOS read_escd call is too great\n", __func__);
99 kfree(tmpbuf); 123 kfree(tmpbuf);
100 return -EFBIG; 124 return -EFBIG;
101 } 125 }
102 126
103 escd_left_to_read = escd_size - pos; 127 seq_write(m, tmpbuf, escd_size);
104 if (escd_left_to_read < 0)
105 escd_left_to_read = 0;
106 if (escd_left_to_read == 0)
107 *eof = 1;
108 n = min(count, escd_left_to_read);
109 memcpy(buf, tmpbuf + pos, n);
110 kfree(tmpbuf); 128 kfree(tmpbuf);
111 *start = buf; 129 return 0;
112 return n;
113} 130}
114 131
115static int proc_read_legacyres(char *buf, char **start, off_t pos, 132static int escd_proc_open(struct inode *inode, struct file *file)
116 int count, int *eof, void *data) 133{
134 return single_open(file, escd_proc_show, NULL);
135}
136
137static const struct file_operations escd_proc_fops = {
138 .owner = THIS_MODULE,
139 .open = escd_proc_open,
140 .read = seq_read,
141 .llseek = seq_lseek,
142 .release = single_release,
143};
144
145static int pnp_legacyres_proc_show(struct seq_file *m, void *v)
117{ 146{
118 /* Assume that the following won't overflow the buffer */ 147 void *buf;
119 if (pnp_bios_get_stat_res(buf)) 148
149 buf = kmalloc(65536, GFP_KERNEL);
150 if (!buf)
151 return -ENOMEM;
152 if (pnp_bios_get_stat_res(buf)) {
153 kfree(buf);
120 return -EIO; 154 return -EIO;
155 }
156
157 seq_write(m, buf, 65536);
158 kfree(buf);
159 return 0;
160}
121 161
122 return count; // FIXME: Return actual length 162static int pnp_legacyres_proc_open(struct inode *inode, struct file *file)
163{
164 return single_open(file, pnp_legacyres_proc_show, NULL);
123} 165}
124 166
125static int proc_read_devices(char *buf, char **start, off_t pos, 167static const struct file_operations pnp_legacyres_proc_fops = {
126 int count, int *eof, void *data) 168 .owner = THIS_MODULE,
169 .open = pnp_legacyres_proc_open,
170 .read = seq_read,
171 .llseek = seq_lseek,
172 .release = single_release,
173};
174
175static int pnp_devices_proc_show(struct seq_file *m, void *v)
127{ 176{
128 struct pnp_bios_node *node; 177 struct pnp_bios_node *node;
129 u8 nodenum; 178 u8 nodenum;
130 char *p = buf;
131
132 if (pos >= 0xff)
133 return 0;
134 179
135 node = kzalloc(node_info.max_node_size, GFP_KERNEL); 180 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
136 if (!node) 181 if (!node)
137 return -ENOMEM; 182 return -ENOMEM;
138 183
139 for (nodenum = pos; nodenum < 0xff;) { 184 for (nodenum = 0; nodenum < 0xff;) {
140 u8 thisnodenum = nodenum; 185 u8 thisnodenum = nodenum;
141 /* 26 = the number of characters per line sprintf'ed */ 186
142 if ((p - buf + 26) > count)
143 break;
144 if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node)) 187 if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
145 break; 188 break;
146 p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n", 189 seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
147 node->handle, node->eisa_id, 190 node->handle, node->eisa_id,
148 node->type_code[0], node->type_code[1], 191 node->type_code[0], node->type_code[1],
149 node->type_code[2], node->flags); 192 node->type_code[2], node->flags);
@@ -153,20 +196,29 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
153 "PnPBIOS: proc_read_devices:", 196 "PnPBIOS: proc_read_devices:",
154 (unsigned int)nodenum, 197 (unsigned int)nodenum,
155 (unsigned int)thisnodenum); 198 (unsigned int)thisnodenum);
156 *eof = 1;
157 break; 199 break;
158 } 200 }
159 } 201 }
160 kfree(node); 202 kfree(node);
161 if (nodenum == 0xff) 203 return 0;
162 *eof = 1; 204}
163 *start = (char *)((off_t) nodenum - pos); 205
164 return p - buf; 206static int pnp_devices_proc_open(struct inode *inode, struct file *file)
207{
208 return single_open(file, pnp_devices_proc_show, NULL);
165} 209}
166 210
167static int proc_read_node(char *buf, char **start, off_t pos, 211static const struct file_operations pnp_devices_proc_fops = {
168 int count, int *eof, void *data) 212 .owner = THIS_MODULE,
213 .open = pnp_devices_proc_open,
214 .read = seq_read,
215 .llseek = seq_lseek,
216 .release = single_release,
217};
218
219static int pnpbios_proc_show(struct seq_file *m, void *v)
169{ 220{
221 void *data = m->private;
170 struct pnp_bios_node *node; 222 struct pnp_bios_node *node;
171 int boot = (long)data >> 8; 223 int boot = (long)data >> 8;
172 u8 nodenum = (long)data; 224 u8 nodenum = (long)data;
@@ -180,14 +232,20 @@ static int proc_read_node(char *buf, char **start, off_t pos,
180 return -EIO; 232 return -EIO;
181 } 233 }
182 len = node->size - sizeof(struct pnp_bios_node); 234 len = node->size - sizeof(struct pnp_bios_node);
183 memcpy(buf, node->data, len); 235 seq_write(m, node->data, len);
184 kfree(node); 236 kfree(node);
185 return len; 237 return 0;
238}
239
240static int pnpbios_proc_open(struct inode *inode, struct file *file)
241{
242 return single_open(file, pnpbios_proc_show, PDE(inode)->data);
186} 243}
187 244
188static int proc_write_node(struct file *file, const char __user * buf, 245static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
189 unsigned long count, void *data) 246 size_t count, loff_t *pos)
190{ 247{
248 void *data = PDE(file->f_path.dentry->d_inode)->data;
191 struct pnp_bios_node *node; 249 struct pnp_bios_node *node;
192 int boot = (long)data >> 8; 250 int boot = (long)data >> 8;
193 u8 nodenum = (long)data; 251 u8 nodenum = (long)data;
@@ -218,34 +276,33 @@ out:
218 return ret; 276 return ret;
219} 277}
220 278
279static const struct file_operations pnpbios_proc_fops = {
280 .owner = THIS_MODULE,
281 .open = pnpbios_proc_open,
282 .read = seq_read,
283 .llseek = seq_lseek,
284 .release = single_release,
285 .write = pnpbios_proc_write,
286};
287
221int pnpbios_interface_attach_device(struct pnp_bios_node *node) 288int pnpbios_interface_attach_device(struct pnp_bios_node *node)
222{ 289{
223 char name[3]; 290 char name[3];
224 struct proc_dir_entry *ent;
225 291
226 sprintf(name, "%02x", node->handle); 292 sprintf(name, "%02x", node->handle);
227 293
228 if (!proc_pnp) 294 if (!proc_pnp)
229 return -EIO; 295 return -EIO;
230 if (!pnpbios_dont_use_current_config) { 296 if (!pnpbios_dont_use_current_config) {
231 ent = create_proc_entry(name, 0, proc_pnp); 297 proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
232 if (ent) { 298 (void *)(long)(node->handle));
233 ent->read_proc = proc_read_node;
234 ent->write_proc = proc_write_node;
235 ent->data = (void *)(long)(node->handle);
236 }
237 } 299 }
238 300
239 if (!proc_pnp_boot) 301 if (!proc_pnp_boot)
240 return -EIO; 302 return -EIO;
241 ent = create_proc_entry(name, 0, proc_pnp_boot); 303 if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
242 if (ent) { 304 (void *)(long)(node->handle + 0x100)))
243 ent->read_proc = proc_read_node;
244 ent->write_proc = proc_write_node;
245 ent->data = (void *)(long)(node->handle + 0x100);
246 return 0; 305 return 0;
247 }
248
249 return -EIO; 306 return -EIO;
250} 307}
251 308
@@ -262,14 +319,11 @@ int __init pnpbios_proc_init(void)
262 proc_pnp_boot = proc_mkdir("boot", proc_pnp); 319 proc_pnp_boot = proc_mkdir("boot", proc_pnp);
263 if (!proc_pnp_boot) 320 if (!proc_pnp_boot)
264 return -EIO; 321 return -EIO;
265 create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL); 322 proc_create("devices", 0, proc_pnp, &pnp_devices_proc_fops);
266 create_proc_read_entry("configuration_info", 0, proc_pnp, 323 proc_create("configuration_info", 0, proc_pnp, &pnpconfig_proc_fops);
267 proc_read_pnpconfig, NULL); 324 proc_create("escd_info", 0, proc_pnp, &escd_info_proc_fops);
268 create_proc_read_entry("escd_info", 0, proc_pnp, proc_read_escdinfo, 325 proc_create("escd", S_IRUSR, proc_pnp, &escd_proc_fops);
269 NULL); 326 proc_create("legacy_device_resources", 0, proc_pnp, &pnp_legacyres_proc_fops);
270 create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL);
271 create_proc_read_entry("legacy_device_resources", 0, proc_pnp,
272 proc_read_legacyres, NULL);
273 327
274 return 0; 328 return 0;
275} 329}
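Within the pnpbios conversion, the legacy_device_resources handler deserves a separate note: the old read_proc version let the BIOS write straight into the supplied page buffer and hoped it would fit, while the new one allocates an explicit 64 KiB buffer, lets the BIOS fill it, and copies it out with seq_write(). A sketch of that bounded-buffer idiom (the 65536 size comes from the hunk above; the function shape and bios_read() callback are hypothetical):

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/errno.h>

#define LEGACY_RES_BUF_SZ 65536

/* bios_read() stands in for the firmware call that fills the buffer. */
static int legacy_res_show(struct seq_file *m, int (*bios_read)(void *buf))
{
	void *buf = kmalloc(LEGACY_RES_BUF_SZ, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (bios_read(buf)) {
		kfree(buf);
		return -EIO;
	}
	seq_write(m, buf, LEGACY_RES_BUF_SZ);	/* seq_file buffers and copies it */
	kfree(buf);
	return 0;
}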
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 118674925516..d4b3d67f0548 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -29,6 +29,13 @@ config APM_POWER
29 Say Y here to enable support APM status emulation using 29 Say Y here to enable support APM status emulation using
30 battery class devices. 30 battery class devices.
31 31
32config WM831X_BACKUP
33 tristate "WM831X backup battery charger support"
34 depends on MFD_WM831X
35 help
36 Say Y here to enable support for the backup battery charger
37 in the Wolfson Microelectronics WM831x PMICs.
38
32config WM831X_POWER 39config WM831X_POWER
33 tristate "WM831X PMU support" 40 tristate "WM831X PMU support"
34 depends on MFD_WM831X 41 depends on MFD_WM831X
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 356cdfd3c8b2..573597c683b4 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
16 16
17obj-$(CONFIG_PDA_POWER) += pda_power.o 17obj-$(CONFIG_PDA_POWER) += pda_power.o
18obj-$(CONFIG_APM_POWER) += apm_power.o 18obj-$(CONFIG_APM_POWER) += apm_power.o
19obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
19obj-$(CONFIG_WM831X_POWER) += wm831x_power.o 20obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
20obj-$(CONFIG_WM8350_POWER) += wm8350_power.o 21obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
21 22
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index 6a84a8eb8d7a..ea3fdfaca90d 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -29,15 +29,12 @@
29struct pcf50633_mbc { 29struct pcf50633_mbc {
30 struct pcf50633 *pcf; 30 struct pcf50633 *pcf;
31 31
32 int adapter_active;
33 int adapter_online; 32 int adapter_online;
34 int usb_active;
35 int usb_online; 33 int usb_online;
36 34
37 struct power_supply usb; 35 struct power_supply usb;
38 struct power_supply adapter; 36 struct power_supply adapter;
39 37 struct power_supply ac;
40 struct delayed_work charging_restart_work;
41}; 38};
42 39
43int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) 40int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
@@ -47,16 +44,21 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
47 u8 bits; 44 u8 bits;
48 int charging_start = 1; 45 int charging_start = 1;
49 u8 mbcs2, chgmod; 46 u8 mbcs2, chgmod;
47 unsigned int mbcc5;
50 48
51 if (ma >= 1000) 49 if (ma >= 1000) {
52 bits = PCF50633_MBCC7_USB_1000mA; 50 bits = PCF50633_MBCC7_USB_1000mA;
53 else if (ma >= 500) 51 ma = 1000;
52 } else if (ma >= 500) {
54 bits = PCF50633_MBCC7_USB_500mA; 53 bits = PCF50633_MBCC7_USB_500mA;
55 else if (ma >= 100) 54 ma = 500;
55 } else if (ma >= 100) {
56 bits = PCF50633_MBCC7_USB_100mA; 56 bits = PCF50633_MBCC7_USB_100mA;
57 else { 57 ma = 100;
58 } else {
58 bits = PCF50633_MBCC7_USB_SUSPEND; 59 bits = PCF50633_MBCC7_USB_SUSPEND;
59 charging_start = 0; 60 charging_start = 0;
61 ma = 0;
60 } 62 }
61 63
62 ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, 64 ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7,
@@ -66,21 +68,40 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
66 else 68 else
67 dev_info(pcf->dev, "usb curlim to %d mA\n", ma); 69 dev_info(pcf->dev, "usb curlim to %d mA\n", ma);
68 70
69 /* Manual charging start */ 71 /*
70 mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); 72 * We limit the charging current to be the USB current limit.
73 * The reason is that on pcf50633, when it enters PMU Standby mode,
74 * which it does when the device goes "off", the USB current limit
75 * reverts to the variant default. In at least one common case, that
76 * default is 500mA. By setting the charging current to be the same
 77 * as the USB limit we set here before PMU standby, we ensure that only
 78 * the correct amount of current is drawn even when the USB current limit
 79 * gets reset to the wrong default.
80 */
81
82 if (mbc->pcf->pdata->charger_reference_current_ma) {
83 mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
84 if (mbcc5 > 255)
85 mbcc5 = 255;
86 pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
87 }
88
89 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
71 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); 90 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
72 91
73 /* If chgmod == BATFULL, setting chgena has no effect. 92 /* If chgmod == BATFULL, setting chgena has no effect.
74 * We need to set resume instead. 93 * Datasheet says we need to set resume instead but when autoresume is
94 * used resume doesn't work. Clear and set chgena instead.
75 */ 95 */
76 if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) 96 if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
77 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, 97 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
78 PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); 98 PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
79 else 99 else {
100 pcf50633_reg_clear_bits(pcf, PCF50633_REG_MBCC1,
101 PCF50633_MBCC1_CHGENA);
80 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, 102 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
81 PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); 103 PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
82 104 }
83 mbc->usb_active = charging_start;
84 105
85 power_supply_changed(&mbc->usb); 106 power_supply_changed(&mbc->usb);
86 107
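The new MBCC5 write scales the requested charge current against the board's charger reference current: mbcc5 = (ma << 8) / charger_reference_current_ma, clamped to 255, i.e. an 8-bit fraction of the reference. A standalone illustration of that arithmetic, using an assumed 1000 mA reference current:

/* Illustration only; the 1000 mA reference current is an assumption. */
#include <stdio.h>

static unsigned int mbcc5_from_ma(unsigned int ma, unsigned int ref_ma)
{
	unsigned int mbcc5 = (ma << 8) / ref_ma;	/* fraction in 1/256ths */

	if (mbcc5 > 255)
		mbcc5 = 255;
	return mbcc5;
}

int main(void)
{
	/* 500 mA against a 1000 mA reference -> 128, i.e. half scale */
	printf("%u\n", mbcc5_from_ma(500, 1000));
	return 0;
}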
@@ -92,20 +113,44 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf)
92{ 113{
93 struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); 114 struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
94 int status = 0; 115 int status = 0;
116 u8 chgmod;
117
118 if (!mbc)
119 return 0;
120
121 chgmod = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2)
122 & PCF50633_MBCS2_MBC_MASK;
95 123
96 if (mbc->usb_online) 124 if (mbc->usb_online)
97 status |= PCF50633_MBC_USB_ONLINE; 125 status |= PCF50633_MBC_USB_ONLINE;
98 if (mbc->usb_active) 126 if (chgmod == PCF50633_MBCS2_MBC_USB_PRE ||
127 chgmod == PCF50633_MBCS2_MBC_USB_PRE_WAIT ||
128 chgmod == PCF50633_MBCS2_MBC_USB_FAST ||
129 chgmod == PCF50633_MBCS2_MBC_USB_FAST_WAIT)
99 status |= PCF50633_MBC_USB_ACTIVE; 130 status |= PCF50633_MBC_USB_ACTIVE;
100 if (mbc->adapter_online) 131 if (mbc->adapter_online)
101 status |= PCF50633_MBC_ADAPTER_ONLINE; 132 status |= PCF50633_MBC_ADAPTER_ONLINE;
102 if (mbc->adapter_active) 133 if (chgmod == PCF50633_MBCS2_MBC_ADP_PRE ||
134 chgmod == PCF50633_MBCS2_MBC_ADP_PRE_WAIT ||
135 chgmod == PCF50633_MBCS2_MBC_ADP_FAST ||
136 chgmod == PCF50633_MBCS2_MBC_ADP_FAST_WAIT)
103 status |= PCF50633_MBC_ADAPTER_ACTIVE; 137 status |= PCF50633_MBC_ADAPTER_ACTIVE;
104 138
105 return status; 139 return status;
106} 140}
107EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); 141EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status);
108 142
143int pcf50633_mbc_get_usb_online_status(struct pcf50633 *pcf)
144{
145 struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
146
147 if (!mbc)
148 return 0;
149
150 return mbc->usb_online;
151}
152EXPORT_SYMBOL_GPL(pcf50633_mbc_get_usb_online_status);
153
109static ssize_t 154static ssize_t
110show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) 155show_chgmode(struct device *dev, struct device_attribute *attr, char *buf)
111{ 156{
@@ -156,9 +201,55 @@ static ssize_t set_usblim(struct device *dev,
156 201
157static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim); 202static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim);
158 203
204static ssize_t
205show_chglim(struct device *dev, struct device_attribute *attr, char *buf)
206{
207 struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
208 u8 mbcc5 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC5);
209 unsigned int ma;
210
211 if (!mbc->pcf->pdata->charger_reference_current_ma)
212 return -ENODEV;
213
214 ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8;
215
216 return sprintf(buf, "%u\n", ma);
217}
218
219static ssize_t set_chglim(struct device *dev,
220 struct device_attribute *attr, const char *buf, size_t count)
221{
222 struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
223 unsigned long ma;
224 unsigned int mbcc5;
225 int ret;
226
227 if (!mbc->pcf->pdata->charger_reference_current_ma)
228 return -ENODEV;
229
230 ret = strict_strtoul(buf, 10, &ma);
231 if (ret)
232 return -EINVAL;
233
234 mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
235 if (mbcc5 > 255)
236 mbcc5 = 255;
237 pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
238
239 return count;
240}
241
242/*
 243 * This attribute allows changing the MBC charging limit on the fly,
 244 * independently of the USB current limit. It also gets updated automatically
 245 * every time the USB current limit is changed.
246 */
247static DEVICE_ATTR(chg_curlim, S_IRUGO | S_IWUSR, show_chglim, set_chglim);
248
159static struct attribute *pcf50633_mbc_sysfs_entries[] = { 249static struct attribute *pcf50633_mbc_sysfs_entries[] = {
160 &dev_attr_chgmode.attr, 250 &dev_attr_chgmode.attr,
161 &dev_attr_usb_curlim.attr, 251 &dev_attr_usb_curlim.attr,
252 &dev_attr_chg_curlim.attr,
162 NULL, 253 NULL,
163}; 254};
164 255
@@ -167,76 +258,26 @@ static struct attribute_group mbc_attr_group = {
167 .attrs = pcf50633_mbc_sysfs_entries, 258 .attrs = pcf50633_mbc_sysfs_entries,
168}; 259};
169 260
170/* MBC state machine switches into charging mode when the battery voltage
171 * falls below 96% of a battery float voltage. But the voltage drop in Li-ion
172 * batteries is marginal(1~2 %) till about 80% of its capacity - which means,
173 * after a BATFULL, charging won't be restarted until 80%.
174 *
175 * This work_struct function restarts charging at regular intervals to make
176 * sure we don't discharge too much
177 */
178
179static void pcf50633_mbc_charging_restart(struct work_struct *work)
180{
181 struct pcf50633_mbc *mbc;
182 u8 mbcs2, chgmod;
183
184 mbc = container_of(work, struct pcf50633_mbc,
185 charging_restart_work.work);
186
187 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
188 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
189
190 if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
191 return;
192
193 /* Restart charging */
194 pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1,
195 PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME);
196 mbc->usb_active = 1;
197 power_supply_changed(&mbc->usb);
198
199 dev_info(mbc->pcf->dev, "Charging restarted\n");
200}
201
202static void 261static void
203pcf50633_mbc_irq_handler(int irq, void *data) 262pcf50633_mbc_irq_handler(int irq, void *data)
204{ 263{
205 struct pcf50633_mbc *mbc = data; 264 struct pcf50633_mbc *mbc = data;
206 int chg_restart_interval =
207 mbc->pcf->pdata->charging_restart_interval;
208 265
209 /* USB */ 266 /* USB */
210 if (irq == PCF50633_IRQ_USBINS) { 267 if (irq == PCF50633_IRQ_USBINS) {
211 mbc->usb_online = 1; 268 mbc->usb_online = 1;
212 } else if (irq == PCF50633_IRQ_USBREM) { 269 } else if (irq == PCF50633_IRQ_USBREM) {
213 mbc->usb_online = 0; 270 mbc->usb_online = 0;
214 mbc->usb_active = 0;
215 pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); 271 pcf50633_mbc_usb_curlim_set(mbc->pcf, 0);
216 cancel_delayed_work_sync(&mbc->charging_restart_work);
217 } 272 }
218 273
219 /* Adapter */ 274 /* Adapter */
220 if (irq == PCF50633_IRQ_ADPINS) { 275 if (irq == PCF50633_IRQ_ADPINS)
221 mbc->adapter_online = 1; 276 mbc->adapter_online = 1;
222 mbc->adapter_active = 1; 277 else if (irq == PCF50633_IRQ_ADPREM)
223 } else if (irq == PCF50633_IRQ_ADPREM) {
224 mbc->adapter_online = 0; 278 mbc->adapter_online = 0;
225 mbc->adapter_active = 0;
226 }
227
228 if (irq == PCF50633_IRQ_BATFULL) {
229 mbc->usb_active = 0;
230 mbc->adapter_active = 0;
231
232 if (chg_restart_interval > 0)
233 schedule_delayed_work(&mbc->charging_restart_work,
234 chg_restart_interval);
235 } else if (irq == PCF50633_IRQ_USBLIMON)
236 mbc->usb_active = 0;
237 else if (irq == PCF50633_IRQ_USBLIMOFF)
238 mbc->usb_active = 1;
239 279
280 power_supply_changed(&mbc->ac);
240 power_supply_changed(&mbc->usb); 281 power_supply_changed(&mbc->usb);
241 power_supply_changed(&mbc->adapter); 282 power_supply_changed(&mbc->adapter);
242 283
@@ -269,10 +310,34 @@ static int usb_get_property(struct power_supply *psy,
269{ 310{
270 struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb); 311 struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb);
271 int ret = 0; 312 int ret = 0;
313 u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
314 PCF50633_MBCC7_USB_MASK;
272 315
273 switch (psp) { 316 switch (psp) {
274 case POWER_SUPPLY_PROP_ONLINE: 317 case POWER_SUPPLY_PROP_ONLINE:
275 val->intval = mbc->usb_online; 318 val->intval = mbc->usb_online &&
319 (usblim <= PCF50633_MBCC7_USB_500mA);
320 break;
321 default:
322 ret = -EINVAL;
323 break;
324 }
325 return ret;
326}
327
328static int ac_get_property(struct power_supply *psy,
329 enum power_supply_property psp,
330 union power_supply_propval *val)
331{
332 struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, ac);
333 int ret = 0;
334 u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
335 PCF50633_MBCC7_USB_MASK;
336
337 switch (psp) {
338 case POWER_SUPPLY_PROP_ONLINE:
339 val->intval = mbc->usb_online &&
340 (usblim == PCF50633_MBCC7_USB_1000mA);
276 break; 341 break;
277 default: 342 default:
278 ret = -EINVAL; 343 ret = -EINVAL;
@@ -336,6 +401,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
336 mbc->usb.supplied_to = mbc->pcf->pdata->batteries; 401 mbc->usb.supplied_to = mbc->pcf->pdata->batteries;
337 mbc->usb.num_supplicants = mbc->pcf->pdata->num_batteries; 402 mbc->usb.num_supplicants = mbc->pcf->pdata->num_batteries;
338 403
404 mbc->ac.name = "ac";
405 mbc->ac.type = POWER_SUPPLY_TYPE_MAINS;
406 mbc->ac.properties = power_props;
407 mbc->ac.num_properties = ARRAY_SIZE(power_props);
408 mbc->ac.get_property = ac_get_property;
409 mbc->ac.supplied_to = mbc->pcf->pdata->batteries;
410 mbc->ac.num_supplicants = mbc->pcf->pdata->num_batteries;
411
339 ret = power_supply_register(&pdev->dev, &mbc->adapter); 412 ret = power_supply_register(&pdev->dev, &mbc->adapter);
340 if (ret) { 413 if (ret) {
341 dev_err(mbc->pcf->dev, "failed to register adapter\n"); 414 dev_err(mbc->pcf->dev, "failed to register adapter\n");
@@ -351,8 +424,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
351 return ret; 424 return ret;
352 } 425 }
353 426
354 INIT_DELAYED_WORK(&mbc->charging_restart_work, 427 ret = power_supply_register(&pdev->dev, &mbc->ac);
355 pcf50633_mbc_charging_restart); 428 if (ret) {
429 dev_err(mbc->pcf->dev, "failed to register ac\n");
430 power_supply_unregister(&mbc->adapter);
431 power_supply_unregister(&mbc->usb);
432 kfree(mbc);
433 return ret;
434 }
356 435
357 ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); 436 ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group);
358 if (ret) 437 if (ret)
@@ -378,8 +457,7 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev)
378 457
379 power_supply_unregister(&mbc->usb); 458 power_supply_unregister(&mbc->usb);
380 power_supply_unregister(&mbc->adapter); 459 power_supply_unregister(&mbc->adapter);
381 460 power_supply_unregister(&mbc->ac);
382 cancel_delayed_work_sync(&mbc->charging_restart_work);
383 461
384 kfree(mbc); 462 kfree(mbc);
385 463
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 08144393d64b..c790e0c77d4b 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -65,7 +65,10 @@ static ssize_t power_supply_show_property(struct device *dev,
65 ret = psy->get_property(psy, off, &value); 65 ret = psy->get_property(psy, off, &value);
66 66
67 if (ret < 0) { 67 if (ret < 0) {
68 if (ret != -ENODEV) 68 if (ret == -ENODATA)
69 dev_dbg(dev, "driver has no data for `%s' property\n",
70 attr->attr.name);
71 else if (ret != -ENODEV)
69 dev_err(dev, "driver failed to report `%s' property\n", 72 dev_err(dev, "driver failed to report `%s' property\n",
70 attr->attr.name); 73 attr->attr.name);
71 return ret; 74 return ret;
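With this change the sysfs layer only logs -ENODATA at debug level, so a driver can return it for a property that is supported but has no meaningful value at the moment. A hedged sketch of a get_property() using that convention (example_* names are hypothetical):

/* Hypothetical driver illustrating the -ENODATA convention: -ENODATA for
 * "no value right now", -EINVAL for "property not supported at all". */
#include <linux/power_supply.h>

struct example_bat {
	struct power_supply psy;
	int status;
	int time_to_empty;
};

static int example_bat_get_property(struct power_supply *psy,
				    enum power_supply_property psp,
				    union power_supply_propval *val)
{
	struct example_bat *bat = container_of(psy, struct example_bat, psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = bat->status;
		return 0;
	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
		if (bat->status != POWER_SUPPLY_STATUS_DISCHARGING)
			return -ENODATA;	/* nothing sensible to report */
		val->intval = bat->time_to_empty;
		return 0;
	default:
		return -EINVAL;
	}
}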
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
new file mode 100644
index 000000000000..bf4f387a8009
--- /dev/null
+++ b/drivers/power/wm831x_backup.c
@@ -0,0 +1,233 @@
1/*
2 * Backup battery driver for Wolfson Microelectronics wm831x PMICs
3 *
4 * Copyright 2009 Wolfson Microelectronics PLC.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/err.h>
13#include <linux/platform_device.h>
14#include <linux/power_supply.h>
15
16#include <linux/mfd/wm831x/core.h>
17#include <linux/mfd/wm831x/auxadc.h>
18#include <linux/mfd/wm831x/pmu.h>
19#include <linux/mfd/wm831x/pdata.h>
20
21struct wm831x_backup {
22 struct wm831x *wm831x;
23 struct power_supply backup;
24};
25
26static int wm831x_backup_read_voltage(struct wm831x *wm831x,
27 enum wm831x_auxadc src,
28 union power_supply_propval *val)
29{
30 int ret;
31
32 ret = wm831x_auxadc_read_uv(wm831x, src);
33 if (ret >= 0)
34 val->intval = ret;
35
36 return ret;
37}
38
39/*********************************************************************
40 * Backup supply properties
41 *********************************************************************/
42
43static void wm831x_config_backup(struct wm831x *wm831x)
44{
45 struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
46 struct wm831x_backup_pdata *pdata;
47 int ret, reg;
48
49 if (!wm831x_pdata || !wm831x_pdata->backup) {
50 dev_warn(wm831x->dev,
51 "No backup battery charger configuration\n");
52 return;
53 }
54
55 pdata = wm831x_pdata->backup;
56
57 reg = 0;
58
59 if (pdata->charger_enable)
60 reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
61 if (pdata->no_constant_voltage)
62 reg |= WM831X_BKUP_CHG_MODE;
63
64 switch (pdata->vlim) {
65 case 2500:
66 break;
67 case 3100:
68 reg |= WM831X_BKUP_CHG_VLIM;
69 break;
70 default:
71 dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
72 pdata->vlim);
73 }
74
75 switch (pdata->ilim) {
76 case 100:
77 break;
78 case 200:
79 reg |= 1;
80 break;
81 case 300:
82 reg |= 2;
83 break;
84 case 400:
85 reg |= 3;
86 break;
87 default:
88 dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
89 pdata->ilim);
90 }
91
92 ret = wm831x_reg_unlock(wm831x);
93 if (ret != 0) {
94 dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
95 return;
96 }
97
98 ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
99 WM831X_BKUP_CHG_ENA_MASK |
100 WM831X_BKUP_CHG_MODE_MASK |
101 WM831X_BKUP_BATT_DET_ENA_MASK |
102 WM831X_BKUP_CHG_VLIM_MASK |
103 WM831X_BKUP_CHG_ILIM_MASK,
104 reg);
105 if (ret != 0)
106 dev_err(wm831x->dev,
107 "Failed to set backup charger config: %d\n", ret);
108
109 wm831x_reg_lock(wm831x);
110}
111
112static int wm831x_backup_get_prop(struct power_supply *psy,
113 enum power_supply_property psp,
114 union power_supply_propval *val)
115{
116 struct wm831x_backup *devdata = dev_get_drvdata(psy->dev->parent);
117 struct wm831x *wm831x = devdata->wm831x;
118 int ret = 0;
119
120 ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
121 if (ret < 0)
122 return ret;
123
124 switch (psp) {
125 case POWER_SUPPLY_PROP_STATUS:
126 if (ret & WM831X_BKUP_CHG_STS)
127 val->intval = POWER_SUPPLY_STATUS_CHARGING;
128 else
129 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
130 break;
131
132 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
133 ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
134 val);
135 break;
136
137 case POWER_SUPPLY_PROP_PRESENT:
138 if (ret & WM831X_BKUP_CHG_STS)
139 val->intval = 1;
140 else
141 val->intval = 0;
142 break;
143
144 default:
145 ret = -EINVAL;
146 break;
147 }
148
149 return ret;
150}
151
152static enum power_supply_property wm831x_backup_props[] = {
153 POWER_SUPPLY_PROP_STATUS,
154 POWER_SUPPLY_PROP_VOLTAGE_NOW,
155 POWER_SUPPLY_PROP_PRESENT,
156};
157
158/*********************************************************************
159 * Initialisation
160 *********************************************************************/
161
162static __devinit int wm831x_backup_probe(struct platform_device *pdev)
163{
164 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
165 struct wm831x_backup *devdata;
166 struct power_supply *backup;
167 int ret;
168
169 devdata = kzalloc(sizeof(struct wm831x_backup), GFP_KERNEL);
170 if (devdata == NULL)
171 return -ENOMEM;
172
173 devdata->wm831x = wm831x;
174 platform_set_drvdata(pdev, devdata);
175
176 backup = &devdata->backup;
177
178 /* We ignore configuration failures since we can still read
179 * back the status without enabling the charger (which may
180 * already be enabled anyway).
181 */
182 wm831x_config_backup(wm831x);
183
184 backup->name = "wm831x-backup";
185 backup->type = POWER_SUPPLY_TYPE_BATTERY;
186 backup->properties = wm831x_backup_props;
187 backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
188 backup->get_property = wm831x_backup_get_prop;
189 ret = power_supply_register(&pdev->dev, backup);
190 if (ret)
191 goto err_kmalloc;
192
193 return ret;
194
195err_kmalloc:
196 kfree(devdata);
197 return ret;
198}
199
200static __devexit int wm831x_backup_remove(struct platform_device *pdev)
201{
202 struct wm831x_backup *devdata = platform_get_drvdata(pdev);
203
204 power_supply_unregister(&devdata->backup);
205 kfree(devdata);
206
207 return 0;
208}
209
210static struct platform_driver wm831x_backup_driver = {
211 .probe = wm831x_backup_probe,
212 .remove = __devexit_p(wm831x_backup_remove),
213 .driver = {
214 .name = "wm831x-backup",
215 },
216};
217
218static int __init wm831x_backup_init(void)
219{
220 return platform_driver_register(&wm831x_backup_driver);
221}
222module_init(wm831x_backup_init);
223
224static void __exit wm831x_backup_exit(void)
225{
226 platform_driver_unregister(&wm831x_backup_driver);
227}
228module_exit(wm831x_backup_exit);
229
230MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs");
231MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
232MODULE_LICENSE("GPL");
233MODULE_ALIAS("platform:wm831x-backup");
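wm831x_config_backup() takes its settings from the backup member of the wm831x platform data. A sketch of how board code might provide that configuration; the particular limits below (3100 mV, 200 uA) are illustrative assumptions, not values from this patch:

/* Board-level platform data sketch for the new backup charger driver. */
#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_backup_pdata example_backup_pdata = {
	.charger_enable      = 1,	/* turn the backup charger on */
	.no_constant_voltage = 0,	/* keep constant-voltage mode */
	.vlim                = 3100,	/* mV; 2500 or 3100 are accepted above */
	.ilim                = 200,	/* uA; 100/200/300/400 are accepted above */
};

static struct wm831x_pdata example_wm831x_pdata = {
	.backup = &example_backup_pdata,
	/* ... remaining wm831x platform data ... */
};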
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 2a4c8b0b829c..f85e80b1b400 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -21,7 +21,6 @@
21struct wm831x_power { 21struct wm831x_power {
22 struct wm831x *wm831x; 22 struct wm831x *wm831x;
23 struct power_supply wall; 23 struct power_supply wall;
24 struct power_supply backup;
25 struct power_supply usb; 24 struct power_supply usb;
26 struct power_supply battery; 25 struct power_supply battery;
27}; 26};
@@ -454,125 +453,6 @@ static irqreturn_t wm831x_bat_irq(int irq, void *data)
454 453
455 454
456/********************************************************************* 455/*********************************************************************
457 * Backup supply properties
458 *********************************************************************/
459
460static void wm831x_config_backup(struct wm831x *wm831x)
461{
462 struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
463 struct wm831x_backup_pdata *pdata;
464 int ret, reg;
465
466 if (!wm831x_pdata || !wm831x_pdata->backup) {
467 dev_warn(wm831x->dev,
468 "No backup battery charger configuration\n");
469 return;
470 }
471
472 pdata = wm831x_pdata->backup;
473
474 reg = 0;
475
476 if (pdata->charger_enable)
477 reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
478 if (pdata->no_constant_voltage)
479 reg |= WM831X_BKUP_CHG_MODE;
480
481 switch (pdata->vlim) {
482 case 2500:
483 break;
484 case 3100:
485 reg |= WM831X_BKUP_CHG_VLIM;
486 break;
487 default:
488 dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
489 pdata->vlim);
490 }
491
492 switch (pdata->ilim) {
493 case 100:
494 break;
495 case 200:
496 reg |= 1;
497 break;
498 case 300:
499 reg |= 2;
500 break;
501 case 400:
502 reg |= 3;
503 break;
504 default:
505 dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
506 pdata->ilim);
507 }
508
509 ret = wm831x_reg_unlock(wm831x);
510 if (ret != 0) {
511 dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
512 return;
513 }
514
515 ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
516 WM831X_BKUP_CHG_ENA_MASK |
517 WM831X_BKUP_CHG_MODE_MASK |
518 WM831X_BKUP_BATT_DET_ENA_MASK |
519 WM831X_BKUP_CHG_VLIM_MASK |
520 WM831X_BKUP_CHG_ILIM_MASK,
521 reg);
522 if (ret != 0)
523 dev_err(wm831x->dev,
524 "Failed to set backup charger config: %d\n", ret);
525
526 wm831x_reg_lock(wm831x);
527}
528
529static int wm831x_backup_get_prop(struct power_supply *psy,
530 enum power_supply_property psp,
531 union power_supply_propval *val)
532{
533 struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent);
534 struct wm831x *wm831x = wm831x_power->wm831x;
535 int ret = 0;
536
537 ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
538 if (ret < 0)
539 return ret;
540
541 switch (psp) {
542 case POWER_SUPPLY_PROP_STATUS:
543 if (ret & WM831X_BKUP_CHG_STS)
544 val->intval = POWER_SUPPLY_STATUS_CHARGING;
545 else
546 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
547 break;
548
549 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
550 ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
551 val);
552 break;
553
554 case POWER_SUPPLY_PROP_PRESENT:
555 if (ret & WM831X_BKUP_CHG_STS)
556 val->intval = 1;
557 else
558 val->intval = 0;
559 break;
560
561 default:
562 ret = -EINVAL;
563 break;
564 }
565
566 return ret;
567}
568
569static enum power_supply_property wm831x_backup_props[] = {
570 POWER_SUPPLY_PROP_STATUS,
571 POWER_SUPPLY_PROP_VOLTAGE_NOW,
572 POWER_SUPPLY_PROP_PRESENT,
573};
574
575/*********************************************************************
576 * Initialisation 456 * Initialisation
577 *********************************************************************/ 457 *********************************************************************/
578 458
@@ -595,10 +475,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
595 475
596 dev_dbg(wm831x->dev, "Power source changed\n"); 476 dev_dbg(wm831x->dev, "Power source changed\n");
597 477
598 /* Just notify for everything - little harm in overnotifying. 478 /* Just notify for everything - little harm in overnotifying. */
599 * The backup battery is not a power source while the system
600 * is running so skip that.
601 */
602 power_supply_changed(&wm831x_power->battery); 479 power_supply_changed(&wm831x_power->battery);
603 power_supply_changed(&wm831x_power->usb); 480 power_supply_changed(&wm831x_power->usb);
604 power_supply_changed(&wm831x_power->wall); 481 power_supply_changed(&wm831x_power->wall);
@@ -613,7 +490,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
613 struct power_supply *usb; 490 struct power_supply *usb;
614 struct power_supply *battery; 491 struct power_supply *battery;
615 struct power_supply *wall; 492 struct power_supply *wall;
616 struct power_supply *backup;
617 int ret, irq, i; 493 int ret, irq, i;
618 494
619 power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL); 495 power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL);
@@ -626,13 +502,11 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
626 usb = &power->usb; 502 usb = &power->usb;
627 battery = &power->battery; 503 battery = &power->battery;
628 wall = &power->wall; 504 wall = &power->wall;
629 backup = &power->backup;
630 505
631 /* We ignore configuration failures since we can still read back 506 /* We ignore configuration failures since we can still read back
632 * the status without enabling either of the chargers. 507 * the status without enabling the charger.
633 */ 508 */
634 wm831x_config_battery(wm831x); 509 wm831x_config_battery(wm831x);
635 wm831x_config_backup(wm831x);
636 510
637 wall->name = "wm831x-wall"; 511 wall->name = "wm831x-wall";
638 wall->type = POWER_SUPPLY_TYPE_MAINS; 512 wall->type = POWER_SUPPLY_TYPE_MAINS;
@@ -661,15 +535,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
661 if (ret) 535 if (ret)
662 goto err_battery; 536 goto err_battery;
663 537
664 backup->name = "wm831x-backup";
665 backup->type = POWER_SUPPLY_TYPE_BATTERY;
666 backup->properties = wm831x_backup_props;
667 backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
668 backup->get_property = wm831x_backup_get_prop;
669 ret = power_supply_register(&pdev->dev, backup);
670 if (ret)
671 goto err_usb;
672
673 irq = platform_get_irq_byname(pdev, "SYSLO"); 538 irq = platform_get_irq_byname(pdev, "SYSLO");
674 ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq, 539 ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq,
675 IRQF_TRIGGER_RISING, "SYSLO", 540 IRQF_TRIGGER_RISING, "SYSLO",
@@ -677,7 +542,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
677 if (ret != 0) { 542 if (ret != 0) {
678 dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", 543 dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
679 irq, ret); 544 irq, ret);
680 goto err_backup; 545 goto err_usb;
681 } 546 }
682 547
683 irq = platform_get_irq_byname(pdev, "PWR SRC"); 548 irq = platform_get_irq_byname(pdev, "PWR SRC");
@@ -716,8 +581,6 @@ err_bat_irq:
716err_syslo: 581err_syslo:
717 irq = platform_get_irq_byname(pdev, "SYSLO"); 582 irq = platform_get_irq_byname(pdev, "SYSLO");
718 wm831x_free_irq(wm831x, irq, power); 583 wm831x_free_irq(wm831x, irq, power);
719err_backup:
720 power_supply_unregister(backup);
721err_usb: 584err_usb:
722 power_supply_unregister(usb); 585 power_supply_unregister(usb);
723err_battery: 586err_battery:
@@ -746,7 +609,6 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev)
746 irq = platform_get_irq_byname(pdev, "SYSLO"); 609 irq = platform_get_irq_byname(pdev, "SYSLO");
747 wm831x_free_irq(wm831x, irq, wm831x_power); 610 wm831x_free_irq(wm831x, irq, wm831x_power);
748 611
749 power_supply_unregister(&wm831x_power->backup);
750 power_supply_unregister(&wm831x_power->battery); 612 power_supply_unregister(&wm831x_power->battery);
751 power_supply_unregister(&wm831x_power->wall); 613 power_supply_unregister(&wm831x_power->wall);
752 power_supply_unregister(&wm831x_power->usb); 614 power_supply_unregister(&wm831x_power->usb);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index f2bfd296dbae..fa39e759a275 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -157,7 +157,7 @@ static int wm97xx_bat_resume(struct device *dev)
157 return 0; 157 return 0;
158} 158}
159 159
160static struct dev_pm_ops wm97xx_bat_pm_ops = { 160static const struct dev_pm_ops wm97xx_bat_pm_ops = {
161 .suspend = wm97xx_bat_suspend, 161 .suspend = wm97xx_bat_suspend,
162 .resume = wm97xx_bat_resume, 162 .resume = wm97xx_bat_resume,
163}; 163};
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 71fbd6e8edf7..8167e9e6827a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -242,6 +242,15 @@ config RTC_DRV_M41T80_WDT
242 If you say Y here you will get support for the 242 If you say Y here you will get support for the
243 watchdog timer in the ST M41T60 and M41T80 RTC chips series. 243 watchdog timer in the ST M41T60 and M41T80 RTC chips series.
244 244
245config RTC_DRV_BQ32K
246 tristate "TI BQ32000"
247 help
248 If you say Y here you will get support for the TI
249 BQ32000 I2C RTC chip.
250
251 This driver can also be built as a module. If so, the module
252 will be called rtc-bq32k.
253
245config RTC_DRV_DM355EVM 254config RTC_DRV_DM355EVM
246 tristate "TI DaVinci DM355 EVM RTC" 255 tristate "TI DaVinci DM355 EVM RTC"
247 depends on MFD_DM355EVM_MSP 256 depends on MFD_DM355EVM_MSP
@@ -592,15 +601,22 @@ config RTC_DRV_AB3100
592 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC 601 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
593 support. This chip contains a battery- and capacitor-backed RTC. 602 support. This chip contains a battery- and capacitor-backed RTC.
594 603
604config RTC_DRV_NUC900
605 tristate "NUC910/NUC920 RTC driver"
606 depends on RTC_CLASS && ARCH_W90X900
607 help
608 If you say yes here you get support for the RTC subsystem of the
609 NUC910/NUC920 used in embedded systems.
595 610
596comment "on-CPU RTC drivers" 611comment "on-CPU RTC drivers"
597 612
598config RTC_DRV_OMAP 613config RTC_DRV_OMAP
599 tristate "TI OMAP1" 614 tristate "TI OMAP1"
600 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 615 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
601 help 616 help
602 Say "yes" here to support the real time clock on TI OMAP1 chips. 617 Say "yes" here to support the real time clock on TI OMAP1 and
603 This driver can also be built as a module called rtc-omap. 618 DA8xx/OMAP-L13x chips. This driver can also be built as a
619 module called rtc-omap.
604 620
605config RTC_DRV_S3C 621config RTC_DRV_S3C
606 tristate "Samsung S3C series SoC RTC" 622 tristate "Samsung S3C series SoC RTC"
@@ -846,4 +862,10 @@ config RTC_DRV_PCAP
846 If you say Y here you will get support for the RTC found on 862 If you say Y here you will get support for the RTC found on
847 the PCAP2 ASIC used on some Motorola phones. 863 the PCAP2 ASIC used on some Motorola phones.
848 864
865config RTC_DRV_MC13783
866 depends on MFD_MC13783
867 tristate "Freescale MC13783 RTC"
868 help
869 This enables support for the Freescale MC13783 PMIC RTC
870
849endif # RTC_CLASS 871endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 7da6efb3e953..e5160fddc446 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
23obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o 23obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
24obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o 24obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
25obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o 25obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
26obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
26obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o 27obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
27obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o 28obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
28obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o 29obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
@@ -52,8 +53,10 @@ obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
52obj-$(CONFIG_RTC_MXC) += rtc-mxc.o 53obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
53obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o 54obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
54obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o 55obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
56obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o
55obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o 57obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
56obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o 58obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
59obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
57obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o 60obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
58obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o 61obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
59obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o 62obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index e1ec33e40e38..8825695777df 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -256,6 +256,8 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
256 goto out_iounmap; 256 goto out_iounmap;
257 } 257 }
258 258
259 platform_set_drvdata(pdev, rtc);
260
259 rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, 261 rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
260 &at32_rtc_ops, THIS_MODULE); 262 &at32_rtc_ops, THIS_MODULE);
261 if (IS_ERR(rtc->rtc)) { 263 if (IS_ERR(rtc->rtc)) {
@@ -264,7 +266,6 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
264 goto out_free_irq; 266 goto out_free_irq;
265 } 267 }
266 268
267 platform_set_drvdata(pdev, rtc);
268 device_init_wakeup(&pdev->dev, 1); 269 device_init_wakeup(&pdev->dev, 1);
269 270
270 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n", 271 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
@@ -273,6 +274,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
273 return 0; 274 return 0;
274 275
275out_free_irq: 276out_free_irq:
277 platform_set_drvdata(pdev, NULL);
276 free_irq(irq, rtc); 278 free_irq(irq, rtc);
277out_iounmap: 279out_iounmap:
278 iounmap(rtc->regs); 280 iounmap(rtc->regs);
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
new file mode 100644
index 000000000000..408cc8f735be
--- /dev/null
+++ b/drivers/rtc/rtc-bq32k.c
@@ -0,0 +1,204 @@
1/*
2 * Driver for TI BQ32000 RTC.
3 *
4 * Copyright (C) 2009 Semihalf.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/i2c.h>
13#include <linux/rtc.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/bcd.h>
17
18#define BQ32K_SECONDS 0x00 /* Seconds register address */
19#define BQ32K_SECONDS_MASK 0x7F /* Mask over seconds value */
 20#define BQ32K_STOP 0x80 /* Oscillator Stop flag */
21
22#define BQ32K_MINUTES 0x01 /* Minutes register address */
23#define BQ32K_MINUTES_MASK 0x7F /* Mask over minutes value */
24#define BQ32K_OF 0x80 /* Oscillator Failure flag */
25
26#define BQ32K_HOURS_MASK 0x3F /* Mask over hours value */
27#define BQ32K_CENT 0x40 /* Century flag */
28#define BQ32K_CENT_EN 0x80 /* Century flag enable bit */
29
30struct bq32k_regs {
31 uint8_t seconds;
32 uint8_t minutes;
33 uint8_t cent_hours;
34 uint8_t day;
35 uint8_t date;
36 uint8_t month;
37 uint8_t years;
38};
39
40static struct i2c_driver bq32k_driver;
41
42static int bq32k_read(struct device *dev, void *data, uint8_t off, uint8_t len)
43{
44 struct i2c_client *client = to_i2c_client(dev);
45 struct i2c_msg msgs[] = {
46 {
47 .addr = client->addr,
48 .flags = 0,
49 .len = 1,
50 .buf = &off,
51 }, {
52 .addr = client->addr,
53 .flags = I2C_M_RD,
54 .len = len,
55 .buf = data,
56 }
57 };
58
59 if (i2c_transfer(client->adapter, msgs, 2) == 2)
60 return 0;
61
62 return -EIO;
63}
64
65static int bq32k_write(struct device *dev, void *data, uint8_t off, uint8_t len)
66{
67 struct i2c_client *client = to_i2c_client(dev);
68 uint8_t buffer[len + 1];
69
70 buffer[0] = off;
71 memcpy(&buffer[1], data, len);
72
73 if (i2c_master_send(client, buffer, len + 1) == len + 1)
74 return 0;
75
76 return -EIO;
77}
78
79static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
80{
81 struct bq32k_regs regs;
82 int error;
83
84 error = bq32k_read(dev, &regs, 0, sizeof(regs));
85 if (error)
86 return error;
87
88 tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
 89 tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
90 tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
91 tm->tm_mday = bcd2bin(regs.date);
92 tm->tm_wday = bcd2bin(regs.day) - 1;
93 tm->tm_mon = bcd2bin(regs.month) - 1;
94 tm->tm_year = bcd2bin(regs.years) +
95 ((regs.cent_hours & BQ32K_CENT) ? 100 : 0);
96
97 return rtc_valid_tm(tm);
98}
99
100static int bq32k_rtc_set_time(struct device *dev, struct rtc_time *tm)
101{
102 struct bq32k_regs regs;
103
104 regs.seconds = bin2bcd(tm->tm_sec);
105 regs.minutes = bin2bcd(tm->tm_min);
106 regs.cent_hours = bin2bcd(tm->tm_hour) | BQ32K_CENT_EN;
107 regs.day = bin2bcd(tm->tm_wday + 1);
108 regs.date = bin2bcd(tm->tm_mday);
109 regs.month = bin2bcd(tm->tm_mon + 1);
110
111 if (tm->tm_year >= 100) {
112 regs.cent_hours |= BQ32K_CENT;
113 regs.years = bin2bcd(tm->tm_year - 100);
114 } else
115 regs.years = bin2bcd(tm->tm_year);
116
117 return bq32k_write(dev, &regs, 0, sizeof(regs));
118}
119
120static const struct rtc_class_ops bq32k_rtc_ops = {
121 .read_time = bq32k_rtc_read_time,
122 .set_time = bq32k_rtc_set_time,
123};
124
125static int bq32k_probe(struct i2c_client *client,
126 const struct i2c_device_id *id)
127{
128 struct device *dev = &client->dev;
129 struct rtc_device *rtc;
130 uint8_t reg;
131 int error;
132
133 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
134 return -ENODEV;
135
136 /* Check Oscillator Stop flag */
137 error = bq32k_read(dev, &reg, BQ32K_SECONDS, 1);
138 if (!error && (reg & BQ32K_STOP)) {
139 dev_warn(dev, "Oscillator was halted. Restarting...\n");
140 reg &= ~BQ32K_STOP;
141 error = bq32k_write(dev, &reg, BQ32K_SECONDS, 1);
142 }
143 if (error)
144 return error;
145
146 /* Check Oscillator Failure flag */
147 error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
148 if (!error && (reg & BQ32K_OF)) {
149 dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
150 reg &= ~BQ32K_OF;
151 error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
152 }
153 if (error)
154 return error;
155
156 rtc = rtc_device_register(bq32k_driver.driver.name, &client->dev,
157 &bq32k_rtc_ops, THIS_MODULE);
158 if (IS_ERR(rtc))
159 return PTR_ERR(rtc);
160
161 i2c_set_clientdata(client, rtc);
162
163 return 0;
164}
165
166static int __devexit bq32k_remove(struct i2c_client *client)
167{
168 struct rtc_device *rtc = i2c_get_clientdata(client);
169
170 rtc_device_unregister(rtc);
171 return 0;
172}
173
174static const struct i2c_device_id bq32k_id[] = {
175 { "bq32000", 0 },
176 { }
177};
178MODULE_DEVICE_TABLE(i2c, bq32k_id);
179
180static struct i2c_driver bq32k_driver = {
181 .driver = {
182 .name = "bq32k",
183 .owner = THIS_MODULE,
184 },
185 .probe = bq32k_probe,
186 .remove = __devexit_p(bq32k_remove),
187 .id_table = bq32k_id,
188};
189
190static __init int bq32k_init(void)
191{
192 return i2c_add_driver(&bq32k_driver);
193}
194module_init(bq32k_init);
195
196static __exit void bq32k_exit(void)
197{
198 i2c_del_driver(&bq32k_driver);
199}
200module_exit(bq32k_exit);
201
202MODULE_AUTHOR("Semihalf, Piotr Ziecik <kosmo@semihalf.com>");
203MODULE_DESCRIPTION("TI BQ32000 I2C RTC driver");
204MODULE_LICENSE("GPL");
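The driver matches on the "bq32000" entry in bq32k_id[], so board code has to declare the device explicitly. A hedged sketch of that registration; the bus number 0 and address 0x68 are assumptions for illustration, check the actual board design:

/* Board code sketch instantiating the BQ32000 on I2C bus 0 at 0x68. */
#include <linux/i2c.h>

static struct i2c_board_info example_rtc_i2c_info[] __initdata = {
	{
		I2C_BOARD_INFO("bq32000", 0x68),
	},
};

static int __init example_board_rtc_init(void)
{
	return i2c_register_board_info(0, example_rtc_i2c_info,
				       ARRAY_SIZE(example_rtc_i2c_info));
}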
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index d00a274df8fc..280fe48ada0b 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -169,6 +169,8 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
169 goto out_free; 169 goto out_free;
170 } 170 }
171 171
172 platform_set_drvdata(pdev, p);
173
172 p->rtc = rtc_device_register("bq4802", &pdev->dev, 174 p->rtc = rtc_device_register("bq4802", &pdev->dev,
173 &bq4802_ops, THIS_MODULE); 175 &bq4802_ops, THIS_MODULE);
174 if (IS_ERR(p->rtc)) { 176 if (IS_ERR(p->rtc)) {
@@ -176,7 +178,6 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
176 goto out_iounmap; 178 goto out_iounmap;
177 } 179 }
178 180
179 platform_set_drvdata(pdev, p);
180 err = 0; 181 err = 0;
181out: 182out:
182 return err; 183 return err;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index f7a4701bf863..eb154dc57164 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -420,49 +420,43 @@ static int cmos_irq_set_state(struct device *dev, int enabled)
420 return 0; 420 return 0;
421} 421}
422 422
423#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) 423static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
424
425static int
426cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
427{ 424{
428 struct cmos_rtc *cmos = dev_get_drvdata(dev); 425 struct cmos_rtc *cmos = dev_get_drvdata(dev);
429 unsigned long flags; 426 unsigned long flags;
430 427
431 switch (cmd) { 428 if (!is_valid_irq(cmos->irq))
432 case RTC_AIE_OFF: 429 return -EINVAL;
433 case RTC_AIE_ON:
434 case RTC_UIE_OFF:
435 case RTC_UIE_ON:
436 if (!is_valid_irq(cmos->irq))
437 return -EINVAL;
438 break;
439 /* PIE ON/OFF is handled by cmos_irq_set_state() */
440 default:
441 return -ENOIOCTLCMD;
442 }
443 430
444 spin_lock_irqsave(&rtc_lock, flags); 431 spin_lock_irqsave(&rtc_lock, flags);
445 switch (cmd) { 432
446 case RTC_AIE_OFF: /* alarm off */ 433 if (enabled)
447 cmos_irq_disable(cmos, RTC_AIE);
448 break;
449 case RTC_AIE_ON: /* alarm on */
450 cmos_irq_enable(cmos, RTC_AIE); 434 cmos_irq_enable(cmos, RTC_AIE);
451 break; 435 else
452 case RTC_UIE_OFF: /* update off */ 436 cmos_irq_disable(cmos, RTC_AIE);
453 cmos_irq_disable(cmos, RTC_UIE); 437
454 break;
455 case RTC_UIE_ON: /* update on */
456 cmos_irq_enable(cmos, RTC_UIE);
457 break;
458 }
459 spin_unlock_irqrestore(&rtc_lock, flags); 438 spin_unlock_irqrestore(&rtc_lock, flags);
460 return 0; 439 return 0;
461} 440}
462 441
463#else 442static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
464#define cmos_rtc_ioctl NULL 443{
465#endif 444 struct cmos_rtc *cmos = dev_get_drvdata(dev);
445 unsigned long flags;
446
447 if (!is_valid_irq(cmos->irq))
448 return -EINVAL;
449
450 spin_lock_irqsave(&rtc_lock, flags);
451
452 if (enabled)
453 cmos_irq_enable(cmos, RTC_UIE);
454 else
455 cmos_irq_disable(cmos, RTC_UIE);
456
457 spin_unlock_irqrestore(&rtc_lock, flags);
458 return 0;
459}
466 460
467#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) 461#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
468 462
@@ -503,14 +497,15 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
503#endif 497#endif
504 498
505static const struct rtc_class_ops cmos_rtc_ops = { 499static const struct rtc_class_ops cmos_rtc_ops = {
506 .ioctl = cmos_rtc_ioctl, 500 .read_time = cmos_read_time,
507 .read_time = cmos_read_time, 501 .set_time = cmos_set_time,
508 .set_time = cmos_set_time, 502 .read_alarm = cmos_read_alarm,
509 .read_alarm = cmos_read_alarm, 503 .set_alarm = cmos_set_alarm,
510 .set_alarm = cmos_set_alarm, 504 .proc = cmos_procfs,
511 .proc = cmos_procfs, 505 .irq_set_freq = cmos_irq_set_freq,
512 .irq_set_freq = cmos_irq_set_freq, 506 .irq_set_state = cmos_irq_set_state,
513 .irq_set_state = cmos_irq_set_state, 507 .alarm_irq_enable = cmos_alarm_irq_enable,
508 .update_irq_enable = cmos_update_irq_enable,
514}; 509};
515 510
516/*----------------------------------------------------------------*/ 511/*----------------------------------------------------------------*/
@@ -871,8 +866,9 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
871 mask = RTC_IRQMASK; 866 mask = RTC_IRQMASK;
872 tmp &= ~mask; 867 tmp &= ~mask;
873 CMOS_WRITE(tmp, RTC_CONTROL); 868 CMOS_WRITE(tmp, RTC_CONTROL);
874 hpet_mask_rtc_irq_bit(mask);
875 869
870 /* shut down hpet emulation - we don't need it for alarm */
871 hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
876 cmos_checkintr(cmos, tmp); 872 cmos_checkintr(cmos, tmp);
877 } 873 }
878 spin_unlock_irq(&rtc_lock); 874 spin_unlock_irq(&rtc_lock);
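The cmos driver drops its RTC_AIE/RTC_UIE ioctl cases because the RTC core can now route those requests through the alarm_irq_enable and update_irq_enable class ops. A much simplified sketch of that core-side dispatch (not the exact kernel code, just the shape of it):

/* Simplified sketch of how the core can map the legacy ioctls onto the
 * new class ops; locking, error checking and UIE emulation are omitted. */
static long example_rtc_dispatch(struct rtc_device *rtc, unsigned int cmd)
{
	struct device *dev = rtc->dev.parent;

	switch (cmd) {
	case RTC_AIE_ON:
		return rtc->ops->alarm_irq_enable(dev, 1);
	case RTC_AIE_OFF:
		return rtc->ops->alarm_irq_enable(dev, 0);
	case RTC_UIE_ON:
		return rtc->ops->update_irq_enable(dev, 1);
	case RTC_UIE_OFF:
		return rtc->ops->update_irq_enable(dev, 0);
	default:
		return -ENOIOCTLCMD;
	}
}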
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 1e73c8f42e38..532acf9b05d8 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -143,7 +143,6 @@ static int ds1302_rtc_ioctl(struct device *dev, unsigned int cmd,
143#ifdef RTC_SET_CHARGE 143#ifdef RTC_SET_CHARGE
144 case RTC_SET_CHARGE: 144 case RTC_SET_CHARGE:
145 { 145 {
146 struct ds1302_rtc *rtc = dev_get_drvdata(dev);
147 int tcs_val; 146 int tcs_val;
148 147
149 if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int))) 148 if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 2736b11a1b1e..259db7f3535b 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -617,7 +617,6 @@ static struct bin_attribute nvram = {
617static int __devinit ds1305_probe(struct spi_device *spi) 617static int __devinit ds1305_probe(struct spi_device *spi)
618{ 618{
619 struct ds1305 *ds1305; 619 struct ds1305 *ds1305;
620 struct rtc_device *rtc;
621 int status; 620 int status;
622 u8 addr, value; 621 u8 addr, value;
623 struct ds1305_platform_data *pdata = spi->dev.platform_data; 622 struct ds1305_platform_data *pdata = spi->dev.platform_data;
@@ -756,14 +755,13 @@ static int __devinit ds1305_probe(struct spi_device *spi)
756 dev_dbg(&spi->dev, "AM/PM\n"); 755 dev_dbg(&spi->dev, "AM/PM\n");
757 756
758 /* register RTC ... from here on, ds1305->ctrl needs locking */ 757 /* register RTC ... from here on, ds1305->ctrl needs locking */
759 rtc = rtc_device_register("ds1305", &spi->dev, 758 ds1305->rtc = rtc_device_register("ds1305", &spi->dev,
760 &ds1305_ops, THIS_MODULE); 759 &ds1305_ops, THIS_MODULE);
761 if (IS_ERR(rtc)) { 760 if (IS_ERR(ds1305->rtc)) {
762 status = PTR_ERR(rtc); 761 status = PTR_ERR(ds1305->rtc);
763 dev_dbg(&spi->dev, "register rtc --> %d\n", status); 762 dev_dbg(&spi->dev, "register rtc --> %d\n", status);
764 goto fail0; 763 goto fail0;
765 } 764 }
766 ds1305->rtc = rtc;
767 765
768 /* Maybe set up alarm IRQ; be ready to handle it triggering right 766 /* Maybe set up alarm IRQ; be ready to handle it triggering right
769 * away. NOTE that we don't share this. The signal is active low, 767 * away. NOTE that we don't share this. The signal is active low,
@@ -774,7 +772,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
774 if (spi->irq) { 772 if (spi->irq) {
775 INIT_WORK(&ds1305->work, ds1305_work); 773 INIT_WORK(&ds1305->work, ds1305_work);
776 status = request_irq(spi->irq, ds1305_irq, 774 status = request_irq(spi->irq, ds1305_irq,
777 0, dev_name(&rtc->dev), ds1305); 775 0, dev_name(&ds1305->rtc->dev), ds1305);
778 if (status < 0) { 776 if (status < 0) {
779 dev_dbg(&spi->dev, "request_irq %d --> %d\n", 777 dev_dbg(&spi->dev, "request_irq %d --> %d\n",
780 spi->irq, status); 778 spi->irq, status);
@@ -794,7 +792,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
794fail2: 792fail2:
795 free_irq(spi->irq, ds1305); 793 free_irq(spi->irq, ds1305);
796fail1: 794fail1:
797 rtc_device_unregister(rtc); 795 rtc_device_unregister(ds1305->rtc);
798fail0: 796fail0:
799 kfree(ds1305); 797 kfree(ds1305);
800 return status; 798 return status;
@@ -802,7 +800,7 @@ fail0:
802 800
803static int __devexit ds1305_remove(struct spi_device *spi) 801static int __devexit ds1305_remove(struct spi_device *spi)
804{ 802{
805 struct ds1305 *ds1305 = spi_get_drvdata(spi); 803 struct ds1305 *ds1305 = spi_get_drvdata(spi);
806 804
807 sysfs_remove_bin_file(&spi->dev.kobj, &nvram); 805 sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
808 806
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index eb99ee4fa0f5..8a99da6f2f24 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -874,7 +874,7 @@ read_rtc:
874 } 874 }
875 875
876 if (want_irq) { 876 if (want_irq) {
877 err = request_irq(client->irq, ds1307_irq, 0, 877 err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
878 ds1307->rtc->name, client); 878 ds1307->rtc->name, client);
879 if (err) { 879 if (err) {
880 dev_err(&client->dev, 880 dev_err(&client->dev,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 539676e25fd8..4166b84cb514 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -87,7 +87,6 @@ enum ds1511reg {
87struct rtc_plat_data { 87struct rtc_plat_data {
88 struct rtc_device *rtc; 88 struct rtc_device *rtc;
89 void __iomem *ioaddr; /* virtual base address */ 89 void __iomem *ioaddr; /* virtual base address */
90 unsigned long baseaddr; /* physical base address */
91 int size; /* amount of memory mapped */ 90 int size; /* amount of memory mapped */
92 int irq; 91 int irq;
93 unsigned int irqen; 92 unsigned int irqen;
@@ -95,6 +94,7 @@ struct rtc_plat_data {
95 int alrm_min; 94 int alrm_min;
96 int alrm_hour; 95 int alrm_hour;
97 int alrm_mday; 96 int alrm_mday;
97 spinlock_t lock;
98}; 98};
99 99
100static DEFINE_SPINLOCK(ds1511_lock); 100static DEFINE_SPINLOCK(ds1511_lock);
@@ -302,7 +302,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
302{ 302{
303 unsigned long flags; 303 unsigned long flags;
304 304
305 spin_lock_irqsave(&pdata->rtc->irq_lock, flags); 305 spin_lock_irqsave(&pdata->lock, flags);
306 rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ? 306 rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
307 0x80 : bin2bcd(pdata->alrm_mday) & 0x3f, 307 0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
308 RTC_ALARM_DATE); 308 RTC_ALARM_DATE);
@@ -317,7 +317,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
317 RTC_ALARM_SEC); 317 RTC_ALARM_SEC);
318 rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD); 318 rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
319 rtc_read(RTC_CMD1); /* clear interrupts */ 319 rtc_read(RTC_CMD1); /* clear interrupts */
320 spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags); 320 spin_unlock_irqrestore(&pdata->lock, flags);
321} 321}
322 322
323 static int 323 static int
@@ -362,61 +362,63 @@ ds1511_interrupt(int irq, void *dev_id)
362{ 362{
363 struct platform_device *pdev = dev_id; 363 struct platform_device *pdev = dev_id;
364 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 364 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
365 unsigned long events = RTC_IRQF; 365 unsigned long events = 0;
366 366
367 spin_lock(&pdata->lock);
367 /* 368 /*
368 * read and clear interrupt 369 * read and clear interrupt
369 */ 370 */
370 if (!(rtc_read(RTC_CMD1) & DS1511_IRQF)) { 371 if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
371 return IRQ_NONE; 372 events = RTC_IRQF;
372 } 373 if (rtc_read(RTC_ALARM_SEC) & 0x80)
373 if (rtc_read(RTC_ALARM_SEC) & 0x80) { 374 events |= RTC_UF;
374 events |= RTC_UF; 375 else
375 } else { 376 events |= RTC_AF;
376 events |= RTC_AF; 377 if (likely(pdata->rtc))
377 } 378 rtc_update_irq(pdata->rtc, 1, events);
378 rtc_update_irq(pdata->rtc, 1, events); 379 }
379 return IRQ_HANDLED; 380 spin_unlock(&pdata->lock);
381 return events ? IRQ_HANDLED : IRQ_NONE;
380} 382}
381 383
382 static int 384static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
383ds1511_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
384{ 385{
385 struct platform_device *pdev = to_platform_device(dev); 386 struct platform_device *pdev = to_platform_device(dev);
386 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 387 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
387 388
388 if (pdata->irq <= 0) { 389 if (pdata->irq <= 0)
389 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ 390 return -EINVAL;
390 } 391 if (enabled)
391 switch (cmd) {
392 case RTC_AIE_OFF:
393 pdata->irqen &= ~RTC_AF;
394 ds1511_rtc_update_alarm(pdata);
395 break;
396 case RTC_AIE_ON:
397 pdata->irqen |= RTC_AF; 392 pdata->irqen |= RTC_AF;
398 ds1511_rtc_update_alarm(pdata); 393 else
399 break; 394 pdata->irqen &= ~RTC_AF;
400 case RTC_UIE_OFF: 395 ds1511_rtc_update_alarm(pdata);
401 pdata->irqen &= ~RTC_UF; 396 return 0;
402 ds1511_rtc_update_alarm(pdata); 397}
403 break; 398
404 case RTC_UIE_ON: 399static int ds1511_rtc_update_irq_enable(struct device *dev,
400 unsigned int enabled)
401{
402 struct platform_device *pdev = to_platform_device(dev);
403 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
404
405 if (pdata->irq <= 0)
406 return -EINVAL;
407 if (enabled)
405 pdata->irqen |= RTC_UF; 408 pdata->irqen |= RTC_UF;
406 ds1511_rtc_update_alarm(pdata); 409 else
407 break; 410 pdata->irqen &= ~RTC_UF;
408 default: 411 ds1511_rtc_update_alarm(pdata);
409 return -ENOIOCTLCMD;
410 }
411 return 0; 412 return 0;
412} 413}
413 414
414static const struct rtc_class_ops ds1511_rtc_ops = { 415static const struct rtc_class_ops ds1511_rtc_ops = {
415 .read_time = ds1511_rtc_read_time, 416 .read_time = ds1511_rtc_read_time,
416 .set_time = ds1511_rtc_set_time, 417 .set_time = ds1511_rtc_set_time,
417 .read_alarm = ds1511_rtc_read_alarm, 418 .read_alarm = ds1511_rtc_read_alarm,
418 .set_alarm = ds1511_rtc_set_alarm, 419 .set_alarm = ds1511_rtc_set_alarm,
419 .ioctl = ds1511_rtc_ioctl, 420 .alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
421 .update_irq_enable = ds1511_rtc_update_irq_enable,
420}; 422};
421 423
422 static ssize_t 424 static ssize_t
@@ -492,29 +494,23 @@ ds1511_rtc_probe(struct platform_device *pdev)
492{ 494{
493 struct rtc_device *rtc; 495 struct rtc_device *rtc;
494 struct resource *res; 496 struct resource *res;
495 struct rtc_plat_data *pdata = NULL; 497 struct rtc_plat_data *pdata;
496 int ret = 0; 498 int ret = 0;
497 499
498 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 500 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
499 if (!res) { 501 if (!res) {
500 return -ENODEV; 502 return -ENODEV;
501 } 503 }
502 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 504 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
503 if (!pdata) { 505 if (!pdata)
504 return -ENOMEM; 506 return -ENOMEM;
505 }
506 pdata->size = res->end - res->start + 1; 507 pdata->size = res->end - res->start + 1;
507 if (!request_mem_region(res->start, pdata->size, pdev->name)) { 508 if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
508 ret = -EBUSY; 509 pdev->name))
509 goto out; 510 return -EBUSY;
510 } 511 ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
511 pdata->baseaddr = res->start; 512 if (!ds1511_base)
512 pdata->size = pdata->size; 513 return -ENOMEM;
513 ds1511_base = ioremap(pdata->baseaddr, pdata->size);
514 if (!ds1511_base) {
515 ret = -ENOMEM;
516 goto out;
517 }
518 pdata->ioaddr = ds1511_base; 514 pdata->ioaddr = ds1511_base;
519 pdata->irq = platform_get_irq(pdev, 0); 515 pdata->irq = platform_get_irq(pdev, 0);
520 516
@@ -540,13 +536,15 @@ ds1511_rtc_probe(struct platform_device *pdev)
540 dev_warn(&pdev->dev, "voltage-low detected.\n"); 536 dev_warn(&pdev->dev, "voltage-low detected.\n");
541 } 537 }
542 538
539 spin_lock_init(&pdata->lock);
540 platform_set_drvdata(pdev, pdata);
543 /* 541 /*
544 * if the platform has an interrupt in mind for this device, 542 * if the platform has an interrupt in mind for this device,
545 * then by all means, set it 543 * then by all means, set it
546 */ 544 */
547 if (pdata->irq > 0) { 545 if (pdata->irq > 0) {
548 rtc_read(RTC_CMD1); 546 rtc_read(RTC_CMD1);
549 if (request_irq(pdata->irq, ds1511_interrupt, 547 if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
550 IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) { 548 IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {
551 549
552 dev_warn(&pdev->dev, "interrupt not available.\n"); 550 dev_warn(&pdev->dev, "interrupt not available.\n");
@@ -556,33 +554,13 @@ ds1511_rtc_probe(struct platform_device *pdev)
556 554
557 rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops, 555 rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
558 THIS_MODULE); 556 THIS_MODULE);
559 if (IS_ERR(rtc)) { 557 if (IS_ERR(rtc))
560 ret = PTR_ERR(rtc); 558 return PTR_ERR(rtc);
561 goto out;
562 }
563 pdata->rtc = rtc; 559 pdata->rtc = rtc;
564 platform_set_drvdata(pdev, pdata); 560
565 ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); 561 ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
566 if (ret) { 562 if (ret)
567 goto out;
568 }
569 return 0;
570 out:
571 if (pdata->rtc) {
572 rtc_device_unregister(pdata->rtc); 563 rtc_device_unregister(pdata->rtc);
573 }
574 if (pdata->irq > 0) {
575 free_irq(pdata->irq, pdev);
576 }
577 if (ds1511_base) {
578 iounmap(ds1511_base);
579 ds1511_base = NULL;
580 }
581 if (pdata->baseaddr) {
582 release_mem_region(pdata->baseaddr, pdata->size);
583 }
584
585 kfree(pdata);
586 return ret; 564 return ret;
587} 565}
588 566
@@ -593,19 +571,13 @@ ds1511_rtc_remove(struct platform_device *pdev)
593 571
594 sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); 572 sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
595 rtc_device_unregister(pdata->rtc); 573 rtc_device_unregister(pdata->rtc);
596 pdata->rtc = NULL;
597 if (pdata->irq > 0) { 574 if (pdata->irq > 0) {
598 /* 575 /*
599 * disable the alarm interrupt 576 * disable the alarm interrupt
600 */ 577 */
601 rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD); 578 rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
602 rtc_read(RTC_CMD1); 579 rtc_read(RTC_CMD1);
603 free_irq(pdata->irq, pdev);
604 } 580 }
605 iounmap(pdata->ioaddr);
606 ds1511_base = NULL;
607 release_mem_region(pdata->baseaddr, pdata->size);
608 kfree(pdata);
609 return 0; 581 return 0;
610} 582}
611 583
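
The probe/remove rework above relies on the managed (devm_*) resource helpers: the allocation, the MMIO region and mapping, and the IRQ are bound to the struct device and released automatically when probe fails or the device is unbound, which is what lets the out: unwind labels and most of the remove() body disappear. A condensed sketch of that probe shape (illustrative only):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	/* both the region and the mapping are dropped automatically on
	 * probe failure and on device removal - no goto/unwind needed */
	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;
	return 0;
}
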
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 717288527c6b..ed1ef7c9cc06 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -18,7 +18,7 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/io.h> 19#include <linux/io.h>
20 20
21#define DRV_VERSION "0.2" 21#define DRV_VERSION "0.3"
22 22
23#define RTC_REG_SIZE 0x2000 23#define RTC_REG_SIZE 0x2000
24#define RTC_OFFSET 0x1ff0 24#define RTC_OFFSET 0x1ff0
@@ -61,7 +61,6 @@
61struct rtc_plat_data { 61struct rtc_plat_data {
62 struct rtc_device *rtc; 62 struct rtc_device *rtc;
63 void __iomem *ioaddr; 63 void __iomem *ioaddr;
64 resource_size_t baseaddr;
65 unsigned long last_jiffies; 64 unsigned long last_jiffies;
66 int irq; 65 int irq;
67 unsigned int irqen; 66 unsigned int irqen;
@@ -69,6 +68,7 @@ struct rtc_plat_data {
69 int alrm_min; 68 int alrm_min;
70 int alrm_hour; 69 int alrm_hour;
71 int alrm_mday; 70 int alrm_mday;
71 spinlock_t lock;
72}; 72};
73 73
74static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm) 74static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -139,7 +139,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
139 void __iomem *ioaddr = pdata->ioaddr; 139 void __iomem *ioaddr = pdata->ioaddr;
140 unsigned long flags; 140 unsigned long flags;
141 141
142 spin_lock_irqsave(&pdata->rtc->irq_lock, flags); 142 spin_lock_irqsave(&pdata->lock, flags);
143 writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ? 143 writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
144 0x80 : bin2bcd(pdata->alrm_mday), 144 0x80 : bin2bcd(pdata->alrm_mday),
145 ioaddr + RTC_DATE_ALARM); 145 ioaddr + RTC_DATE_ALARM);
@@ -154,7 +154,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
154 ioaddr + RTC_SECONDS_ALARM); 154 ioaddr + RTC_SECONDS_ALARM);
155 writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS); 155 writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS);
156 readb(ioaddr + RTC_FLAGS); /* clear interrupts */ 156 readb(ioaddr + RTC_FLAGS); /* clear interrupts */
157 spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags); 157 spin_unlock_irqrestore(&pdata->lock, flags);
158} 158}
159 159
160static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) 160static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -194,64 +194,69 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id)
194 struct platform_device *pdev = dev_id; 194 struct platform_device *pdev = dev_id;
195 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 195 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
196 void __iomem *ioaddr = pdata->ioaddr; 196 void __iomem *ioaddr = pdata->ioaddr;
197 unsigned long events = RTC_IRQF; 197 unsigned long events = 0;
198 198
199 spin_lock(&pdata->lock);
199 /* read and clear interrupt */ 200 /* read and clear interrupt */
200 if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF)) 201 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
201 return IRQ_NONE; 202 events = RTC_IRQF;
202 if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80) 203 if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
203 events |= RTC_UF; 204 events |= RTC_UF;
204 else 205 else
205 events |= RTC_AF; 206 events |= RTC_AF;
206 rtc_update_irq(pdata->rtc, 1, events); 207 if (likely(pdata->rtc))
207 return IRQ_HANDLED; 208 rtc_update_irq(pdata->rtc, 1, events);
209 }
210 spin_unlock(&pdata->lock);
211 return events ? IRQ_HANDLED : IRQ_NONE;
208} 212}
209 213
210static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd, 214static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
211 unsigned long arg)
212{ 215{
213 struct platform_device *pdev = to_platform_device(dev); 216 struct platform_device *pdev = to_platform_device(dev);
214 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 217 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
215 218
216 if (pdata->irq <= 0) 219 if (pdata->irq <= 0)
217 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ 220 return -EINVAL;
218 switch (cmd) { 221 if (enabled)
219 case RTC_AIE_OFF:
220 pdata->irqen &= ~RTC_AF;
221 ds1553_rtc_update_alarm(pdata);
222 break;
223 case RTC_AIE_ON:
224 pdata->irqen |= RTC_AF; 222 pdata->irqen |= RTC_AF;
225 ds1553_rtc_update_alarm(pdata); 223 else
226 break; 224 pdata->irqen &= ~RTC_AF;
227 case RTC_UIE_OFF: 225 ds1553_rtc_update_alarm(pdata);
228 pdata->irqen &= ~RTC_UF; 226 return 0;
229 ds1553_rtc_update_alarm(pdata); 227}
230 break; 228
231 case RTC_UIE_ON: 229static int ds1553_rtc_update_irq_enable(struct device *dev,
230 unsigned int enabled)
231{
232 struct platform_device *pdev = to_platform_device(dev);
233 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
234
235 if (pdata->irq <= 0)
236 return -EINVAL;
237 if (enabled)
232 pdata->irqen |= RTC_UF; 238 pdata->irqen |= RTC_UF;
233 ds1553_rtc_update_alarm(pdata); 239 else
234 break; 240 pdata->irqen &= ~RTC_UF;
235 default: 241 ds1553_rtc_update_alarm(pdata);
236 return -ENOIOCTLCMD;
237 }
238 return 0; 242 return 0;
239} 243}
240 244
241static const struct rtc_class_ops ds1553_rtc_ops = { 245static const struct rtc_class_ops ds1553_rtc_ops = {
242 .read_time = ds1553_rtc_read_time, 246 .read_time = ds1553_rtc_read_time,
243 .set_time = ds1553_rtc_set_time, 247 .set_time = ds1553_rtc_set_time,
244 .read_alarm = ds1553_rtc_read_alarm, 248 .read_alarm = ds1553_rtc_read_alarm,
245 .set_alarm = ds1553_rtc_set_alarm, 249 .set_alarm = ds1553_rtc_set_alarm,
246 .ioctl = ds1553_rtc_ioctl, 250 .alarm_irq_enable = ds1553_rtc_alarm_irq_enable,
251 .update_irq_enable = ds1553_rtc_update_irq_enable,
247}; 252};
248 253
249static ssize_t ds1553_nvram_read(struct kobject *kobj, 254static ssize_t ds1553_nvram_read(struct kobject *kobj,
250 struct bin_attribute *bin_attr, 255 struct bin_attribute *bin_attr,
251 char *buf, loff_t pos, size_t size) 256 char *buf, loff_t pos, size_t size)
252{ 257{
253 struct platform_device *pdev = 258 struct device *dev = container_of(kobj, struct device, kobj);
254 to_platform_device(container_of(kobj, struct device, kobj)); 259 struct platform_device *pdev = to_platform_device(dev);
255 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 260 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
256 void __iomem *ioaddr = pdata->ioaddr; 261 void __iomem *ioaddr = pdata->ioaddr;
257 ssize_t count; 262 ssize_t count;
@@ -265,8 +270,8 @@ static ssize_t ds1553_nvram_write(struct kobject *kobj,
265 struct bin_attribute *bin_attr, 270 struct bin_attribute *bin_attr,
266 char *buf, loff_t pos, size_t size) 271 char *buf, loff_t pos, size_t size)
267{ 272{
268 struct platform_device *pdev = 273 struct device *dev = container_of(kobj, struct device, kobj);
269 to_platform_device(container_of(kobj, struct device, kobj)); 274 struct platform_device *pdev = to_platform_device(dev);
270 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 275 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
271 void __iomem *ioaddr = pdata->ioaddr; 276 void __iomem *ioaddr = pdata->ioaddr;
272 ssize_t count; 277 ssize_t count;
@@ -291,26 +296,23 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
291 struct rtc_device *rtc; 296 struct rtc_device *rtc;
292 struct resource *res; 297 struct resource *res;
293 unsigned int cen, sec; 298 unsigned int cen, sec;
294 struct rtc_plat_data *pdata = NULL; 299 struct rtc_plat_data *pdata;
295 void __iomem *ioaddr = NULL; 300 void __iomem *ioaddr;
296 int ret = 0; 301 int ret = 0;
297 302
298 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 303 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
299 if (!res) 304 if (!res)
300 return -ENODEV; 305 return -ENODEV;
301 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 306 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
302 if (!pdata) 307 if (!pdata)
303 return -ENOMEM; 308 return -ENOMEM;
304 if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) { 309 if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
305 ret = -EBUSY; 310 pdev->name))
306 goto out; 311 return -EBUSY;
307 } 312
308 pdata->baseaddr = res->start; 313 ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
309 ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE); 314 if (!ioaddr)
310 if (!ioaddr) { 315 return -ENOMEM;
311 ret = -ENOMEM;
312 goto out;
313 }
314 pdata->ioaddr = ioaddr; 316 pdata->ioaddr = ioaddr;
315 pdata->irq = platform_get_irq(pdev, 0); 317 pdata->irq = platform_get_irq(pdev, 0);
316 318
@@ -326,9 +328,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
326 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF) 328 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF)
327 dev_warn(&pdev->dev, "voltage-low detected.\n"); 329 dev_warn(&pdev->dev, "voltage-low detected.\n");
328 330
331 spin_lock_init(&pdata->lock);
332 pdata->last_jiffies = jiffies;
333 platform_set_drvdata(pdev, pdata);
329 if (pdata->irq > 0) { 334 if (pdata->irq > 0) {
330 writeb(0, ioaddr + RTC_INTERRUPTS); 335 writeb(0, ioaddr + RTC_INTERRUPTS);
331 if (request_irq(pdata->irq, ds1553_rtc_interrupt, 336 if (devm_request_irq(&pdev->dev, pdata->irq,
337 ds1553_rtc_interrupt,
332 IRQF_DISABLED, pdev->name, pdev) < 0) { 338 IRQF_DISABLED, pdev->name, pdev) < 0) {
333 dev_warn(&pdev->dev, "interrupt not available.\n"); 339 dev_warn(&pdev->dev, "interrupt not available.\n");
334 pdata->irq = 0; 340 pdata->irq = 0;
@@ -337,27 +343,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
337 343
338 rtc = rtc_device_register(pdev->name, &pdev->dev, 344 rtc = rtc_device_register(pdev->name, &pdev->dev,
339 &ds1553_rtc_ops, THIS_MODULE); 345 &ds1553_rtc_ops, THIS_MODULE);
340 if (IS_ERR(rtc)) { 346 if (IS_ERR(rtc))
341 ret = PTR_ERR(rtc); 347 return PTR_ERR(rtc);
342 goto out;
343 }
344 pdata->rtc = rtc; 348 pdata->rtc = rtc;
345 pdata->last_jiffies = jiffies; 349
346 platform_set_drvdata(pdev, pdata);
347 ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr); 350 ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
348 if (ret) 351 if (ret)
349 goto out; 352 rtc_device_unregister(rtc);
350 return 0;
351 out:
352 if (pdata->rtc)
353 rtc_device_unregister(pdata->rtc);
354 if (pdata->irq > 0)
355 free_irq(pdata->irq, pdev);
356 if (ioaddr)
357 iounmap(ioaddr);
358 if (pdata->baseaddr)
359 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
360 kfree(pdata);
361 return ret; 353 return ret;
362} 354}
363 355
@@ -367,13 +359,8 @@ static int __devexit ds1553_rtc_remove(struct platform_device *pdev)
367 359
368 sysfs_remove_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr); 360 sysfs_remove_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
369 rtc_device_unregister(pdata->rtc); 361 rtc_device_unregister(pdata->rtc);
370 if (pdata->irq > 0) { 362 if (pdata->irq > 0)
371 writeb(0, pdata->ioaddr + RTC_INTERRUPTS); 363 writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
372 free_irq(pdata->irq, pdev);
373 }
374 iounmap(pdata->ioaddr);
375 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
376 kfree(pdata);
377 return 0; 364 return 0;
378} 365}
379 366
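
Both interrupt-handler rewrites above (ds1511 and ds1553) follow the same idea: take a spinlock embedded in the driver's own state (the rtc_device, and hence its irq_lock, may not exist yet, since the interrupt is now requested before rtc_device_register()), only call rtc_update_irq() once the rtc device is known to exist, and report IRQ_HANDLED only when the chip actually flagged an event. A generic sketch of that handler shape, with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>

struct foo_rtc {			/* hypothetical per-device state */
	struct rtc_device *rtc;
	void __iomem *ioaddr;
	spinlock_t lock;
};

/* assumed helper: reads and clears the chip's interrupt flag register,
 * returning nonzero if an alarm/update event was actually pending */
static int foo_read_and_clear_flags(struct foo_rtc *foo);

static irqreturn_t foo_rtc_interrupt(int irq, void *dev_id)
{
	struct foo_rtc *foo = dev_id;
	unsigned long events = 0;

	spin_lock(&foo->lock);
	if (foo_read_and_clear_flags(foo)) {
		events = RTC_IRQF | RTC_AF;	/* or RTC_UF, per the flag read */
		/* the rtc device may not be registered yet */
		if (likely(foo->rtc))
			rtc_update_irq(foo->rtc, 1, events);
	}
	spin_unlock(&foo->lock);

	/* reporting IRQ_NONE when the chip was quiet keeps shared lines sane */
	return events ? IRQ_HANDLED : IRQ_NONE;
}
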
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 09249459e9a4..a1273360a44e 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -21,7 +21,7 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/io.h> 22#include <linux/io.h>
23 23
24#define DRV_VERSION "0.3" 24#define DRV_VERSION "0.4"
25 25
26#define RTC_SIZE 8 26#define RTC_SIZE 8
27 27
@@ -55,7 +55,6 @@ struct rtc_plat_data {
55 void __iomem *ioaddr_rtc; 55 void __iomem *ioaddr_rtc;
56 size_t size_nvram; 56 size_t size_nvram;
57 size_t size; 57 size_t size;
58 resource_size_t baseaddr;
59 unsigned long last_jiffies; 58 unsigned long last_jiffies;
60 struct bin_attribute nvram_attr; 59 struct bin_attribute nvram_attr;
61}; 60};
@@ -132,8 +131,8 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj,
132 struct bin_attribute *bin_attr, 131 struct bin_attribute *bin_attr,
133 char *buf, loff_t pos, size_t size) 132 char *buf, loff_t pos, size_t size)
134{ 133{
135 struct platform_device *pdev = 134 struct device *dev = container_of(kobj, struct device, kobj);
136 to_platform_device(container_of(kobj, struct device, kobj)); 135 struct platform_device *pdev = to_platform_device(dev);
137 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 136 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
138 void __iomem *ioaddr = pdata->ioaddr_nvram; 137 void __iomem *ioaddr = pdata->ioaddr_nvram;
139 ssize_t count; 138 ssize_t count;
@@ -147,8 +146,8 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
147 struct bin_attribute *bin_attr, 146 struct bin_attribute *bin_attr,
148 char *buf, loff_t pos, size_t size) 147 char *buf, loff_t pos, size_t size)
149{ 148{
150 struct platform_device *pdev = 149 struct device *dev = container_of(kobj, struct device, kobj);
151 to_platform_device(container_of(kobj, struct device, kobj)); 150 struct platform_device *pdev = to_platform_device(dev);
152 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 151 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
153 void __iomem *ioaddr = pdata->ioaddr_nvram; 152 void __iomem *ioaddr = pdata->ioaddr_nvram;
154 ssize_t count; 153 ssize_t count;
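
The nvram read/write hunks in ds1553 and ds1742 only restate the same lookup more readably: the sysfs binary-attribute callbacks receive a kobject, which is embedded in a struct device, which in turn belongs to the platform device holding the driver data. A sketch of that chain (illustrative):

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t foo_nvram_read(struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t pos, size_t size)
{
	/* kobject -> device -> platform_device -> driver data */
	struct device *dev = container_of(kobj, struct device, kobj);
	struct platform_device *pdev = to_platform_device(dev);
	void *pdata = platform_get_drvdata(pdev);

	(void)pdata;	/* the real callbacks copy NVRAM bytes into buf here */
	return 0;
}
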
@@ -163,27 +162,24 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
163 struct rtc_device *rtc; 162 struct rtc_device *rtc;
164 struct resource *res; 163 struct resource *res;
165 unsigned int cen, sec; 164 unsigned int cen, sec;
166 struct rtc_plat_data *pdata = NULL; 165 struct rtc_plat_data *pdata;
167 void __iomem *ioaddr = NULL; 166 void __iomem *ioaddr;
168 int ret = 0; 167 int ret = 0;
169 168
170 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 169 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
171 if (!res) 170 if (!res)
172 return -ENODEV; 171 return -ENODEV;
173 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 172 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
174 if (!pdata) 173 if (!pdata)
175 return -ENOMEM; 174 return -ENOMEM;
176 pdata->size = res->end - res->start + 1; 175 pdata->size = res->end - res->start + 1;
177 if (!request_mem_region(res->start, pdata->size, pdev->name)) { 176 if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
178 ret = -EBUSY; 177 pdev->name))
179 goto out; 178 return -EBUSY;
180 } 179 ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
181 pdata->baseaddr = res->start; 180 if (!ioaddr)
182 ioaddr = ioremap(pdata->baseaddr, pdata->size); 181 return -ENOMEM;
183 if (!ioaddr) { 182
184 ret = -ENOMEM;
185 goto out;
186 }
187 pdata->ioaddr_nvram = ioaddr; 183 pdata->ioaddr_nvram = ioaddr;
188 pdata->size_nvram = pdata->size - RTC_SIZE; 184 pdata->size_nvram = pdata->size - RTC_SIZE;
189 pdata->ioaddr_rtc = ioaddr + pdata->size_nvram; 185 pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
@@ -207,31 +203,19 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
207 if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG)) 203 if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG))
208 dev_warn(&pdev->dev, "voltage-low detected.\n"); 204 dev_warn(&pdev->dev, "voltage-low detected.\n");
209 205
206 pdata->last_jiffies = jiffies;
207 platform_set_drvdata(pdev, pdata);
210 rtc = rtc_device_register(pdev->name, &pdev->dev, 208 rtc = rtc_device_register(pdev->name, &pdev->dev,
211 &ds1742_rtc_ops, THIS_MODULE); 209 &ds1742_rtc_ops, THIS_MODULE);
212 if (IS_ERR(rtc)) { 210 if (IS_ERR(rtc))
213 ret = PTR_ERR(rtc); 211 return PTR_ERR(rtc);
214 goto out;
215 }
216 pdata->rtc = rtc; 212 pdata->rtc = rtc;
217 pdata->last_jiffies = jiffies;
218 platform_set_drvdata(pdev, pdata);
219 213
220 ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); 214 ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
221 if (ret) { 215 if (ret) {
222 dev_err(&pdev->dev, "creating nvram file in sysfs failed\n"); 216 dev_err(&pdev->dev, "creating nvram file in sysfs failed\n");
223 goto out; 217 rtc_device_unregister(rtc);
224 } 218 }
225
226 return 0;
227 out:
228 if (pdata->rtc)
229 rtc_device_unregister(pdata->rtc);
230 if (pdata->ioaddr_nvram)
231 iounmap(pdata->ioaddr_nvram);
232 if (pdata->baseaddr)
233 release_mem_region(pdata->baseaddr, pdata->size);
234 kfree(pdata);
235 return ret; 219 return ret;
236} 220}
237 221
@@ -241,9 +225,6 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
241 225
242 sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); 226 sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
243 rtc_device_unregister(pdata->rtc); 227 rtc_device_unregister(pdata->rtc);
244 iounmap(pdata->ioaddr_nvram);
245 release_mem_region(pdata->baseaddr, pdata->size);
246 kfree(pdata);
247 return 0; 228 return 0;
248} 229}
249 230
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 0b2197559940..8cb5b8959e5b 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -142,7 +142,6 @@ static const struct rtc_class_ops m48t35_ops = {
142 142
143static int __devinit m48t35_probe(struct platform_device *pdev) 143static int __devinit m48t35_probe(struct platform_device *pdev)
144{ 144{
145 struct rtc_device *rtc;
146 struct resource *res; 145 struct resource *res;
147 struct m48t35_priv *priv; 146 struct m48t35_priv *priv;
148 int ret = 0; 147 int ret = 0;
@@ -171,20 +170,21 @@ static int __devinit m48t35_probe(struct platform_device *pdev)
171 ret = -ENOMEM; 170 ret = -ENOMEM;
172 goto out; 171 goto out;
173 } 172 }
173
174 spin_lock_init(&priv->lock); 174 spin_lock_init(&priv->lock);
175 rtc = rtc_device_register("m48t35", &pdev->dev, 175
176 platform_set_drvdata(pdev, priv);
177
178 priv->rtc = rtc_device_register("m48t35", &pdev->dev,
176 &m48t35_ops, THIS_MODULE); 179 &m48t35_ops, THIS_MODULE);
177 if (IS_ERR(rtc)) { 180 if (IS_ERR(priv->rtc)) {
178 ret = PTR_ERR(rtc); 181 ret = PTR_ERR(priv->rtc);
179 goto out; 182 goto out;
180 } 183 }
181 priv->rtc = rtc; 184
182 platform_set_drvdata(pdev, priv);
183 return 0; 185 return 0;
184 186
185out: 187out:
186 if (priv->rtc)
187 rtc_device_unregister(priv->rtc);
188 if (priv->reg) 188 if (priv->reg)
189 iounmap(priv->reg); 189 iounmap(priv->reg);
190 if (priv->baseaddr) 190 if (priv->baseaddr)
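
The m48t35 hunk (like the m48t59, pcf8563 and pcf8583 ones further down) reorders the same two calls: the driver data pointer is installed with platform_set_drvdata() or i2c_set_clientdata() before rtc_device_register(), because registration can already call back into the class operations, and those callbacks dereference the drvdata. A compressed sketch of the safe ordering (illustrative names):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>

struct foo_priv {				/* hypothetical */
	struct rtc_device *rtc;
};

extern const struct rtc_class_ops foo_rtc_ops;	/* assumed defined elsewhere */

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* drvdata must be valid before registration: rtc_device_register()
	 * may invoke the class operations before it returns */
	platform_set_drvdata(pdev, priv);

	priv->rtc = rtc_device_register(pdev->name, &pdev->dev,
					&foo_rtc_ops, THIS_MODULE);
	if (IS_ERR(priv->rtc))
		return PTR_ERR(priv->rtc);
	return 0;
}
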
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 33921a6b1707..ede43b846859 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -481,6 +481,9 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
481 goto out; 481 goto out;
482 } 482 }
483 483
484 spin_lock_init(&m48t59->lock);
485 platform_set_drvdata(pdev, m48t59);
486
484 m48t59->rtc = rtc_device_register(name, &pdev->dev, ops, THIS_MODULE); 487 m48t59->rtc = rtc_device_register(name, &pdev->dev, ops, THIS_MODULE);
485 if (IS_ERR(m48t59->rtc)) { 488 if (IS_ERR(m48t59->rtc)) {
486 ret = PTR_ERR(m48t59->rtc); 489 ret = PTR_ERR(m48t59->rtc);
@@ -490,16 +493,14 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
490 m48t59_nvram_attr.size = pdata->offset; 493 m48t59_nvram_attr.size = pdata->offset;
491 494
492 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr); 495 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
493 if (ret) 496 if (ret) {
497 rtc_device_unregister(m48t59->rtc);
494 goto out; 498 goto out;
499 }
495 500
496 spin_lock_init(&m48t59->lock);
497 platform_set_drvdata(pdev, m48t59);
498 return 0; 501 return 0;
499 502
500out: 503out:
501 if (!IS_ERR(m48t59->rtc))
502 rtc_device_unregister(m48t59->rtc);
503 if (m48t59->irq != NO_IRQ) 504 if (m48t59->irq != NO_IRQ)
504 free_irq(m48t59->irq, &pdev->dev); 505 free_irq(m48t59->irq, &pdev->dev);
505 if (m48t59->ioaddr) 506 if (m48t59->ioaddr)
diff --git a/drivers/rtc/rtc-mc13783.c b/drivers/rtc/rtc-mc13783.c
new file mode 100644
index 000000000000..850f983c039c
--- /dev/null
+++ b/drivers/rtc/rtc-mc13783.c
@@ -0,0 +1,262 @@
1/*
2 * Real Time Clock driver for Freescale MC13783 PMIC
3 *
4 * (C) 2009 Sascha Hauer, Pengutronix
5 * (C) 2009 Uwe Kleine-Koenig, Pengutronix
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/mfd/mc13783.h>
13#include <linux/platform_device.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/rtc.h>
17
18#define DRIVER_NAME "mc13783-rtc"
19
20#define MC13783_RTCTOD 20
21#define MC13783_RTCTODA 21
22#define MC13783_RTCDAY 22
23#define MC13783_RTCDAYA 23
24
25struct mc13783_rtc {
26 struct rtc_device *rtc;
27 struct mc13783 *mc13783;
28 int valid;
29};
30
31static int mc13783_rtc_read_time(struct device *dev, struct rtc_time *tm)
32{
33 struct mc13783_rtc *priv = dev_get_drvdata(dev);
34 unsigned int seconds, days1, days2;
35 unsigned long s1970;
36 int ret;
37
38 mc13783_lock(priv->mc13783);
39
40 if (!priv->valid) {
41 ret = -ENODATA;
42 goto out;
43 }
44
45 ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days1);
46 if (unlikely(ret))
47 goto out;
48
49 ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTOD, &seconds);
50 if (unlikely(ret))
51 goto out;
52
53 ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days2);
54out:
55 mc13783_unlock(priv->mc13783);
56
57 if (ret)
58 return ret;
59
60 if (days2 == days1 + 1) {
61 if (seconds >= 86400 / 2)
62 days2 = days1;
63 else
64 days1 = days2;
65 }
66
67 if (days1 != days2)
68 return -EIO;
69
70 s1970 = days1 * 86400 + seconds;
71
72 rtc_time_to_tm(s1970, tm);
73
74 return rtc_valid_tm(tm);
75}
76
77static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
78{
79 struct mc13783_rtc *priv = dev_get_drvdata(dev);
80 unsigned int seconds, days;
81 int ret;
82
83 seconds = secs % 86400;
84 days = secs / 86400;
85
86 mc13783_lock(priv->mc13783);
87
88 /*
89 * first write seconds=0 to prevent a day switch between writing days
90 * and seconds below
91 */
92 ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, 0);
93 if (unlikely(ret))
94 goto out;
95
96 ret = mc13783_reg_write(priv->mc13783, MC13783_RTCDAY, days);
97 if (unlikely(ret))
98 goto out;
99
100 ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, seconds);
101 if (unlikely(ret))
102 goto out;
103
104 ret = mc13783_ackirq(priv->mc13783, MC13783_IRQ_RTCRST);
105 if (unlikely(ret))
106 goto out;
107
108 ret = mc13783_unmask(priv->mc13783, MC13783_IRQ_RTCRST);
109out:
110 priv->valid = !ret;
111
112 mc13783_unlock(priv->mc13783);
113
114 return ret;
115}
116
117static irqreturn_t mc13783_rtc_update_handler(int irq, void *dev)
118{
119 struct mc13783_rtc *priv = dev;
120 struct mc13783 *mc13783 = priv->mc13783;
121
122 dev_dbg(&priv->rtc->dev, "1HZ\n");
123
124 rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
125
126 mc13783_ackirq(mc13783, irq);
127
128 return IRQ_HANDLED;
129}
130
131static int mc13783_rtc_update_irq_enable(struct device *dev,
132 unsigned int enabled)
133{
134 struct mc13783_rtc *priv = dev_get_drvdata(dev);
135 int ret = -ENODATA;
136
137 mc13783_lock(priv->mc13783);
138 if (!priv->valid)
139 goto out;
140
141 ret = (enabled ? mc13783_unmask : mc13783_mask)(priv->mc13783,
142 MC13783_IRQ_1HZ);
143out:
144 mc13783_unlock(priv->mc13783);
145
146 return ret;
147}
148
149static const struct rtc_class_ops mc13783_rtc_ops = {
150 .read_time = mc13783_rtc_read_time,
151 .set_mmss = mc13783_rtc_set_mmss,
152 .update_irq_enable = mc13783_rtc_update_irq_enable,
153};
154
155static irqreturn_t mc13783_rtc_reset_handler(int irq, void *dev)
156{
157 struct mc13783_rtc *priv = dev;
158 struct mc13783 *mc13783 = priv->mc13783;
159
160 dev_dbg(&priv->rtc->dev, "RTCRST\n");
161 priv->valid = 0;
162
163 mc13783_mask(mc13783, irq);
164
165 return IRQ_HANDLED;
166}
167
168static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
169{
170 int ret;
171 struct mc13783_rtc *priv;
172
173 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
174 if (!priv)
175 return -ENOMEM;
176
177 priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
178 platform_set_drvdata(pdev, priv);
179
180 priv->valid = 1;
181
182 mc13783_lock(priv->mc13783);
183
184 ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_RTCRST,
185 mc13783_rtc_reset_handler, DRIVER_NAME, priv);
186 if (ret)
187 goto err_reset_irq_request;
188
189 ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_1HZ,
190 mc13783_rtc_update_handler, DRIVER_NAME, priv);
191 if (ret)
192 goto err_update_irq_request;
193
194 mc13783_unlock(priv->mc13783);
195
196 priv->rtc = rtc_device_register(pdev->name,
197 &pdev->dev, &mc13783_rtc_ops, THIS_MODULE);
198
199 if (IS_ERR(priv->rtc)) {
200 ret = PTR_ERR(priv->rtc);
201
202 mc13783_lock(priv->mc13783);
203
204 mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
205err_update_irq_request:
206
207 mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
208err_reset_irq_request:
209
210 mc13783_unlock(priv->mc13783);
211
212 platform_set_drvdata(pdev, NULL);
213 kfree(priv);
214 }
215
216 return ret;
217}
218
219static int __devexit mc13783_rtc_remove(struct platform_device *pdev)
220{
221 struct mc13783_rtc *priv = platform_get_drvdata(pdev);
222
223 rtc_device_unregister(priv->rtc);
224
225 mc13783_lock(priv->mc13783);
226
227 mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
228 mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
229
230 mc13783_unlock(priv->mc13783);
231
232 platform_set_drvdata(pdev, NULL);
233
234 kfree(priv);
235
236 return 0;
237}
238
239static struct platform_driver mc13783_rtc_driver = {
240 .remove = __devexit_p(mc13783_rtc_remove),
241 .driver = {
242 .name = DRIVER_NAME,
243 .owner = THIS_MODULE,
244 },
245};
246
247static int __init mc13783_rtc_init(void)
248{
249 return platform_driver_probe(&mc13783_rtc_driver, &mc13783_rtc_probe);
250}
251module_init(mc13783_rtc_init);
252
253static void __exit mc13783_rtc_exit(void)
254{
255 platform_driver_unregister(&mc13783_rtc_driver);
256}
257module_exit(mc13783_rtc_exit);
258
259MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
260MODULE_DESCRIPTION("RTC driver for Freescale MC13783 PMIC");
261MODULE_LICENSE("GPL v2");
262MODULE_ALIAS("platform:" DRIVER_NAME);
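
Two details in the new mc13783 driver are worth spelling out. The day and time-of-day counters live in separate PMIC registers, so read_time reads the day register twice around the seconds read and reconciles a midnight rollover; conversely, set_mmss writes seconds = 0 first so the later day write cannot race a day increment. The torn-read guard in plain form (a generic sketch; read_days()/read_seconds() stand in for the mc13783_reg_read() calls):

#include <linux/errno.h>

extern unsigned int read_days(void);		/* assumed register accessors */
extern unsigned int read_seconds(void);

static int foo_read_stable(unsigned int *days, unsigned int *seconds)
{
	unsigned int d1, d2, s;

	d1 = read_days();
	s  = read_seconds();
	d2 = read_days();

	if (d2 == d1 + 1) {
		/* the day counter ticked between the two reads: pick the day
		 * the seconds value belongs to by which half of the day it is in */
		if (s >= 86400 / 2)
			d2 = d1;	/* seconds were read before midnight */
		else
			d1 = d2;	/* seconds were read after midnight */
	}
	if (d1 != d2)
		return -EIO;		/* still inconsistent: give up */

	*days = d1;
	*seconds = s;
	return 0;
}
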
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index e0263d2005ee..dc052ce6e63a 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -27,10 +27,17 @@
27#define RTC_MONTH_OFFS 8 27#define RTC_MONTH_OFFS 8
28#define RTC_YEAR_OFFS 16 28#define RTC_YEAR_OFFS 16
29 29
30#define RTC_ALARM_TIME_REG_OFFS 8
31#define RTC_ALARM_DATE_REG_OFFS 0xc
32#define RTC_ALARM_VALID (1 << 7)
33
34#define RTC_ALARM_INTERRUPT_MASK_REG_OFFS 0x10
35#define RTC_ALARM_INTERRUPT_CASUE_REG_OFFS 0x14
30 36
31struct rtc_plat_data { 37struct rtc_plat_data {
32 struct rtc_device *rtc; 38 struct rtc_device *rtc;
33 void __iomem *ioaddr; 39 void __iomem *ioaddr;
40 int irq;
34}; 41};
35 42
36static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm) 43static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -84,12 +91,134 @@ static int mv_rtc_read_time(struct device *dev, struct rtc_time *tm)
84 return rtc_valid_tm(tm); 91 return rtc_valid_tm(tm);
85} 92}
86 93
94static int mv_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
95{
96 struct rtc_plat_data *pdata = dev_get_drvdata(dev);
97 void __iomem *ioaddr = pdata->ioaddr;
98 u32 rtc_time, rtc_date;
99 unsigned int year, month, day, hour, minute, second, wday;
100
101 rtc_time = readl(ioaddr + RTC_ALARM_TIME_REG_OFFS);
102 rtc_date = readl(ioaddr + RTC_ALARM_DATE_REG_OFFS);
103
104 second = rtc_time & 0x7f;
105 minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
106 hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hours mode */
107 wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;
108
109 day = rtc_date & 0x3f;
110 month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
111 year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;
112
113 alm->time.tm_sec = bcd2bin(second);
114 alm->time.tm_min = bcd2bin(minute);
115 alm->time.tm_hour = bcd2bin(hour);
116 alm->time.tm_mday = bcd2bin(day);
117 alm->time.tm_wday = bcd2bin(wday);
118 alm->time.tm_mon = bcd2bin(month) - 1;
119 /* hw counts from year 2000, but tm_year is relative to 1900 */
120 alm->time.tm_year = bcd2bin(year) + 100;
121
122 if (rtc_valid_tm(&alm->time) < 0) {
123 dev_err(dev, "retrieved alarm date/time is not valid.\n");
124 rtc_time_to_tm(0, &alm->time);
125 }
126
127 alm->enabled = !!readl(ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
128 return 0;
129}
130
131static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
132{
133 struct rtc_plat_data *pdata = dev_get_drvdata(dev);
134 void __iomem *ioaddr = pdata->ioaddr;
135 u32 rtc_reg = 0;
136
137 if (alm->time.tm_sec >= 0)
138 rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_sec))
139 << RTC_SECONDS_OFFS;
140 if (alm->time.tm_min >= 0)
141 rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_min))
142 << RTC_MINUTES_OFFS;
143 if (alm->time.tm_hour >= 0)
144 rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_hour))
145 << RTC_HOURS_OFFS;
146
147 writel(rtc_reg, ioaddr + RTC_ALARM_TIME_REG_OFFS);
148
149 if (alm->time.tm_mday >= 0)
150 rtc_reg = (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mday))
151 << RTC_MDAY_OFFS;
152 else
153 rtc_reg = 0;
154
155 if (alm->time.tm_mon >= 0)
156 rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mon + 1))
157 << RTC_MONTH_OFFS;
158
159 if (alm->time.tm_year >= 0)
160 rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year % 100))
161 << RTC_YEAR_OFFS;
162
163 writel(rtc_reg, ioaddr + RTC_ALARM_DATE_REG_OFFS);
164 writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
165 writel(alm->enabled ? 1 : 0,
166 ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
167
168 return 0;
169}
170
171static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
172 unsigned long arg)
173{
174 struct platform_device *pdev = to_platform_device(dev);
175 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
176 void __iomem *ioaddr = pdata->ioaddr;
177
178 if (pdata->irq < 0)
179 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
180 switch (cmd) {
181 case RTC_AIE_OFF:
182 writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
183 break;
184 case RTC_AIE_ON:
185 writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
186 break;
187 default:
188 return -ENOIOCTLCMD;
189 }
190 return 0;
191}
192
193static irqreturn_t mv_rtc_interrupt(int irq, void *data)
194{
195 struct rtc_plat_data *pdata = data;
196 void __iomem *ioaddr = pdata->ioaddr;
197
198 /* alarm irq? */
199 if (!readl(ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS))
200 return IRQ_NONE;
201
202 /* clear interrupt */
203 writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
204 rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF);
205 return IRQ_HANDLED;
206}
207
87static const struct rtc_class_ops mv_rtc_ops = { 208static const struct rtc_class_ops mv_rtc_ops = {
88 .read_time = mv_rtc_read_time, 209 .read_time = mv_rtc_read_time,
89 .set_time = mv_rtc_set_time, 210 .set_time = mv_rtc_set_time,
90}; 211};
91 212
92static int __init mv_rtc_probe(struct platform_device *pdev) 213static const struct rtc_class_ops mv_rtc_alarm_ops = {
214 .read_time = mv_rtc_read_time,
215 .set_time = mv_rtc_set_time,
216 .read_alarm = mv_rtc_read_alarm,
217 .set_alarm = mv_rtc_set_alarm,
218 .ioctl = mv_rtc_ioctl,
219};
220
221static int __devinit mv_rtc_probe(struct platform_device *pdev)
93{ 222{
94 struct resource *res; 223 struct resource *res;
95 struct rtc_plat_data *pdata; 224 struct rtc_plat_data *pdata;
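
In the mv alarm code added above, each alarm field is stored as BCD with bit 7 (RTC_ALARM_VALID) acting as a per-field enable: fields left negative (unspecified) simply never get the valid bit, so the hardware treats them as wildcards. A small sketch of packing one field that way (illustrative helper, not from the patch):

#include <linux/bcd.h>
#include <linux/types.h>

#define FOO_ALARM_VALID	(1 << 7)	/* mirrors RTC_ALARM_VALID above */

/* pack one alarm field into its byte lane; a negative value means
 * "don't care", so the valid bit stays clear and the field is ignored */
static u32 foo_pack_alarm_field(int value, int shift)
{
	if (value < 0)
		return 0;
	return (u32)(FOO_ALARM_VALID | bin2bcd(value)) << shift;
}
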
@@ -130,12 +259,31 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
130 } 259 }
131 } 260 }
132 261
262 pdata->irq = platform_get_irq(pdev, 0);
263
133 platform_set_drvdata(pdev, pdata); 264 platform_set_drvdata(pdev, pdata);
134 pdata->rtc = rtc_device_register(pdev->name, &pdev->dev, 265
135 &mv_rtc_ops, THIS_MODULE); 266 if (pdata->irq >= 0) {
267 device_init_wakeup(&pdev->dev, 1);
268 pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
269 &mv_rtc_alarm_ops,
270 THIS_MODULE);
271 } else
272 pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
273 &mv_rtc_ops, THIS_MODULE);
136 if (IS_ERR(pdata->rtc)) 274 if (IS_ERR(pdata->rtc))
137 return PTR_ERR(pdata->rtc); 275 return PTR_ERR(pdata->rtc);
138 276
277 if (pdata->irq >= 0) {
278 writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
279 if (devm_request_irq(&pdev->dev, pdata->irq, mv_rtc_interrupt,
280 IRQF_DISABLED | IRQF_SHARED,
281 pdev->name, pdata) < 0) {
282 dev_warn(&pdev->dev, "interrupt not available.\n");
283 pdata->irq = -1;
284 }
285 }
286
139 return 0; 287 return 0;
140} 288}
141 289
@@ -143,6 +291,9 @@ static int __exit mv_rtc_remove(struct platform_device *pdev)
143{ 291{
144 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 292 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
145 293
294 if (pdata->irq >= 0)
295 device_init_wakeup(&pdev->dev, 0);
296
146 rtc_device_unregister(pdata->rtc); 297 rtc_device_unregister(pdata->rtc);
147 return 0; 298 return 0;
148} 299}
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
new file mode 100644
index 000000000000..bf59c9c586b2
--- /dev/null
+++ b/drivers/rtc/rtc-nuc900.c
@@ -0,0 +1,342 @@
1/*
2 * Copyright (c) 2008-2009 Nuvoton technology corporation.
3 *
4 * Wan ZongShun <mcuos.com@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation;version 2 of the License.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/platform_device.h>
15#include <linux/rtc.h>
16#include <linux/delay.h>
17#include <linux/io.h>
18#include <linux/bcd.h>
19
20/* RTC Control Registers */
21#define REG_RTC_INIR 0x00
22#define REG_RTC_AER 0x04
23#define REG_RTC_FCR 0x08
24#define REG_RTC_TLR 0x0C
25#define REG_RTC_CLR 0x10
26#define REG_RTC_TSSR 0x14
27#define REG_RTC_DWR 0x18
28#define REG_RTC_TAR 0x1C
29#define REG_RTC_CAR 0x20
30#define REG_RTC_LIR 0x24
31#define REG_RTC_RIER 0x28
32#define REG_RTC_RIIR 0x2C
33#define REG_RTC_TTR 0x30
34
35#define RTCSET 0x01
36#define AERRWENB 0x10000
37#define INIRRESET 0xa5eb1357
38#define AERPOWERON 0xA965
39#define AERPOWEROFF 0x0000
40#define LEAPYEAR 0x0001
41#define TICKENB 0x80
42#define TICKINTENB 0x0002
43#define ALARMINTENB 0x0001
44#define MODE24 0x0001
45
46struct nuc900_rtc {
47 int irq_num;
48 void __iomem *rtc_reg;
49 struct rtc_device *rtcdev;
50};
51
52struct nuc900_bcd_time {
53 int bcd_sec;
54 int bcd_min;
55 int bcd_hour;
56 int bcd_mday;
57 int bcd_mon;
58 int bcd_year;
59};
60
61static irqreturn_t nuc900_rtc_interrupt(int irq, void *_rtc)
62{
63 struct nuc900_rtc *rtc = _rtc;
64 unsigned long events = 0, rtc_irq;
65
66 rtc_irq = __raw_readl(rtc->rtc_reg + REG_RTC_RIIR);
67
68 if (rtc_irq & ALARMINTENB) {
69 rtc_irq &= ~ALARMINTENB;
70 __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
71 events |= RTC_AF | RTC_IRQF;
72 }
73
74 if (rtc_irq & TICKINTENB) {
75 rtc_irq &= ~TICKINTENB;
76 __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
77 events |= RTC_UF | RTC_IRQF;
78 }
79
80 rtc_update_irq(rtc->rtcdev, 1, events);
81
82 return IRQ_HANDLED;
83}
84
85static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
86{
87 unsigned int i;
88 __raw_writel(INIRRESET, nuc900_rtc->rtc_reg + REG_RTC_INIR);
89
90 mdelay(10);
91
92 __raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER);
93
94 for (i = 0; i < 1000; i++) {
95 if (__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
96 return 0;
97 }
98
99 if ((__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB) == 0x0)
100 return ERR_PTR(-ENODEV);
101
102 return ERR_PTR(-EPERM);
103}
104
105static void nuc900_rtc_bcd2bin(unsigned int timereg,
106 unsigned int calreg, struct rtc_time *tm)
107{
108 tm->tm_mday = bcd2bin(calreg >> 0);
109 tm->tm_mon = bcd2bin(calreg >> 8);
110 tm->tm_year = bcd2bin(calreg >> 16) + 100;
111
112 tm->tm_sec = bcd2bin(timereg >> 0);
113 tm->tm_min = bcd2bin(timereg >> 8);
114 tm->tm_hour = bcd2bin(timereg >> 16);
115
116 rtc_valid_tm(tm);
117}
118
119static void nuc900_rtc_bin2bcd(struct rtc_time *settm,
120 struct nuc900_bcd_time *gettm)
121{
122 gettm->bcd_mday = bin2bcd(settm->tm_mday) << 0;
123 gettm->bcd_mon = bin2bcd(settm->tm_mon) << 8;
124 gettm->bcd_year = bin2bcd(settm->tm_year - 100) << 16;
125
126 gettm->bcd_sec = bin2bcd(settm->tm_sec) << 0;
127 gettm->bcd_min = bin2bcd(settm->tm_min) << 8;
128 gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16;
129}
130
131static int nuc900_update_irq_enable(struct device *dev, unsigned int enabled)
132{
133 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
134
135 if (enabled)
136 __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
137 (TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
138 else
139 __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
140 (~TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
141
142 return 0;
143}
144
145static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled)
146{
147 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
148
149 if (enabled)
150 __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
151 (ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
152 else
153 __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
154 (~ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
155
156 return 0;
157}
158
159static int nuc900_rtc_read_time(struct device *dev, struct rtc_time *tm)
160{
161 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
162 unsigned int timeval, clrval;
163
164 timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TLR);
165 clrval = __raw_readl(rtc->rtc_reg + REG_RTC_CLR);
166
167 nuc900_rtc_bcd2bin(timeval, clrval, tm);
168
169 return 0;
170}
171
172static int nuc900_rtc_set_time(struct device *dev, struct rtc_time *tm)
173{
174 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
175 struct nuc900_bcd_time gettm;
176 unsigned long val;
177 int *err;
178
179 nuc900_rtc_bin2bcd(tm, &gettm);
180
181 err = check_rtc_access_enable(rtc);
182 if (IS_ERR(err))
183 return PTR_ERR(err);
184
185 val = gettm.bcd_mday | gettm.bcd_mon | gettm.bcd_year;
186 __raw_writel(val, rtc->rtc_reg + REG_RTC_CLR);
187
188 val = gettm.bcd_sec | gettm.bcd_min | gettm.bcd_hour;
189 __raw_writel(val, rtc->rtc_reg + REG_RTC_TLR);
190
191 return 0;
192}
193
194static int nuc900_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
195{
196 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
197 unsigned int timeval, carval;
198
199 timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TAR);
200 carval = __raw_readl(rtc->rtc_reg + REG_RTC_CAR);
201
202 nuc900_rtc_bcd2bin(timeval, carval, &alrm->time);
203
204 return 0;
205}
206
207static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
208{
209 struct nuc900_rtc *rtc = dev_get_drvdata(dev);
210 struct nuc900_bcd_time tm;
211 unsigned long val;
212 int *err;
213
214 nuc900_rtc_bin2bcd(&alrm->time, &tm);
215
216 err = check_rtc_access_enable(rtc);
217 if (IS_ERR(err))
218 return PTR_ERR(err);
219
220 val = tm.bcd_mday | tm.bcd_mon | tm.bcd_year;
221 __raw_writel(val, rtc->rtc_reg + REG_RTC_CAR);
222
223 val = tm.bcd_sec | tm.bcd_min | tm.bcd_hour;
224 __raw_writel(val, rtc->rtc_reg + REG_RTC_TAR);
225
226 return 0;
227}
228
229static struct rtc_class_ops nuc900_rtc_ops = {
230 .read_time = nuc900_rtc_read_time,
231 .set_time = nuc900_rtc_set_time,
232 .read_alarm = nuc900_rtc_read_alarm,
233 .set_alarm = nuc900_rtc_set_alarm,
234 .alarm_irq_enable = nuc900_alarm_irq_enable,
235 .update_irq_enable = nuc900_update_irq_enable,
236};
237
238static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
239{
240 struct resource *res;
241 struct nuc900_rtc *nuc900_rtc;
242 int err = 0;
243
244 nuc900_rtc = kzalloc(sizeof(struct nuc900_rtc), GFP_KERNEL);
245 if (!nuc900_rtc) {
246 dev_err(&pdev->dev, "kzalloc nuc900_rtc failed\n");
247 return -ENOMEM;
248 }
249 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
250 if (!res) {
251 dev_err(&pdev->dev, "platform_get_resource failed\n");
252 err = -ENXIO;
253 goto fail1;
254 }
255
256 if (!request_mem_region(res->start, resource_size(res),
257 pdev->name)) {
258 dev_err(&pdev->dev, "request_mem_region failed\n");
259 err = -EBUSY;
260 goto fail1;
261 }
262
263 nuc900_rtc->rtc_reg = ioremap(res->start, resource_size(res));
264 if (!nuc900_rtc->rtc_reg) {
265 dev_err(&pdev->dev, "ioremap rtc_reg failed\n");
266 err = -ENOMEM;
267 goto fail2;
268 }
269
270 nuc900_rtc->irq_num = platform_get_irq(pdev, 0);
271 if (request_irq(nuc900_rtc->irq_num, nuc900_rtc_interrupt,
272 IRQF_DISABLED, "nuc900rtc", nuc900_rtc)) {
273 dev_err(&pdev->dev, "NUC900 RTC request irq failed\n");
274 err = -EBUSY;
275 goto fail3;
276 }
277
278 nuc900_rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
279 &nuc900_rtc_ops, THIS_MODULE);
280 if (IS_ERR(nuc900_rtc->rtcdev)) {
 281		dev_err(&pdev->dev, "rtc device register failed\n");
282 err = PTR_ERR(nuc900_rtc->rtcdev);
283 goto fail4;
284 }
285
286 platform_set_drvdata(pdev, nuc900_rtc);
287 __raw_writel(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_TSSR) | MODE24,
288 nuc900_rtc->rtc_reg + REG_RTC_TSSR);
289
290 return 0;
291
292fail4: free_irq(nuc900_rtc->irq_num, nuc900_rtc);
293fail3: iounmap(nuc900_rtc->rtc_reg);
294fail2: release_mem_region(res->start, resource_size(res));
295fail1: kfree(nuc900_rtc);
296 return err;
297}
298
299static int __devexit nuc900_rtc_remove(struct platform_device *pdev)
300{
301 struct nuc900_rtc *nuc900_rtc = platform_get_drvdata(pdev);
302 struct resource *res;
303
304 rtc_device_unregister(nuc900_rtc->rtcdev);
305 free_irq(nuc900_rtc->irq_num, nuc900_rtc);
306 iounmap(nuc900_rtc->rtc_reg);
307
308 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
309 release_mem_region(res->start, resource_size(res));
310
311 kfree(nuc900_rtc);
312
313 platform_set_drvdata(pdev, NULL);
314
315 return 0;
316}
317
318static struct platform_driver nuc900_rtc_driver = {
319 .remove = __devexit_p(nuc900_rtc_remove),
320 .driver = {
321 .name = "nuc900-rtc",
322 .owner = THIS_MODULE,
323 },
324};
325
326static int __init nuc900_rtc_init(void)
327{
328 return platform_driver_probe(&nuc900_rtc_driver, nuc900_rtc_probe);
329}
330
331static void __exit nuc900_rtc_exit(void)
332{
333 platform_driver_unregister(&nuc900_rtc_driver);
334}
335
336module_init(nuc900_rtc_init);
337module_exit(nuc900_rtc_exit);
338
339MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
340MODULE_DESCRIPTION("nuc910/nuc920 RTC driver");
341MODULE_LICENSE("GPL");
342MODULE_ALIAS("platform:nuc900-rtc");
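
The new nuc900 driver keeps date and time as packed BCD words: day in bits 0-7, month in bits 8-15 and year-since-2000 in bits 16-23 of the calendar register, with the same layout for seconds/minutes/hours in the time register, converted via bin2bcd()/bcd2bin(). (As written above, tm_mon is stored without the usual +1 adjustment, so the hardware month field ends up 0-based.) A standalone sketch of that packing, under the same layout assumption:

#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/types.h>

/* pack a struct rtc_time into the calendar-register layout used above */
static u32 foo_pack_date(const struct rtc_time *tm)
{
	return (bin2bcd(tm->tm_mday) << 0) |
	       (bin2bcd(tm->tm_mon) << 8) |
	       (bin2bcd(tm->tm_year - 100) << 16);
}

static void foo_unpack_date(u32 calreg, struct rtc_time *tm)
{
	tm->tm_mday = bcd2bin(calreg >> 0);
	tm->tm_mon  = bcd2bin(calreg >> 8);
	tm->tm_year = bcd2bin(calreg >> 16) + 100;
}
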
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 0587d53987fe..64d9727b7229 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -87,9 +87,10 @@
87#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3) 87#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
88#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2) 88#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
89 89
90static void __iomem *rtc_base;
90 91
91#define rtc_read(addr) omap_readb(OMAP_RTC_BASE + (addr)) 92#define rtc_read(addr) __raw_readb(rtc_base + (addr))
92#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr)) 93#define rtc_write(val, addr) __raw_writeb(val, rtc_base + (addr))
93 94
94 95
95/* we rely on the rtc framework to handle locking (rtc->ops_lock), 96/* we rely on the rtc framework to handle locking (rtc->ops_lock),
@@ -330,32 +331,31 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
330 return -ENOENT; 331 return -ENOENT;
331 } 332 }
332 333
333 /* NOTE: using static mapping for RTC registers */
334 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 334 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
335 if (res && res->start != OMAP_RTC_BASE) { 335 if (!res) {
336 pr_debug("%s: RTC registers at %08x, expected %08x\n", 336 pr_debug("%s: RTC resource data missing\n", pdev->name);
337 pdev->name, (unsigned) res->start, OMAP_RTC_BASE);
338 return -ENOENT; 337 return -ENOENT;
339 } 338 }
340 339
341 if (res) 340 mem = request_mem_region(res->start, resource_size(res), pdev->name);
342 mem = request_mem_region(res->start,
343 res->end - res->start + 1,
344 pdev->name);
345 else
346 mem = NULL;
347 if (!mem) { 341 if (!mem) {
348 pr_debug("%s: RTC registers at %08x are not free\n", 342 pr_debug("%s: RTC registers at %08x are not free\n",
349 pdev->name, OMAP_RTC_BASE); 343 pdev->name, res->start);
350 return -EBUSY; 344 return -EBUSY;
351 } 345 }
352 346
347 rtc_base = ioremap(res->start, resource_size(res));
348 if (!rtc_base) {
349 pr_debug("%s: RTC registers can't be mapped\n", pdev->name);
350 goto fail;
351 }
352
353 rtc = rtc_device_register(pdev->name, &pdev->dev, 353 rtc = rtc_device_register(pdev->name, &pdev->dev,
354 &omap_rtc_ops, THIS_MODULE); 354 &omap_rtc_ops, THIS_MODULE);
355 if (IS_ERR(rtc)) { 355 if (IS_ERR(rtc)) {
356 pr_debug("%s: can't register RTC device, err %ld\n", 356 pr_debug("%s: can't register RTC device, err %ld\n",
357 pdev->name, PTR_ERR(rtc)); 357 pdev->name, PTR_ERR(rtc));
358 goto fail; 358 goto fail0;
359 } 359 }
360 platform_set_drvdata(pdev, rtc); 360 platform_set_drvdata(pdev, rtc);
361 dev_set_drvdata(&rtc->dev, mem); 361 dev_set_drvdata(&rtc->dev, mem);
@@ -380,13 +380,14 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
380 dev_name(&rtc->dev), rtc)) { 380 dev_name(&rtc->dev), rtc)) {
381 pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n", 381 pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n",
382 pdev->name, omap_rtc_timer); 382 pdev->name, omap_rtc_timer);
383 goto fail0; 383 goto fail1;
384 } 384 }
385 if (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED, 385 if ((omap_rtc_timer != omap_rtc_alarm) &&
386 dev_name(&rtc->dev), rtc)) { 386 (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED,
387 dev_name(&rtc->dev), rtc))) {
387 pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n", 388 pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
388 pdev->name, omap_rtc_alarm); 389 pdev->name, omap_rtc_alarm);
389 goto fail1; 390 goto fail2;
390 } 391 }
391 392
392 /* On boards with split power, RTC_ON_NOFF won't reset the RTC */ 393 /* On boards with split power, RTC_ON_NOFF won't reset the RTC */
@@ -419,10 +420,12 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
419 420
420 return 0; 421 return 0;
421 422
422fail1: 423fail2:
423 free_irq(omap_rtc_timer, NULL); 424 free_irq(omap_rtc_timer, NULL);
424fail0: 425fail1:
425 rtc_device_unregister(rtc); 426 rtc_device_unregister(rtc);
427fail0:
428 iounmap(rtc_base);
426fail: 429fail:
427 release_resource(mem); 430 release_resource(mem);
428 return -EIO; 431 return -EIO;
@@ -438,7 +441,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
438 rtc_write(0, OMAP_RTC_INTERRUPTS_REG); 441 rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
439 442
440 free_irq(omap_rtc_timer, rtc); 443 free_irq(omap_rtc_timer, rtc);
441 free_irq(omap_rtc_alarm, rtc); 444
445 if (omap_rtc_timer != omap_rtc_alarm)
446 free_irq(omap_rtc_alarm, rtc);
442 447
443 release_resource(dev_get_drvdata(&rtc->dev)); 448 release_resource(dev_get_drvdata(&rtc->dev));
444 rtc_device_unregister(rtc); 449 rtc_device_unregister(rtc);
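
The omap change above drops the assumption that the RTC block sits at the fixed OMAP_RTC_BASE physical address: the register window now comes from the platform resource, is claimed with request_mem_region(), mapped with ioremap(), and the rtc_read()/rtc_write() helpers go through the mapped base with __raw_readb()/__raw_writeb(). The shape of that idiom, reduced to its core (illustrative names):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *foo_base;

#define foo_read(off)		__raw_readb(foo_base + (off))
#define foo_write(val, off)	__raw_writeb(val, foo_base + (off))

static int foo_map_registers(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENOENT;
	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;
	foo_base = ioremap(res->start, resource_size(res));
	if (!foo_base) {
		release_mem_region(res->start, resource_size(res));
		return -ENOMEM;
	}
	return 0;
}
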
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 9b74e9c9151c..854c3cb365a1 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -58,6 +58,7 @@ struct pcf50633_time {
58struct pcf50633_rtc { 58struct pcf50633_rtc {
59 int alarm_enabled; 59 int alarm_enabled;
60 int second_enabled; 60 int second_enabled;
61 int alarm_pending;
61 62
62 struct pcf50633 *pcf; 63 struct pcf50633 *pcf;
63 struct rtc_device *rtc_dev; 64 struct rtc_device *rtc_dev;
@@ -209,6 +210,7 @@ static int pcf50633_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
209 rtc = dev_get_drvdata(dev); 210 rtc = dev_get_drvdata(dev);
210 211
211 alrm->enabled = rtc->alarm_enabled; 212 alrm->enabled = rtc->alarm_enabled;
213 alrm->pending = rtc->alarm_pending;
212 214
213 ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA, 215 ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
214 PCF50633_TI_EXTENT, &pcf_tm.time[0]); 216 PCF50633_TI_EXTENT, &pcf_tm.time[0]);
@@ -244,6 +246,8 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
244 /* Returns 0 on success */ 246 /* Returns 0 on success */
245 ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA, 247 ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
246 PCF50633_TI_EXTENT, &pcf_tm.time[0]); 248 PCF50633_TI_EXTENT, &pcf_tm.time[0]);
249 if (!alrm->enabled)
250 rtc->alarm_pending = 0;
247 251
248 if (!alarm_masked || alrm->enabled) 252 if (!alarm_masked || alrm->enabled)
249 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM); 253 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
@@ -268,6 +272,7 @@ static void pcf50633_rtc_irq(int irq, void *data)
268 switch (irq) { 272 switch (irq) {
269 case PCF50633_IRQ_ALARM: 273 case PCF50633_IRQ_ALARM:
270 rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); 274 rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
275 rtc->alarm_pending = 1;
271 break; 276 break;
272 case PCF50633_IRQ_SECOND: 277 case PCF50633_IRQ_SECOND:
273 rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); 278 rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
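
The pcf50633 hunks track whether the alarm has fired since it was last programmed: the IRQ handler latches alarm_pending, read_alarm reports it through alrm->pending, and set_alarm clears it when the alarm is written disabled. Roughly, with hypothetical names and the real driver's locking omitted:

#include <linux/device.h>
#include <linux/rtc.h>

struct foo_rtc {			/* hypothetical */
	struct rtc_device *rtc_dev;
	int alarm_enabled;
	int alarm_pending;
};

static void foo_alarm_irq(struct foo_rtc *rtc)
{
	rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
	rtc->alarm_pending = 1;		/* latched until reported or rearmed */
}

static int foo_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct foo_rtc *rtc = dev_get_drvdata(dev);

	alrm->enabled = rtc->alarm_enabled;
	alrm->pending = rtc->alarm_pending;	/* what RTC_WKALM_RD reports */
	/* ... the alarm time itself is read from the chip here ... */
	return 0;
}
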
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b725913ccbe8..65f346b2fbae 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -212,6 +212,8 @@ static int pcf8563_probe(struct i2c_client *client,
212 212
213 dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); 213 dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
214 214
215 i2c_set_clientdata(client, pcf8563);
216
215 pcf8563->rtc = rtc_device_register(pcf8563_driver.driver.name, 217 pcf8563->rtc = rtc_device_register(pcf8563_driver.driver.name,
216 &client->dev, &pcf8563_rtc_ops, THIS_MODULE); 218 &client->dev, &pcf8563_rtc_ops, THIS_MODULE);
217 219
@@ -220,8 +222,6 @@ static int pcf8563_probe(struct i2c_client *client,
220 goto exit_kfree; 222 goto exit_kfree;
221 } 223 }
222 224
223 i2c_set_clientdata(client, pcf8563);
224
225 return 0; 225 return 0;
226 226
227exit_kfree: 227exit_kfree:
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 7d33cda3f8f6..2d201afead3b 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -277,6 +277,8 @@ static int pcf8583_probe(struct i2c_client *client,
277 if (!pcf8583) 277 if (!pcf8583)
278 return -ENOMEM; 278 return -ENOMEM;
279 279
280 i2c_set_clientdata(client, pcf8583);
281
280 pcf8583->rtc = rtc_device_register(pcf8583_driver.driver.name, 282 pcf8583->rtc = rtc_device_register(pcf8583_driver.driver.name,
281 &client->dev, &pcf8583_rtc_ops, THIS_MODULE); 283 &client->dev, &pcf8583_rtc_ops, THIS_MODULE);
282 284
@@ -285,7 +287,6 @@ static int pcf8583_probe(struct i2c_client *client,
285 goto exit_kfree; 287 goto exit_kfree;
286 } 288 }
287 289
288 i2c_set_clientdata(client, pcf8583);
289 return 0; 290 return 0;
290 291
291exit_kfree: 292exit_kfree:
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f41873f98f66..0264b117893b 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -51,10 +51,10 @@ static int pl031_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
51 51
52 switch (cmd) { 52 switch (cmd) {
53 case RTC_AIE_OFF: 53 case RTC_AIE_OFF:
54 __raw_writel(1, ldata->base + RTC_MIS); 54 writel(1, ldata->base + RTC_MIS);
55 return 0; 55 return 0;
56 case RTC_AIE_ON: 56 case RTC_AIE_ON:
57 __raw_writel(0, ldata->base + RTC_MIS); 57 writel(0, ldata->base + RTC_MIS);
58 return 0; 58 return 0;
59 } 59 }
60 60
@@ -65,7 +65,7 @@ static int pl031_read_time(struct device *dev, struct rtc_time *tm)
65{ 65{
66 struct pl031_local *ldata = dev_get_drvdata(dev); 66 struct pl031_local *ldata = dev_get_drvdata(dev);
67 67
68 rtc_time_to_tm(__raw_readl(ldata->base + RTC_DR), tm); 68 rtc_time_to_tm(readl(ldata->base + RTC_DR), tm);
69 69
70 return 0; 70 return 0;
71} 71}
@@ -76,7 +76,7 @@ static int pl031_set_time(struct device *dev, struct rtc_time *tm)
76 struct pl031_local *ldata = dev_get_drvdata(dev); 76 struct pl031_local *ldata = dev_get_drvdata(dev);
77 77
78 rtc_tm_to_time(tm, &time); 78 rtc_tm_to_time(tm, &time);
79 __raw_writel(time, ldata->base + RTC_LR); 79 writel(time, ldata->base + RTC_LR);
80 80
81 return 0; 81 return 0;
82} 82}
@@ -85,9 +85,9 @@ static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
85{ 85{
86 struct pl031_local *ldata = dev_get_drvdata(dev); 86 struct pl031_local *ldata = dev_get_drvdata(dev);
87 87
88 rtc_time_to_tm(__raw_readl(ldata->base + RTC_MR), &alarm->time); 88 rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
89 alarm->pending = __raw_readl(ldata->base + RTC_RIS); 89 alarm->pending = readl(ldata->base + RTC_RIS);
90 alarm->enabled = __raw_readl(ldata->base + RTC_IMSC); 90 alarm->enabled = readl(ldata->base + RTC_IMSC);
91 91
92 return 0; 92 return 0;
93} 93}
@@ -99,8 +99,8 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
99 99
100 rtc_tm_to_time(&alarm->time, &time); 100 rtc_tm_to_time(&alarm->time, &time);
101 101
102 __raw_writel(time, ldata->base + RTC_MR); 102 writel(time, ldata->base + RTC_MR);
103 __raw_writel(!alarm->enabled, ldata->base + RTC_MIS); 103 writel(!alarm->enabled, ldata->base + RTC_MIS);
104 104
105 return 0; 105 return 0;
106} 106}
@@ -180,8 +180,9 @@ err_req:
180 180
181static struct amba_id pl031_ids[] __initdata = { 181static struct amba_id pl031_ids[] __initdata = {
182 { 182 {
183 .id = 0x00041031, 183 .id = 0x00041031,
184 .mask = 0x000fffff, }, 184 .mask = 0x000fffff,
185 },
185 {0, 0}, 186 {0, 0},
186}; 187};
187 188
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 747ca194fad4..e6351b743da6 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -456,7 +456,7 @@ static int pxa_rtc_resume(struct device *dev)
456 return 0; 456 return 0;
457} 457}
458 458
459static struct dev_pm_ops pxa_rtc_pm_ops = { 459static const struct dev_pm_ops pxa_rtc_pm_ops = {
460 .suspend = pxa_rtc_suspend, 460 .suspend = pxa_rtc_suspend,
461 .resume = pxa_rtc_resume, 461 .resume = pxa_rtc_resume,
462}; 462};
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 29f98a70586e..e4a44b641702 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -407,7 +407,7 @@ static int sa1100_rtc_resume(struct device *dev)
407 return 0; 407 return 0;
408} 408}
409 409
410static struct dev_pm_ops sa1100_rtc_pm_ops = { 410static const struct dev_pm_ops sa1100_rtc_pm_ops = {
411 .suspend = sa1100_rtc_suspend, 411 .suspend = sa1100_rtc_suspend,
412 .resume = sa1100_rtc_resume, 412 .resume = sa1100_rtc_resume,
413}; 413};
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index e6ed5404bca0..e95cc6f8d61e 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -826,7 +826,7 @@ static int sh_rtc_resume(struct device *dev)
826 return 0; 826 return 0;
827} 827}
828 828
829static struct dev_pm_ops sh_rtc_dev_pm_ops = { 829static const struct dev_pm_ops sh_rtc_dev_pm_ops = {
830 .suspend = sh_rtc_suspend, 830 .suspend = sh_rtc_suspend,
831 .resume = sh_rtc_resume, 831 .resume = sh_rtc_resume,
832}; 832};
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index d491eb265c38..67700831b5c9 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -62,7 +62,6 @@
62struct rtc_plat_data { 62struct rtc_plat_data {
63 struct rtc_device *rtc; 63 struct rtc_device *rtc;
64 void __iomem *ioaddr; 64 void __iomem *ioaddr;
65 unsigned long baseaddr;
66 unsigned long last_jiffies; 65 unsigned long last_jiffies;
67 int irq; 66 int irq;
68 unsigned int irqen; 67 unsigned int irqen;
@@ -70,6 +69,7 @@ struct rtc_plat_data {
70 int alrm_min; 69 int alrm_min;
71 int alrm_hour; 70 int alrm_hour;
72 int alrm_mday; 71 int alrm_mday;
72 spinlock_t lock;
73}; 73};
74 74
75static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm) 75static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -142,7 +142,7 @@ static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
142 unsigned long irqflags; 142 unsigned long irqflags;
143 u8 flags; 143 u8 flags;
144 144
145 spin_lock_irqsave(&pdata->rtc->irq_lock, irqflags); 145 spin_lock_irqsave(&pdata->lock, irqflags);
146 146
147 flags = readb(ioaddr + RTC_FLAGS); 147 flags = readb(ioaddr + RTC_FLAGS);
148 writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS); 148 writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
@@ -162,7 +162,7 @@ static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
162 writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS); 162 writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS);
163 readb(ioaddr + RTC_FLAGS); /* clear interrupts */ 163 readb(ioaddr + RTC_FLAGS); /* clear interrupts */
164 writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS); 164 writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
165 spin_unlock_irqrestore(&pdata->rtc->irq_lock, irqflags); 165 spin_unlock_irqrestore(&pdata->lock, irqflags);
166} 166}
167 167
168static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) 168static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -202,56 +202,53 @@ static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id)
202 struct platform_device *pdev = dev_id; 202 struct platform_device *pdev = dev_id;
203 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 203 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
204 void __iomem *ioaddr = pdata->ioaddr; 204 void __iomem *ioaddr = pdata->ioaddr;
205 unsigned long events = RTC_IRQF; 205 unsigned long events = 0;
206 206
207 spin_lock(&pdata->lock);
207 /* read and clear interrupt */ 208 /* read and clear interrupt */
208 if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF)) 209 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
209 return IRQ_NONE; 210 events = RTC_IRQF;
210 if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80) 211 if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
211 events |= RTC_UF; 212 events |= RTC_UF;
212 else 213 else
213 events |= RTC_AF; 214 events |= RTC_AF;
214 rtc_update_irq(pdata->rtc, 1, events); 215 if (likely(pdata->rtc))
215 return IRQ_HANDLED; 216 rtc_update_irq(pdata->rtc, 1, events);
217 }
218 spin_unlock(&pdata->lock);
219 return events ? IRQ_HANDLED : IRQ_NONE;
216} 220}
217 221
218static int stk17ta8_rtc_ioctl(struct device *dev, unsigned int cmd, 222static int stk17ta8_rtc_alarm_irq_enable(struct device *dev,
219 unsigned long arg) 223 unsigned int enabled)
220{ 224{
221 struct platform_device *pdev = to_platform_device(dev); 225 struct platform_device *pdev = to_platform_device(dev);
222 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 226 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
223 227
224 if (pdata->irq <= 0) 228 if (pdata->irq <= 0)
225 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ 229 return -EINVAL;
226 switch (cmd) { 230 if (enabled)
227 case RTC_AIE_OFF:
228 pdata->irqen &= ~RTC_AF;
229 stk17ta8_rtc_update_alarm(pdata);
230 break;
231 case RTC_AIE_ON:
232 pdata->irqen |= RTC_AF; 231 pdata->irqen |= RTC_AF;
233 stk17ta8_rtc_update_alarm(pdata); 232 else
234 break; 233 pdata->irqen &= ~RTC_AF;
235 default: 234 stk17ta8_rtc_update_alarm(pdata);
236 return -ENOIOCTLCMD;
237 }
238 return 0; 235 return 0;
239} 236}
240 237
241static const struct rtc_class_ops stk17ta8_rtc_ops = { 238static const struct rtc_class_ops stk17ta8_rtc_ops = {
242 .read_time = stk17ta8_rtc_read_time, 239 .read_time = stk17ta8_rtc_read_time,
243 .set_time = stk17ta8_rtc_set_time, 240 .set_time = stk17ta8_rtc_set_time,
244 .read_alarm = stk17ta8_rtc_read_alarm, 241 .read_alarm = stk17ta8_rtc_read_alarm,
245 .set_alarm = stk17ta8_rtc_set_alarm, 242 .set_alarm = stk17ta8_rtc_set_alarm,
246 .ioctl = stk17ta8_rtc_ioctl, 243 .alarm_irq_enable = stk17ta8_rtc_alarm_irq_enable,
247}; 244};
248 245
249static ssize_t stk17ta8_nvram_read(struct kobject *kobj, 246static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
250 struct bin_attribute *attr, char *buf, 247 struct bin_attribute *attr, char *buf,
251 loff_t pos, size_t size) 248 loff_t pos, size_t size)
252{ 249{
253 struct platform_device *pdev = 250 struct device *dev = container_of(kobj, struct device, kobj);
254 to_platform_device(container_of(kobj, struct device, kobj)); 251 struct platform_device *pdev = to_platform_device(dev);
255 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 252 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
256 void __iomem *ioaddr = pdata->ioaddr; 253 void __iomem *ioaddr = pdata->ioaddr;
257 ssize_t count; 254 ssize_t count;
@@ -265,8 +262,8 @@ static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
265 struct bin_attribute *attr, char *buf, 262 struct bin_attribute *attr, char *buf,
266 loff_t pos, size_t size) 263 loff_t pos, size_t size)
267{ 264{
268 struct platform_device *pdev = 265 struct device *dev = container_of(kobj, struct device, kobj);
269 to_platform_device(container_of(kobj, struct device, kobj)); 266 struct platform_device *pdev = to_platform_device(dev);
270 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 267 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
271 void __iomem *ioaddr = pdata->ioaddr; 268 void __iomem *ioaddr = pdata->ioaddr;
272 ssize_t count; 269 ssize_t count;
@@ -288,31 +285,26 @@ static struct bin_attribute stk17ta8_nvram_attr = {
288 285
289static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev) 286static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
290{ 287{
291 struct rtc_device *rtc;
292 struct resource *res; 288 struct resource *res;
293 unsigned int cal; 289 unsigned int cal;
294 unsigned int flags; 290 unsigned int flags;
295 struct rtc_plat_data *pdata; 291 struct rtc_plat_data *pdata;
296 void __iomem *ioaddr = NULL; 292 void __iomem *ioaddr;
297 int ret = 0; 293 int ret = 0;
298 294
299 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 295 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
300 if (!res) 296 if (!res)
301 return -ENODEV; 297 return -ENODEV;
302 298
303 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 299 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
304 if (!pdata) 300 if (!pdata)
305 return -ENOMEM; 301 return -ENOMEM;
306 if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) { 302 if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
307 ret = -EBUSY; 303 pdev->name))
308 goto out; 304 return -EBUSY;
309 } 305 ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
310 pdata->baseaddr = res->start; 306 if (!ioaddr)
311 ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE); 307 return -ENOMEM;
312 if (!ioaddr) {
313 ret = -ENOMEM;
314 goto out;
315 }
316 pdata->ioaddr = ioaddr; 308 pdata->ioaddr = ioaddr;
317 pdata->irq = platform_get_irq(pdev, 0); 309 pdata->irq = platform_get_irq(pdev, 0);
318 310
@@ -328,9 +320,13 @@ static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
328 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_PF) 320 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_PF)
329 dev_warn(&pdev->dev, "voltage-low detected.\n"); 321 dev_warn(&pdev->dev, "voltage-low detected.\n");
330 322
323 spin_lock_init(&pdata->lock);
324 pdata->last_jiffies = jiffies;
325 platform_set_drvdata(pdev, pdata);
331 if (pdata->irq > 0) { 326 if (pdata->irq > 0) {
332 writeb(0, ioaddr + RTC_INTERRUPTS); 327 writeb(0, ioaddr + RTC_INTERRUPTS);
333 if (request_irq(pdata->irq, stk17ta8_rtc_interrupt, 328 if (devm_request_irq(&pdev->dev, pdata->irq,
329 stk17ta8_rtc_interrupt,
334 IRQF_DISABLED | IRQF_SHARED, 330 IRQF_DISABLED | IRQF_SHARED,
335 pdev->name, pdev) < 0) { 331 pdev->name, pdev) < 0) {
336 dev_warn(&pdev->dev, "interrupt not available.\n"); 332 dev_warn(&pdev->dev, "interrupt not available.\n");
@@ -338,29 +334,14 @@ static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
338 } 334 }
339 } 335 }
340 336
341 rtc = rtc_device_register(pdev->name, &pdev->dev, 337 pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
342 &stk17ta8_rtc_ops, THIS_MODULE); 338 &stk17ta8_rtc_ops, THIS_MODULE);
343 if (IS_ERR(rtc)) { 339 if (IS_ERR(pdata->rtc))
344 ret = PTR_ERR(rtc); 340 return PTR_ERR(pdata->rtc);
345 goto out; 341
346 }
347 pdata->rtc = rtc;
348 pdata->last_jiffies = jiffies;
349 platform_set_drvdata(pdev, pdata);
350 ret = sysfs_create_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr); 342 ret = sysfs_create_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
351 if (ret) 343 if (ret)
352 goto out;
353 return 0;
354 out:
355 if (pdata->rtc)
356 rtc_device_unregister(pdata->rtc); 344 rtc_device_unregister(pdata->rtc);
357 if (pdata->irq > 0)
358 free_irq(pdata->irq, pdev);
359 if (ioaddr)
360 iounmap(ioaddr);
361 if (pdata->baseaddr)
362 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
363 kfree(pdata);
364 return ret; 345 return ret;
365} 346}
366 347
@@ -370,13 +351,8 @@ static int __devexit stk17ta8_rtc_remove(struct platform_device *pdev)
370 351
371 sysfs_remove_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr); 352 sysfs_remove_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
372 rtc_device_unregister(pdata->rtc); 353 rtc_device_unregister(pdata->rtc);
373 if (pdata->irq > 0) { 354 if (pdata->irq > 0)
374 writeb(0, pdata->ioaddr + RTC_INTERRUPTS); 355 writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
375 free_irq(pdata->irq, pdev);
376 }
377 iounmap(pdata->ioaddr);
378 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
379 kfree(pdata);
380 return 0; 356 return 0;
381} 357}
382 358
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 4a6ed1104fbb..9ee81d8aa7c0 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -17,6 +17,7 @@
17struct tx4939rtc_plat_data { 17struct tx4939rtc_plat_data {
18 struct rtc_device *rtc; 18 struct rtc_device *rtc;
19 struct tx4939_rtc_reg __iomem *rtcreg; 19 struct tx4939_rtc_reg __iomem *rtcreg;
20 spinlock_t lock;
20}; 21};
21 22
22static struct tx4939rtc_plat_data *get_tx4939rtc_plat_data(struct device *dev) 23static struct tx4939rtc_plat_data *get_tx4939rtc_plat_data(struct device *dev)
@@ -52,14 +53,14 @@ static int tx4939_rtc_set_mmss(struct device *dev, unsigned long secs)
52 buf[3] = secs >> 8; 53 buf[3] = secs >> 8;
53 buf[4] = secs >> 16; 54 buf[4] = secs >> 16;
54 buf[5] = secs >> 24; 55 buf[5] = secs >> 24;
55 spin_lock_irq(&pdata->rtc->irq_lock); 56 spin_lock_irq(&pdata->lock);
56 __raw_writel(0, &rtcreg->adr); 57 __raw_writel(0, &rtcreg->adr);
57 for (i = 0; i < 6; i++) 58 for (i = 0; i < 6; i++)
58 __raw_writel(buf[i], &rtcreg->dat); 59 __raw_writel(buf[i], &rtcreg->dat);
59 ret = tx4939_rtc_cmd(rtcreg, 60 ret = tx4939_rtc_cmd(rtcreg,
60 TX4939_RTCCTL_COMMAND_SETTIME | 61 TX4939_RTCCTL_COMMAND_SETTIME |
61 (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME)); 62 (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
62 spin_unlock_irq(&pdata->rtc->irq_lock); 63 spin_unlock_irq(&pdata->lock);
63 return ret; 64 return ret;
64} 65}
65 66
@@ -71,18 +72,18 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
71 unsigned long sec; 72 unsigned long sec;
72 unsigned char buf[6]; 73 unsigned char buf[6];
73 74
74 spin_lock_irq(&pdata->rtc->irq_lock); 75 spin_lock_irq(&pdata->lock);
75 ret = tx4939_rtc_cmd(rtcreg, 76 ret = tx4939_rtc_cmd(rtcreg,
76 TX4939_RTCCTL_COMMAND_GETTIME | 77 TX4939_RTCCTL_COMMAND_GETTIME |
77 (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME)); 78 (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
78 if (ret) { 79 if (ret) {
79 spin_unlock_irq(&pdata->rtc->irq_lock); 80 spin_unlock_irq(&pdata->lock);
80 return ret; 81 return ret;
81 } 82 }
82 __raw_writel(2, &rtcreg->adr); 83 __raw_writel(2, &rtcreg->adr);
83 for (i = 2; i < 6; i++) 84 for (i = 2; i < 6; i++)
84 buf[i] = __raw_readl(&rtcreg->dat); 85 buf[i] = __raw_readl(&rtcreg->dat);
85 spin_unlock_irq(&pdata->rtc->irq_lock); 86 spin_unlock_irq(&pdata->lock);
86 sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; 87 sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
87 rtc_time_to_tm(sec, tm); 88 rtc_time_to_tm(sec, tm);
88 return rtc_valid_tm(tm); 89 return rtc_valid_tm(tm);
@@ -110,13 +111,13 @@ static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
110 buf[3] = sec >> 8; 111 buf[3] = sec >> 8;
111 buf[4] = sec >> 16; 112 buf[4] = sec >> 16;
112 buf[5] = sec >> 24; 113 buf[5] = sec >> 24;
113 spin_lock_irq(&pdata->rtc->irq_lock); 114 spin_lock_irq(&pdata->lock);
114 __raw_writel(0, &rtcreg->adr); 115 __raw_writel(0, &rtcreg->adr);
115 for (i = 0; i < 6; i++) 116 for (i = 0; i < 6; i++)
116 __raw_writel(buf[i], &rtcreg->dat); 117 __raw_writel(buf[i], &rtcreg->dat);
117 ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETALARM | 118 ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETALARM |
118 (alrm->enabled ? TX4939_RTCCTL_ALME : 0)); 119 (alrm->enabled ? TX4939_RTCCTL_ALME : 0));
119 spin_unlock_irq(&pdata->rtc->irq_lock); 120 spin_unlock_irq(&pdata->lock);
120 return ret; 121 return ret;
121} 122}
122 123
@@ -129,12 +130,12 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
129 unsigned char buf[6]; 130 unsigned char buf[6];
130 u32 ctl; 131 u32 ctl;
131 132
132 spin_lock_irq(&pdata->rtc->irq_lock); 133 spin_lock_irq(&pdata->lock);
133 ret = tx4939_rtc_cmd(rtcreg, 134 ret = tx4939_rtc_cmd(rtcreg,
134 TX4939_RTCCTL_COMMAND_GETALARM | 135 TX4939_RTCCTL_COMMAND_GETALARM |
135 (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME)); 136 (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
136 if (ret) { 137 if (ret) {
137 spin_unlock_irq(&pdata->rtc->irq_lock); 138 spin_unlock_irq(&pdata->lock);
138 return ret; 139 return ret;
139 } 140 }
140 __raw_writel(2, &rtcreg->adr); 141 __raw_writel(2, &rtcreg->adr);
@@ -143,7 +144,7 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
143 ctl = __raw_readl(&rtcreg->ctl); 144 ctl = __raw_readl(&rtcreg->ctl);
144 alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0; 145 alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
145 alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0; 146 alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
146 spin_unlock_irq(&pdata->rtc->irq_lock); 147 spin_unlock_irq(&pdata->lock);
147 sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; 148 sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
148 rtc_time_to_tm(sec, &alrm->time); 149 rtc_time_to_tm(sec, &alrm->time);
149 return rtc_valid_tm(&alrm->time); 150 return rtc_valid_tm(&alrm->time);
@@ -153,11 +154,11 @@ static int tx4939_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
153{ 154{
154 struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); 155 struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
155 156
156 spin_lock_irq(&pdata->rtc->irq_lock); 157 spin_lock_irq(&pdata->lock);
157 tx4939_rtc_cmd(pdata->rtcreg, 158 tx4939_rtc_cmd(pdata->rtcreg,
158 TX4939_RTCCTL_COMMAND_NOP | 159 TX4939_RTCCTL_COMMAND_NOP |
159 (enabled ? TX4939_RTCCTL_ALME : 0)); 160 (enabled ? TX4939_RTCCTL_ALME : 0));
160 spin_unlock_irq(&pdata->rtc->irq_lock); 161 spin_unlock_irq(&pdata->lock);
161 return 0; 162 return 0;
162} 163}
163 164
@@ -167,13 +168,14 @@ static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id)
167 struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; 168 struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
168 unsigned long events = RTC_IRQF; 169 unsigned long events = RTC_IRQF;
169 170
170 spin_lock(&pdata->rtc->irq_lock); 171 spin_lock(&pdata->lock);
171 if (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALMD) { 172 if (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALMD) {
172 events |= RTC_AF; 173 events |= RTC_AF;
173 tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP); 174 tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP);
174 } 175 }
175 spin_unlock(&pdata->rtc->irq_lock); 176 spin_unlock(&pdata->lock);
176 rtc_update_irq(pdata->rtc, 1, events); 177 if (likely(pdata->rtc))
178 rtc_update_irq(pdata->rtc, 1, events);
177 return IRQ_HANDLED; 179 return IRQ_HANDLED;
178} 180}
179 181
@@ -194,13 +196,13 @@ static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
194 struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; 196 struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
195 ssize_t count; 197 ssize_t count;
196 198
197 spin_lock_irq(&pdata->rtc->irq_lock); 199 spin_lock_irq(&pdata->lock);
198 for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE; 200 for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
199 count++, size--) { 201 count++, size--) {
200 __raw_writel(pos++, &rtcreg->adr); 202 __raw_writel(pos++, &rtcreg->adr);
201 *buf++ = __raw_readl(&rtcreg->dat); 203 *buf++ = __raw_readl(&rtcreg->dat);
202 } 204 }
203 spin_unlock_irq(&pdata->rtc->irq_lock); 205 spin_unlock_irq(&pdata->lock);
204 return count; 206 return count;
205} 207}
206 208
@@ -213,13 +215,13 @@ static ssize_t tx4939_rtc_nvram_write(struct kobject *kobj,
213 struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; 215 struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
214 ssize_t count; 216 ssize_t count;
215 217
216 spin_lock_irq(&pdata->rtc->irq_lock); 218 spin_lock_irq(&pdata->lock);
217 for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE; 219 for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
218 count++, size--) { 220 count++, size--) {
219 __raw_writel(pos++, &rtcreg->adr); 221 __raw_writel(pos++, &rtcreg->adr);
220 __raw_writel(*buf++, &rtcreg->dat); 222 __raw_writel(*buf++, &rtcreg->dat);
221 } 223 }
222 spin_unlock_irq(&pdata->rtc->irq_lock); 224 spin_unlock_irq(&pdata->lock);
223 return count; 225 return count;
224} 226}
225 227
@@ -259,6 +261,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
259 if (!pdata->rtcreg) 261 if (!pdata->rtcreg)
260 return -EBUSY; 262 return -EBUSY;
261 263
264 spin_lock_init(&pdata->lock);
262 tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP); 265 tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
263 if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt, 266 if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt,
264 IRQF_DISABLED, pdev->name, &pdev->dev) < 0) 267 IRQF_DISABLED, pdev->name, &pdev->dev) < 0)
@@ -277,14 +280,12 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
277static int __exit tx4939_rtc_remove(struct platform_device *pdev) 280static int __exit tx4939_rtc_remove(struct platform_device *pdev)
278{ 281{
279 struct tx4939rtc_plat_data *pdata = platform_get_drvdata(pdev); 282 struct tx4939rtc_plat_data *pdata = platform_get_drvdata(pdev);
280 struct rtc_device *rtc = pdata->rtc;
281 283
282 spin_lock_irq(&rtc->irq_lock);
283 tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
284 spin_unlock_irq(&rtc->irq_lock);
285 sysfs_remove_bin_file(&pdev->dev.kobj, &tx4939_rtc_nvram_attr); 284 sysfs_remove_bin_file(&pdev->dev.kobj, &tx4939_rtc_nvram_attr);
286 rtc_device_unregister(rtc); 285 rtc_device_unregister(pdata->rtc);
287 platform_set_drvdata(pdev, NULL); 286 spin_lock_irq(&pdata->lock);
287 tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
288 spin_unlock_irq(&pdata->lock);
288 return 0; 289 return 0;
289} 290}
290 291
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index ad741afd47d8..bed4cab07043 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -304,7 +304,6 @@ static int rtc_probe(struct platform_device *pdev)
304{ 304{
305 struct v3020_platform_data *pdata = pdev->dev.platform_data; 305 struct v3020_platform_data *pdata = pdev->dev.platform_data;
306 struct v3020 *chip; 306 struct v3020 *chip;
307 struct rtc_device *rtc;
308 int retval = -EBUSY; 307 int retval = -EBUSY;
309 int i; 308 int i;
310 int temp; 309 int temp;
@@ -353,13 +352,12 @@ static int rtc_probe(struct platform_device *pdev)
353 352
354 platform_set_drvdata(pdev, chip); 353 platform_set_drvdata(pdev, chip);
355 354
356 rtc = rtc_device_register("v3020", 355 chip->rtc = rtc_device_register("v3020",
357 &pdev->dev, &v3020_rtc_ops, THIS_MODULE); 356 &pdev->dev, &v3020_rtc_ops, THIS_MODULE);
358 if (IS_ERR(rtc)) { 357 if (IS_ERR(chip->rtc)) {
359 retval = PTR_ERR(rtc); 358 retval = PTR_ERR(chip->rtc);
360 goto err_io; 359 goto err_io;
361 } 360 }
362 chip->rtc = rtc;
363 361
364 return 0; 362 return 0;
365 363
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index fadddac1e5a4..c3244244e8cf 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -327,7 +327,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
327 if (!res) 327 if (!res)
328 return -EBUSY; 328 return -EBUSY;
329 329
330 rtc1_base = ioremap(res->start, res->end - res->start + 1); 330 rtc1_base = ioremap(res->start, resource_size(res));
331 if (!rtc1_base) 331 if (!rtc1_base)
332 return -EBUSY; 332 return -EBUSY;
333 333
@@ -337,7 +337,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
337 goto err_rtc1_iounmap; 337 goto err_rtc1_iounmap;
338 } 338 }
339 339
340 rtc2_base = ioremap(res->start, res->end - res->start + 1); 340 rtc2_base = ioremap(res->start, resource_size(res));
341 if (!rtc2_base) { 341 if (!rtc2_base) {
342 retval = -EBUSY; 342 retval = -EBUSY;
343 goto err_rtc1_iounmap; 343 goto err_rtc1_iounmap;
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 79795cdf6ed8..000c7e481e59 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -485,7 +485,7 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
485 return 0; 485 return 0;
486} 486}
487 487
488static struct dev_pm_ops wm831x_rtc_pm_ops = { 488static const struct dev_pm_ops wm831x_rtc_pm_ops = {
489 .suspend = wm831x_rtc_suspend, 489 .suspend = wm831x_rtc_suspend,
490 .resume = wm831x_rtc_resume, 490 .resume = wm831x_rtc_resume,
491 491
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index f16486635a8e..f1e440521c54 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -354,8 +354,9 @@ static const struct rtc_class_ops wm8350_rtc_ops = {
354}; 354};
355 355
356#ifdef CONFIG_PM 356#ifdef CONFIG_PM
357static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state) 357static int wm8350_rtc_suspend(struct device *dev)
358{ 358{
359 struct platform_device *pdev = to_platform_device(dev);
359 struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev); 360 struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
360 int ret = 0; 361 int ret = 0;
361 u16 reg; 362 u16 reg;
@@ -373,8 +374,9 @@ static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
373 return ret; 374 return ret;
374} 375}
375 376
376static int wm8350_rtc_resume(struct platform_device *pdev) 377static int wm8350_rtc_resume(struct device *dev)
377{ 378{
379 struct platform_device *pdev = to_platform_device(dev);
378 struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev); 380 struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
379 int ret; 381 int ret;
380 382
@@ -484,13 +486,17 @@ static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
484 return 0; 486 return 0;
485} 487}
486 488
489static struct dev_pm_ops wm8350_rtc_pm_ops = {
490 .suspend = wm8350_rtc_suspend,
491 .resume = wm8350_rtc_resume,
492};
493
487static struct platform_driver wm8350_rtc_driver = { 494static struct platform_driver wm8350_rtc_driver = {
488 .probe = wm8350_rtc_probe, 495 .probe = wm8350_rtc_probe,
489 .remove = __devexit_p(wm8350_rtc_remove), 496 .remove = __devexit_p(wm8350_rtc_remove),
490 .suspend = wm8350_rtc_suspend,
491 .resume = wm8350_rtc_resume,
492 .driver = { 497 .driver = {
493 .name = "wm8350-rtc", 498 .name = "wm8350-rtc",
499 .pm = &wm8350_rtc_pm_ops,
494 }, 500 },
495}; 501};
496 502
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 6583c1a8b070..9aae49139a0a 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -155,11 +155,11 @@ static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
155} 155}
156 156
157static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, 157static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
158 int datetoo, u8 reg_base, unsigned char alm_enable) 158 u8 reg_base, unsigned char alm_enable)
159{ 159{
160 int i, xfer, nbytes; 160 int i, xfer;
161 unsigned char buf[8];
162 unsigned char rdata[10] = { 0, reg_base }; 161 unsigned char rdata[10] = { 0, reg_base };
162 unsigned char *buf = rdata + 2;
163 163
164 static const unsigned char wel[3] = { 0, X1205_REG_SR, 164 static const unsigned char wel[3] = { 0, X1205_REG_SR,
165 X1205_SR_WEL }; 165 X1205_SR_WEL };
@@ -170,9 +170,9 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
170 static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 }; 170 static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 };
171 171
172 dev_dbg(&client->dev, 172 dev_dbg(&client->dev,
173 "%s: secs=%d, mins=%d, hours=%d\n", 173 "%s: sec=%d min=%d hour=%d mday=%d mon=%d year=%d wday=%d\n",
174 __func__, 174 __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday,
175 tm->tm_sec, tm->tm_min, tm->tm_hour); 175 tm->tm_mon, tm->tm_year, tm->tm_wday);
176 176
177 buf[CCR_SEC] = bin2bcd(tm->tm_sec); 177 buf[CCR_SEC] = bin2bcd(tm->tm_sec);
178 buf[CCR_MIN] = bin2bcd(tm->tm_min); 178 buf[CCR_MIN] = bin2bcd(tm->tm_min);
@@ -180,23 +180,15 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
180 /* set hour and 24hr bit */ 180 /* set hour and 24hr bit */
181 buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL; 181 buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL;
182 182
183 /* should we also set the date? */ 183 buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
184 if (datetoo) {
185 dev_dbg(&client->dev,
186 "%s: mday=%d, mon=%d, year=%d, wday=%d\n",
187 __func__,
188 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
189 184
190 buf[CCR_MDAY] = bin2bcd(tm->tm_mday); 185 /* month, 1 - 12 */
186 buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
191 187
192 /* month, 1 - 12 */ 188 /* year, since the rtc epoch*/
193 buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1); 189 buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
194 190 buf[CCR_WDAY] = tm->tm_wday & 0x07;
195 /* year, since the rtc epoch*/ 191 buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
196 buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
197 buf[CCR_WDAY] = tm->tm_wday & 0x07;
198 buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
199 }
200 192
201 /* If writing alarm registers, set compare bits on registers 0-4 */ 193 /* If writing alarm registers, set compare bits on registers 0-4 */
202 if (reg_base < X1205_CCR_BASE) 194 if (reg_base < X1205_CCR_BASE)
@@ -214,17 +206,8 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
214 return -EIO; 206 return -EIO;
215 } 207 }
216 208
217 209 xfer = i2c_master_send(client, rdata, sizeof(rdata));
218 /* write register's data */ 210 if (xfer != sizeof(rdata)) {
219 if (datetoo)
220 nbytes = 8;
221 else
222 nbytes = 3;
223 for (i = 0; i < nbytes; i++)
224 rdata[2+i] = buf[i];
225
226 xfer = i2c_master_send(client, rdata, nbytes+2);
227 if (xfer != nbytes+2) {
228 dev_err(&client->dev, 211 dev_err(&client->dev,
229 "%s: result=%d addr=%02x, data=%02x\n", 212 "%s: result=%d addr=%02x, data=%02x\n",
230 __func__, 213 __func__,
@@ -282,7 +265,7 @@ static int x1205_fix_osc(struct i2c_client *client)
282 265
283 memset(&tm, 0, sizeof(tm)); 266 memset(&tm, 0, sizeof(tm));
284 267
285 err = x1205_set_datetime(client, &tm, 1, X1205_CCR_BASE, 0); 268 err = x1205_set_datetime(client, &tm, X1205_CCR_BASE, 0);
286 if (err < 0) 269 if (err < 0)
287 dev_err(&client->dev, "unable to restart the oscillator\n"); 270 dev_err(&client->dev, "unable to restart the oscillator\n");
288 271
@@ -481,7 +464,7 @@ static int x1205_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
481static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) 464static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
482{ 465{
483 return x1205_set_datetime(to_i2c_client(dev), 466 return x1205_set_datetime(to_i2c_client(dev),
484 &alrm->time, 1, X1205_ALM0_BASE, alrm->enabled); 467 &alrm->time, X1205_ALM0_BASE, alrm->enabled);
485} 468}
486 469
487static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm) 470static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -493,7 +476,7 @@ static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
493static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm) 476static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm)
494{ 477{
495 return x1205_set_datetime(to_i2c_client(dev), 478 return x1205_set_datetime(to_i2c_client(dev),
496 tm, 1, X1205_CCR_BASE, 0); 479 tm, X1205_CCR_BASE, 0);
497} 480}
498 481
499static int x1205_rtc_proc(struct device *dev, struct seq_file *seq) 482static int x1205_rtc_proc(struct device *dev, struct seq_file *seq)
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 5f23eca82804..6315fbd8e68b 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -14,6 +14,7 @@
14#define KMSG_COMPONENT "dasd" 14#define KMSG_COMPONENT "dasd"
15 15
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/string.h>
17#include <linux/seq_file.h> 18#include <linux/seq_file.h>
18#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
19#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
@@ -272,10 +273,10 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
272 DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer); 273 DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer);
273 274
274 /* check for valid verbs */ 275 /* check for valid verbs */
275 for (str = buffer; isspace(*str); str++); 276 str = skip_spaces(buffer);
276 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) { 277 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
277 /* 'set xxx' was given */ 278 /* 'set xxx' was given */
278 for (str = str + 4; isspace(*str); str++); 279 str = skip_spaces(str + 4);
279 if (strcmp(str, "on") == 0) { 280 if (strcmp(str, "on") == 0) {
280 /* switch on statistics profiling */ 281 /* switch on statistics profiling */
281 dasd_profile_level = DASD_PROFILE_ON; 282 dasd_profile_level = DASD_PROFILE_ON;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index f76f4bd82b9f..9b43ae94beba 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -1005,7 +1005,7 @@ static int dcssblk_thaw(struct device *dev)
1005 return 0; 1005 return 0;
1006} 1006}
1007 1007
1008static struct dev_pm_ops dcssblk_pm_ops = { 1008static const struct dev_pm_ops dcssblk_pm_ops = {
1009 .freeze = dcssblk_freeze, 1009 .freeze = dcssblk_freeze,
1010 .thaw = dcssblk_thaw, 1010 .thaw = dcssblk_thaw,
1011 .restore = dcssblk_restore, 1011 .restore = dcssblk_restore,
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 116d1b3eeb15..118de392af63 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -407,7 +407,7 @@ static int xpram_restore(struct device *dev)
407 return 0; 407 return 0;
408} 408}
409 409
410static struct dev_pm_ops xpram_pm_ops = { 410static const struct dev_pm_ops xpram_pm_ops = {
411 .restore = xpram_restore, 411 .restore = xpram_restore,
412}; 412};
413 413
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 60473f86e1f9..33e96484d54f 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -529,7 +529,7 @@ static int monreader_restore(struct device *dev)
529 return monreader_thaw(dev); 529 return monreader_thaw(dev);
530} 530}
531 531
532static struct dev_pm_ops monreader_pm_ops = { 532static const struct dev_pm_ops monreader_pm_ops = {
533 .freeze = monreader_freeze, 533 .freeze = monreader_freeze,
534 .thaw = monreader_thaw, 534 .thaw = monreader_thaw,
535 .restore = monreader_restore, 535 .restore = monreader_restore,
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 6532ed8b4afa..668a0579b26b 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -323,7 +323,7 @@ static int monwriter_thaw(struct device *dev)
323 return monwriter_restore(dev); 323 return monwriter_restore(dev);
324} 324}
325 325
326static struct dev_pm_ops monwriter_pm_ops = { 326static const struct dev_pm_ops monwriter_pm_ops = {
327 .freeze = monwriter_freeze, 327 .freeze = monwriter_freeze,
328 .thaw = monwriter_thaw, 328 .thaw = monwriter_thaw,
329 .restore = monwriter_restore, 329 .restore = monwriter_restore,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a983f5086788..ec88c59842e3 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1019,7 +1019,7 @@ static int sclp_restore(struct device *dev)
1019 return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); 1019 return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
1020} 1020}
1021 1021
1022static struct dev_pm_ops sclp_pm_ops = { 1022static const struct dev_pm_ops sclp_pm_ops = {
1023 .freeze = sclp_freeze, 1023 .freeze = sclp_freeze,
1024 .thaw = sclp_thaw, 1024 .thaw = sclp_thaw,
1025 .restore = sclp_restore, 1025 .restore = sclp_restore,
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 28b5afc129c3..b3beab610da4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -547,7 +547,7 @@ struct read_storage_sccb {
547 u32 entries[0]; 547 u32 entries[0];
548} __packed; 548} __packed;
549 549
550static struct dev_pm_ops sclp_mem_pm_ops = { 550static const struct dev_pm_ops sclp_mem_pm_ops = {
551 .freeze = sclp_mem_freeze, 551 .freeze = sclp_mem_freeze,
552}; 552};
553 553
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 899aa795bf38..7dfa5412d5a8 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -675,7 +675,7 @@ static int vmlogrdr_pm_prepare(struct device *dev)
675} 675}
676 676
677 677
678static struct dev_pm_ops vmlogrdr_pm_ops = { 678static const struct dev_pm_ops vmlogrdr_pm_ops = {
679 .prepare = vmlogrdr_pm_prepare, 679 .prepare = vmlogrdr_pm_prepare,
680}; 680};
681 681
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index a5a62f1f7747..5f97ea2ee6b1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -560,7 +560,7 @@ static int ccwgroup_pm_restore(struct device *dev)
560 return gdrv->restore ? gdrv->restore(gdev) : 0; 560 return gdrv->restore ? gdrv->restore(gdev) : 0;
561} 561}
562 562
563static struct dev_pm_ops ccwgroup_pm_ops = { 563static const struct dev_pm_ops ccwgroup_pm_ops = {
564 .prepare = ccwgroup_pm_prepare, 564 .prepare = ccwgroup_pm_prepare,
565 .complete = ccwgroup_pm_complete, 565 .complete = ccwgroup_pm_complete,
566 .freeze = ccwgroup_pm_freeze, 566 .freeze = ccwgroup_pm_freeze,
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92ff88ac1107..7679aee6fa14 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1148,7 +1148,7 @@ static int css_pm_restore(struct device *dev)
1148 return drv->restore ? drv->restore(sch) : 0; 1148 return drv->restore ? drv->restore(sch) : 0;
1149} 1149}
1150 1150
1151static struct dev_pm_ops css_pm_ops = { 1151static const struct dev_pm_ops css_pm_ops = {
1152 .prepare = css_pm_prepare, 1152 .prepare = css_pm_prepare,
1153 .complete = css_pm_complete, 1153 .complete = css_pm_complete,
1154 .freeze = css_pm_freeze, 1154 .freeze = css_pm_freeze,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 9fecfb4223a8..73901c9e260f 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1904,7 +1904,7 @@ out_unlock:
1904 return ret; 1904 return ret;
1905} 1905}
1906 1906
1907static struct dev_pm_ops ccw_pm_ops = { 1907static const struct dev_pm_ops ccw_pm_ops = {
1908 .prepare = ccw_device_pm_prepare, 1908 .prepare = ccw_device_pm_prepare,
1909 .complete = ccw_device_pm_complete, 1909 .complete = ccw_device_pm_complete,
1910 .freeze = ccw_device_pm_freeze, 1910 .freeze = ccw_device_pm_freeze,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 98c04cac43c1..65ebee0a3266 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -159,7 +159,7 @@ static void netiucv_pm_complete(struct device *);
159static int netiucv_pm_freeze(struct device *); 159static int netiucv_pm_freeze(struct device *);
160static int netiucv_pm_restore_thaw(struct device *); 160static int netiucv_pm_restore_thaw(struct device *);
161 161
162static struct dev_pm_ops netiucv_pm_ops = { 162static const struct dev_pm_ops netiucv_pm_ops = {
163 .prepare = netiucv_pm_prepare, 163 .prepare = netiucv_pm_prepare,
164 .complete = netiucv_pm_complete, 164 .complete = netiucv_pm_complete,
165 .freeze = netiucv_pm_freeze, 165 .freeze = netiucv_pm_freeze,
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3012355f8304..67f2485d2372 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -168,7 +168,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
168 return 0; 168 return 0;
169} 169}
170 170
171static struct dev_pm_ops smsg_pm_ops = { 171static const struct dev_pm_ops smsg_pm_ops = {
172 .freeze = smsg_pm_freeze, 172 .freeze = smsg_pm_freeze,
173 .thaw = smsg_pm_restore_thaw, 173 .thaw = smsg_pm_restore_thaw,
174 .restore = smsg_pm_restore_thaw, 174 .restore = smsg_pm_restore_thaw,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 206c2fa8c1ba..8643f5089361 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1333,7 +1333,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1333 1333
1334 error = &hostrcb->hcam.u.error.u.type_17_error; 1334 error = &hostrcb->hcam.u.error.u.type_17_error;
1335 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1335 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1336 strstrip(error->failure_reason); 1336 strim(error->failure_reason);
1337 1337
1338 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1338 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1339 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1339 be32_to_cpu(hostrcb->hcam.u.error.prc));
@@ -1359,7 +1359,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1359 1359
1360 error = &hostrcb->hcam.u.error.u.type_07_error; 1360 error = &hostrcb->hcam.u.error.u.type_07_error;
1361 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1361 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1362 strstrip(error->failure_reason); 1362 strim(error->failure_reason);
1363 1363
1364 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1364 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1365 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1365 be32_to_cpu(hostrcb->hcam.u.error.prc));
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 2b38f6ad6e11..8b955b534a36 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -984,7 +984,7 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
984 } 984 }
985} 985}
986 986
987static int skip_spaces(char *ptr, int len) 987static int sym_skip_spaces(char *ptr, int len)
988{ 988{
989 int cnt, c; 989 int cnt, c;
990 990
@@ -1012,7 +1012,7 @@ static int is_keyword(char *ptr, int len, char *verb)
1012} 1012}
1013 1013
1014#define SKIP_SPACES(ptr, len) \ 1014#define SKIP_SPACES(ptr, len) \
1015 if ((arg_len = skip_spaces(ptr, len)) < 1) \ 1015 if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \
1016 return -EINVAL; \ 1016 return -EINVAL; \
1017 ptr += arg_len; len -= arg_len; 1017 ptr += arg_len; len -= arg_len;
1018 1018
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index d8983dd5c4b2..85dc0410ac1a 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2162,7 +2162,7 @@ static struct ioc3_submodule ioc3uart_ops = {
2162/** 2162/**
2163 * ioc3_detect - module init called, 2163 * ioc3_detect - module init called,
2164 */ 2164 */
2165static int __devinit ioc3uart_init(void) 2165static int __init ioc3uart_init(void)
2166{ 2166{
2167 int ret; 2167 int ret;
2168 2168
@@ -2179,7 +2179,7 @@ static int __devinit ioc3uart_init(void)
2179 return ret; 2179 return ret;
2180} 2180}
2181 2181
2182static void __devexit ioc3uart_exit(void) 2182static void __exit ioc3uart_exit(void)
2183{ 2183{
2184 ioc3_unregister_submodule(&ioc3uart_ops); 2184 ioc3_unregister_submodule(&ioc3uart_ops);
2185 uart_unregister_driver(&ioc3_uart); 2185 uart_unregister_driver(&ioc3_uart);
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 2e02c3026d24..836d9ab4f729 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -2904,7 +2904,7 @@ static struct ioc4_submodule ioc4_serial_submodule = {
2904/** 2904/**
2905 * ioc4_serial_init - module init 2905 * ioc4_serial_init - module init
2906 */ 2906 */
2907int ioc4_serial_init(void) 2907static int __init ioc4_serial_init(void)
2908{ 2908{
2909 int ret; 2909 int ret;
2910 2910
@@ -2913,20 +2913,30 @@ int ioc4_serial_init(void)
2913 printk(KERN_WARNING 2913 printk(KERN_WARNING
2914 "%s: Couldn't register rs232 IOC4 serial driver\n", 2914 "%s: Couldn't register rs232 IOC4 serial driver\n",
2915 __func__); 2915 __func__);
2916 return ret; 2916 goto out;
2917 } 2917 }
2918 if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) { 2918 if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
2919 printk(KERN_WARNING 2919 printk(KERN_WARNING
2920 "%s: Couldn't register rs422 IOC4 serial driver\n", 2920 "%s: Couldn't register rs422 IOC4 serial driver\n",
2921 __func__); 2921 __func__);
2922 return ret; 2922 goto out_uart_rs232;
2923 } 2923 }
2924 2924
2925 /* register with IOC4 main module */ 2925 /* register with IOC4 main module */
2926 return ioc4_register_submodule(&ioc4_serial_submodule); 2926 ret = ioc4_register_submodule(&ioc4_serial_submodule);
2927 if (ret)
2928 goto out_uart_rs422;
2929 return 0;
2930
2931out_uart_rs422:
2932 uart_unregister_driver(&ioc4_uart_rs422);
2933out_uart_rs232:
2934 uart_unregister_driver(&ioc4_uart_rs232);
2935out:
2936 return ret;
2927} 2937}
2928 2938
2929static void __devexit ioc4_serial_exit(void) 2939static void __exit ioc4_serial_exit(void)
2930{ 2940{
2931 ioc4_unregister_submodule(&ioc4_serial_submodule); 2941 ioc4_unregister_submodule(&ioc4_serial_submodule);
2932 uart_unregister_driver(&ioc4_uart_rs232); 2942 uart_unregister_driver(&ioc4_uart_rs232);
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index 4a821046baae..56ee082157aa 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -756,7 +756,7 @@ static int serial_pxa_resume(struct device *dev)
756 return 0; 756 return 0;
757} 757}
758 758
759static struct dev_pm_ops serial_pxa_pm_ops = { 759static const struct dev_pm_ops serial_pxa_pm_ops = {
760 .suspend = serial_pxa_suspend, 760 .suspend = serial_pxa_suspend,
761 .resume = serial_pxa_resume, 761 .resume = serial_pxa_resume,
762}; 762};
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index ff38dbdb5c6e..68c7f6cfd728 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1043,10 +1043,14 @@ static void __devinit sci_init_single(struct platform_device *dev,
1043 sci_port->port.iotype = UPIO_MEM; 1043 sci_port->port.iotype = UPIO_MEM;
1044 sci_port->port.line = index; 1044 sci_port->port.line = index;
1045 sci_port->port.fifosize = 1; 1045 sci_port->port.fifosize = 1;
1046 sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL; 1046
1047 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); 1047 if (dev) {
1048 sci_port->enable = sci_clk_enable; 1048 sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
1049 sci_port->disable = sci_clk_disable; 1049 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
1050 sci_port->enable = sci_clk_enable;
1051 sci_port->disable = sci_clk_disable;
1052 sci_port->port.dev = &dev->dev;
1053 }
1050 1054
1051 sci_port->break_timer.data = (unsigned long)sci_port; 1055 sci_port->break_timer.data = (unsigned long)sci_port;
1052 sci_port->break_timer.function = sci_break_timer; 1056 sci_port->break_timer.function = sci_break_timer;
@@ -1057,7 +1061,6 @@ static void __devinit sci_init_single(struct platform_device *dev,
1057 1061
1058 sci_port->port.irq = p->irqs[SCIx_TXI_IRQ]; 1062 sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
1059 sci_port->port.flags = p->flags; 1063 sci_port->port.flags = p->flags;
1060 sci_port->port.dev = &dev->dev;
1061 sci_port->type = sci_port->port.type = p->type; 1064 sci_port->type = sci_port->port.type = p->type;
1062 1065
1063 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); 1066 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
@@ -1101,7 +1104,7 @@ static void serial_console_write(struct console *co, const char *s,
1101 sci_port->disable(port); 1104 sci_port->disable(port);
1102} 1105}
1103 1106
1104static int __init serial_console_setup(struct console *co, char *options) 1107static int __devinit serial_console_setup(struct console *co, char *options)
1105{ 1108{
1106 struct sci_port *sci_port; 1109 struct sci_port *sci_port;
1107 struct uart_port *port; 1110 struct uart_port *port;
@@ -1119,9 +1122,14 @@ static int __init serial_console_setup(struct console *co, char *options)
1119 if (co->index >= SCI_NPORTS) 1122 if (co->index >= SCI_NPORTS)
1120 co->index = 0; 1123 co->index = 0;
1121 1124
1122 sci_port = &sci_ports[co->index]; 1125 if (co->data) {
1123 port = &sci_port->port; 1126 port = co->data;
1124 co->data = port; 1127 sci_port = to_sci_port(port);
1128 } else {
1129 sci_port = &sci_ports[co->index];
1130 port = &sci_port->port;
1131 co->data = port;
1132 }
1125 1133
1126 /* 1134 /*
1127 * Also need to check port->type, we don't actually have any 1135 * Also need to check port->type, we don't actually have any
@@ -1165,6 +1173,15 @@ static int __init sci_console_init(void)
1165 return 0; 1173 return 0;
1166} 1174}
1167console_initcall(sci_console_init); 1175console_initcall(sci_console_init);
1176
1177static struct sci_port early_serial_port;
1178static struct console early_serial_console = {
1179 .name = "early_ttySC",
1180 .write = serial_console_write,
1181 .flags = CON_PRINTBUFFER,
1182};
1183static char early_serial_buf[32];
1184
1168#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 1185#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
1169 1186
1170#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) 1187#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
@@ -1250,6 +1267,21 @@ static int __devinit sci_probe(struct platform_device *dev)
1250 struct sh_sci_priv *priv; 1267 struct sh_sci_priv *priv;
1251 int i, ret = -EINVAL; 1268 int i, ret = -EINVAL;
1252 1269
1270#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
1271 if (is_early_platform_device(dev)) {
1272 if (dev->id == -1)
1273 return -ENOTSUPP;
1274 early_serial_console.index = dev->id;
1275 early_serial_console.data = &early_serial_port.port;
1276 sci_init_single(NULL, &early_serial_port, dev->id, p);
1277 serial_console_setup(&early_serial_console, early_serial_buf);
1278 if (!strstr(early_serial_buf, "keep"))
1279 early_serial_console.flags |= CON_BOOT;
1280 register_console(&early_serial_console);
1281 return 0;
1282 }
1283#endif
1284
1253 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1285 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1254 if (!priv) 1286 if (!priv)
1255 return -ENOMEM; 1287 return -ENOMEM;
@@ -1312,7 +1344,7 @@ static int sci_resume(struct device *dev)
1312 return 0; 1344 return 0;
1313} 1345}
1314 1346
1315static struct dev_pm_ops sci_dev_pm_ops = { 1347static const struct dev_pm_ops sci_dev_pm_ops = {
1316 .suspend = sci_suspend, 1348 .suspend = sci_suspend,
1317 .resume = sci_resume, 1349 .resume = sci_resume,
1318}; 1350};
@@ -1349,6 +1381,10 @@ static void __exit sci_exit(void)
1349 uart_unregister_driver(&sci_uart_driver); 1381 uart_unregister_driver(&sci_uart_driver);
1350} 1382}
1351 1383
1384#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
1385early_platform_init_buffer("earlyprintk", &sci_driver,
1386 early_serial_buf, ARRAY_SIZE(early_serial_buf));
1387#endif
1352module_init(sci_init); 1388module_init(sci_init);
1353module_exit(sci_exit); 1389module_exit(sci_exit);
1354 1390
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index a7e5c2e9986c..d5d7f23c19a5 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -806,6 +806,8 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
806 if (d->state.event != PM_EVENT_FREEZE) 806 if (d->state.event != PM_EVENT_FREEZE)
807 break; 807 break;
808 for_each_irq_desc(irq, desc) { 808 for_each_irq_desc(irq, desc) {
809 if (desc->handle_irq == intc_redirect_irq)
810 continue;
809 if (desc->chip != &d->chip) 811 if (desc->chip != &d->chip)
810 continue; 812 continue;
811 if (desc->status & IRQ_DISABLED) 813 if (desc->status & IRQ_DISABLED)
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index 841ed5030c8f..082604edc4c2 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -71,7 +71,7 @@ static void gpio_write_bit(struct pinmux_data_reg *dr,
71 71
72 pos = dr->reg_width - (in_pos + 1); 72 pos = dr->reg_width - (in_pos + 1);
73 73
74 pr_debug("write_bit addr = %lx, value = %ld, pos = %ld, " 74 pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
75 "r_width = %ld\n", 75 "r_width = %ld\n",
76 dr->reg, !!value, pos, dr->reg_width); 76 dr->reg, !!value, pos, dr->reg_width);
77 77
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 816d4c592a3c..66802a4390cc 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -574,11 +574,11 @@ void ioc3_unregister_submodule(struct ioc3_submodule *is)
574 * Device management * 574 * Device management *
575 *********************/ 575 *********************/
576 576
577static char * 577static char * __devinitdata
578ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3", 578ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3",
579 "MENET 4", "CADduo", "Altix Serial"}; 579 "MENET 4", "CADduo", "Altix Serial"};
580 580
581static int ioc3_class(struct ioc3_driver_data *idd) 581static int __devinit ioc3_class(struct ioc3_driver_data *idd)
582{ 582{
583 int res = IOC3_CLASS_NONE; 583 int res = IOC3_CLASS_NONE;
584 /* NIC-based logic */ 584 /* NIC-based logic */
@@ -601,7 +601,8 @@ static int ioc3_class(struct ioc3_driver_data *idd)
601 return res; 601 return res;
602} 602}
603/* Adds a new instance of an IOC3 card */ 603/* Adds a new instance of an IOC3 card */
604static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) 604static int __devinit
605ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
605{ 606{
606 struct ioc3_driver_data *idd; 607 struct ioc3_driver_data *idd;
607 uint32_t pcmd; 608 uint32_t pcmd;
@@ -753,7 +754,7 @@ out:
753} 754}
754 755
755/* Removes a particular instance of an IOC3 card. */ 756/* Removes a particular instance of an IOC3 card. */
756static void ioc3_remove(struct pci_dev *pdev) 757static void __devexit ioc3_remove(struct pci_dev *pdev)
757{ 758{
758 int id; 759 int id;
759 struct ioc3_driver_data *idd; 760 struct ioc3_driver_data *idd;
@@ -805,7 +806,7 @@ static struct pci_driver ioc3_driver = {
805 .name = "IOC3", 806 .name = "IOC3",
806 .id_table = ioc3_id_table, 807 .id_table = ioc3_id_table,
807 .probe = ioc3_probe, 808 .probe = ioc3_probe,
808 .remove = ioc3_remove, 809 .remove = __devexit_p(ioc3_remove),
809}; 810};
810 811
811MODULE_DEVICE_TABLE(pci, ioc3_id_table); 812MODULE_DEVICE_TABLE(pci, ioc3_id_table);
@@ -815,15 +816,15 @@ MODULE_DEVICE_TABLE(pci, ioc3_id_table);
815 *********************/ 816 *********************/
816 817
817/* Module load */ 818/* Module load */
818static int __devinit ioc3_init(void) 819static int __init ioc3_init(void)
819{ 820{
820 if (ia64_platform_is("sn2")) 821 if (ia64_platform_is("sn2"))
821 return pci_register_driver(&ioc3_driver); 822 return pci_register_driver(&ioc3_driver);
822 return 0; 823 return -ENODEV;
823} 824}
824 825
825/* Module unload */ 826/* Module unload */
826static void __devexit ioc3_exit(void) 827static void __exit ioc3_exit(void)
827{ 828{
828 pci_unregister_driver(&ioc3_driver); 829 pci_unregister_driver(&ioc3_driver);
829} 830}
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index c8c2b693ffac..c2f707e5ce74 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1709,7 +1709,7 @@ static int pxa2xx_spi_resume(struct device *dev)
1709 return 0; 1709 return 0;
1710} 1710}
1711 1711
1712static struct dev_pm_ops pxa2xx_spi_pm_ops = { 1712static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
1713 .suspend = pxa2xx_spi_suspend, 1713 .suspend = pxa2xx_spi_suspend,
1714 .resume = pxa2xx_spi_resume, 1714 .resume = pxa2xx_spi_resume,
1715}; 1715};
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 33d94f76b9ef..276591569c8b 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -489,7 +489,7 @@ static int s3c24xx_spi_resume(struct device *dev)
489 return 0; 489 return 0;
490} 490}
491 491
492static struct dev_pm_ops s3c24xx_spi_pmops = { 492static const struct dev_pm_ops s3c24xx_spi_pmops = {
493 .suspend = s3c24xx_spi_suspend, 493 .suspend = s3c24xx_spi_suspend,
494 .resume = s3c24xx_spi_resume, 494 .resume = s3c24xx_spi_resume,
495}; 495};
diff --git a/drivers/staging/cx25821/cx25821-audups11.c b/drivers/staging/cx25821/cx25821-audups11.c
index f78b8912d905..89c8fe2997fa 100644
--- a/drivers/staging/cx25821/cx25821-audups11.c
+++ b/drivers/staging/cx25821/cx25821-audups11.c
@@ -94,36 +94,20 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 101
103 lock_kernel(); 102 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
104 list_for_each(list, &cx25821_devlist) { 103 v4l2_type_names[type]);
105 h = list_entry(list, struct cx25821_dev, devlist);
106
107 if (h->video_dev[SRAM_CH11]
108 && h->video_dev[SRAM_CH11]->minor == minor) {
109 dev = h;
110 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
111 }
112 }
113
114 if (NULL == dev) {
115 unlock_kernel();
116 return -ENODEV;
117 }
118
119 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
120 104
121 /* allocate + initialize per filehandle data */ 105 /* allocate + initialize per filehandle data */
122 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 106 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
123 if (NULL == fh) { 107 if (NULL == fh)
124 unlock_kernel();
125 return -ENOMEM; 108 return -ENOMEM;
126 } 109
110 lock_kernel();
127 111
128 file->private_data = fh; 112 file->private_data = fh;
129 fh->dev = dev; 113 fh->dev = dev;
@@ -427,7 +411,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
427struct video_device cx25821_video_template11 = { 411struct video_device cx25821_video_template11 = {
428 .name = "cx25821-audioupstream", 412 .name = "cx25821-audioupstream",
429 .fops = &video_fops, 413 .fops = &video_fops,
430 .minor = -1,
431 .ioctl_ops = &video_ioctl_ops, 414 .ioctl_ops = &video_ioctl_ops,
432 .tvnorms = CX25821_NORMS, 415 .tvnorms = CX25821_NORMS,
433 .current_norm = V4L2_STD_NTSC_M, 416 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c
index 8834bc80a5ab..c7c14c7698a7 100644
--- a/drivers/staging/cx25821/cx25821-video.c
+++ b/drivers/staging/cx25821/cx25821-video.c
@@ -184,11 +184,11 @@ struct video_device *cx25821_vdev_init(struct cx25821_dev *dev,
184 if (NULL == vfd) 184 if (NULL == vfd)
185 return NULL; 185 return NULL;
186 *vfd = *template; 186 *vfd = *template;
187 vfd->minor = -1;
188 vfd->v4l2_dev = &dev->v4l2_dev; 187 vfd->v4l2_dev = &dev->v4l2_dev;
189 vfd->release = video_device_release; 188 vfd->release = video_device_release;
190 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name, type, 189 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name, type,
191 cx25821_boards[dev->board].name); 190 cx25821_boards[dev->board].name);
191 video_set_drvdata(vfd, dev);
192 return vfd; 192 return vfd;
193} 193}
194 194
@@ -424,7 +424,7 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
424void cx25821_videoioctl_unregister(struct cx25821_dev *dev) 424void cx25821_videoioctl_unregister(struct cx25821_dev *dev)
425{ 425{
426 if (dev->ioctl_dev) { 426 if (dev->ioctl_dev) {
427 if (dev->ioctl_dev->minor != -1) 427 if (video_is_registered(dev->ioctl_dev))
428 video_unregister_device(dev->ioctl_dev); 428 video_unregister_device(dev->ioctl_dev);
429 else 429 else
430 video_device_release(dev->ioctl_dev); 430 video_device_release(dev->ioctl_dev);
@@ -438,7 +438,7 @@ void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num)
438 cx_clear(PCI_INT_MSK, 1); 438 cx_clear(PCI_INT_MSK, 1);
439 439
440 if (dev->video_dev[chan_num]) { 440 if (dev->video_dev[chan_num]) {
441 if (-1 != dev->video_dev[chan_num]->minor) 441 if (video_is_registered(dev->video_dev[chan_num]))
442 video_unregister_device(dev->video_dev[chan_num]); 442 video_unregister_device(dev->video_dev[chan_num]);
443 else 443 else
444 video_device_release(dev->video_dev[chan_num]); 444 video_device_release(dev->video_dev[chan_num]);
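
The cx25821 conversion replaces the global-devlist/minor scan in every video_open() with data stored on the video_device itself: video_set_drvdata() at init time, video_drvdata() in the file operations, and video_is_registered() instead of comparing minor against -1. A minimal sketch of that pairing follows; struct foo_dev, foo_vdev_init() and foo_open() are hypothetical stand-ins for the per-channel copies in this driver.

#include <linux/kernel.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct foo_dev {
	struct v4l2_device v4l2_dev;
	/* per-card state */
};

static struct video_device *foo_vdev_init(struct foo_dev *dev,
					  const struct video_device *tmpl)
{
	struct video_device *vfd = video_device_alloc();

	if (!vfd)
		return NULL;
	*vfd = *tmpl;
	vfd->v4l2_dev = &dev->v4l2_dev;
	vfd->release = video_device_release;
	video_set_drvdata(vfd, dev);		/* attach per-card state */
	return vfd;
}

static int foo_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct foo_dev *dev = video_drvdata(file);	/* no devlist walk */

	pr_info("open %s\n", video_device_node_name(vdev));
	file->private_data = dev;
	return 0;
}
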
diff --git a/drivers/staging/cx25821/cx25821-video0.c b/drivers/staging/cx25821/cx25821-video0.c
index 950fac1d7003..ad7a69129118 100644
--- a/drivers/staging/cx25821/cx25821-video0.c
+++ b/drivers/staging/cx25821/cx25821-video0.c
@@ -94,37 +94,21 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->video_dev[SRAM_CH00]
109 && h->video_dev[SRAM_CH00]->minor == minor) {
110 dev = h;
111 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
112 }
113 }
114
115 if (NULL == dev) {
116 unlock_kernel();
117 return -ENODEV;
118 }
119
120 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
121 105
122 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
123 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
124 if (NULL == fh) { 108 if (NULL == fh)
125 unlock_kernel();
126 return -ENOMEM; 109 return -ENOMEM;
127 } 110
111 lock_kernel();
128 112
129 file->private_data = fh; 113 file->private_data = fh;
130 fh->dev = dev; 114 fh->dev = dev;
@@ -444,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
444struct video_device cx25821_video_template0 = { 428struct video_device cx25821_video_template0 = {
445 .name = "cx25821-video", 429 .name = "cx25821-video",
446 .fops = &video_fops, 430 .fops = &video_fops,
447 .minor = -1,
448 .ioctl_ops = &video_ioctl_ops, 431 .ioctl_ops = &video_ioctl_ops,
449 .tvnorms = CX25821_NORMS, 432 .tvnorms = CX25821_NORMS,
450 .current_norm = V4L2_STD_NTSC_M, 433 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video1.c b/drivers/staging/cx25821/cx25821-video1.c
index a4dddc684adf..e3f3c4ac7908 100644
--- a/drivers/staging/cx25821/cx25821-video1.c
+++ b/drivers/staging/cx25821/cx25821-video1.c
@@ -94,37 +94,21 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->video_dev[SRAM_CH01]
109 && h->video_dev[SRAM_CH01]->minor == minor) {
110 dev = h;
111 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
112 }
113 }
114
115 if (NULL == dev) {
116 unlock_kernel();
117 return -ENODEV;
118 }
119
120 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
121 105
122 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
123 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
124 if (NULL == fh) { 108 if (NULL == fh)
125 unlock_kernel();
126 return -ENOMEM; 109 return -ENOMEM;
127 } 110
111 lock_kernel();
128 112
129 file->private_data = fh; 113 file->private_data = fh;
130 fh->dev = dev; 114 fh->dev = dev;
@@ -444,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
444struct video_device cx25821_video_template1 = { 428struct video_device cx25821_video_template1 = {
445 .name = "cx25821-video", 429 .name = "cx25821-video",
446 .fops = &video_fops, 430 .fops = &video_fops,
447 .minor = -1,
448 .ioctl_ops = &video_ioctl_ops, 431 .ioctl_ops = &video_ioctl_ops,
449 .tvnorms = CX25821_NORMS, 432 .tvnorms = CX25821_NORMS,
450 .current_norm = V4L2_STD_NTSC_M, 433 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video2.c b/drivers/staging/cx25821/cx25821-video2.c
index 8e04e253f5d9..36fb855a497e 100644
--- a/drivers/staging/cx25821/cx25821-video2.c
+++ b/drivers/staging/cx25821/cx25821-video2.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->video_dev[SRAM_CH02]
109 && h->video_dev[SRAM_CH02]->minor == minor) {
110 dev = h;
111 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
112 }
113 }
114
115 if (NULL == dev) {
116 unlock_kernel();
117 return -ENODEV;
118 }
119
120 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
121 105
122 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
123 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
124 if (NULL == fh) { 108 if (NULL == fh)
125 unlock_kernel();
126 return -ENOMEM; 109 return -ENOMEM;
127 } 110
111 lock_kernel();
112
128 file->private_data = fh; 113 file->private_data = fh;
129 fh->dev = dev; 114 fh->dev = dev;
130 fh->type = type; 115 fh->type = type;
@@ -445,7 +430,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
445struct video_device cx25821_video_template2 = { 430struct video_device cx25821_video_template2 = {
446 .name = "cx25821-video", 431 .name = "cx25821-video",
447 .fops = &video_fops, 432 .fops = &video_fops,
448 .minor = -1,
449 .ioctl_ops = &video_ioctl_ops, 433 .ioctl_ops = &video_ioctl_ops,
450 .tvnorms = CX25821_NORMS, 434 .tvnorms = CX25821_NORMS,
451 .current_norm = V4L2_STD_NTSC_M, 435 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video3.c b/drivers/staging/cx25821/cx25821-video3.c
index 8801a8ead904..1e0f10abdbcd 100644
--- a/drivers/staging/cx25821/cx25821-video3.c
+++ b/drivers/staging/cx25821/cx25821-video3.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->video_dev[SRAM_CH03]
109 && h->video_dev[SRAM_CH03]->minor == minor) {
110 dev = h;
111 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
112 }
113 }
114
115 if (NULL == dev) {
116 unlock_kernel();
117 return -ENODEV;
118 }
119
120 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
121 105
122 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
123 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
124 if (NULL == fh) { 108 if (NULL == fh)
125 unlock_kernel();
126 return -ENOMEM; 109 return -ENOMEM;
127 } 110
111 lock_kernel();
112
128 file->private_data = fh; 113 file->private_data = fh;
129 fh->dev = dev; 114 fh->dev = dev;
130 fh->type = type; 115 fh->type = type;
@@ -444,7 +429,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
444struct video_device cx25821_video_template3 = { 429struct video_device cx25821_video_template3 = {
445 .name = "cx25821-video", 430 .name = "cx25821-video",
446 .fops = &video_fops, 431 .fops = &video_fops,
447 .minor = -1,
448 .ioctl_ops = &video_ioctl_ops, 432 .ioctl_ops = &video_ioctl_ops,
449 .tvnorms = CX25821_NORMS, 433 .tvnorms = CX25821_NORMS,
450 .current_norm = V4L2_STD_NTSC_M, 434 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video4.c b/drivers/staging/cx25821/cx25821-video4.c
index ab0d747138ad..0cbe7a79d8c0 100644
--- a/drivers/staging/cx25821/cx25821-video4.c
+++ b/drivers/staging/cx25821/cx25821-video4.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->video_dev[SRAM_CH04]
109 && h->video_dev[SRAM_CH04]->minor == minor) {
110 dev = h;
111 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
112 }
113 }
114
115 if (NULL == dev) {
116 unlock_kernel();
117 return -ENODEV;
118 }
119
120 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
121 105
122 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
123 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
124 if (NULL == fh) { 108 if (NULL == fh)
125 unlock_kernel();
126 return -ENOMEM; 109 return -ENOMEM;
127 } 110
111 lock_kernel();
112
128 file->private_data = fh; 113 file->private_data = fh;
129 fh->dev = dev; 114 fh->dev = dev;
130 fh->type = type; 115 fh->type = type;
@@ -443,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
443struct video_device cx25821_video_template4 = { 428struct video_device cx25821_video_template4 = {
444 .name = "cx25821-video", 429 .name = "cx25821-video",
445 .fops = &video_fops, 430 .fops = &video_fops,
446 .minor = -1,
447 .ioctl_ops = &video_ioctl_ops, 431 .ioctl_ops = &video_ioctl_ops,
448 .tvnorms = CX25821_NORMS, 432 .tvnorms = CX25821_NORMS,
449 .current_norm = V4L2_STD_NTSC_M, 433 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video5.c b/drivers/staging/cx25821/cx25821-video5.c
index 7ef0b971f5cf..5dc08adc12e8 100644
--- a/drivers/staging/cx25821/cx25821-video5.c
+++ b/drivers/staging/cx25821/cx25821-video5.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->video_dev[SRAM_CH05]
109 && h->video_dev[SRAM_CH05]->minor == minor) {
110 dev = h;
111 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
112 }
113 }
114
115 if (NULL == dev) {
116 unlock_kernel();
117 return -ENODEV;
118 }
119
120 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
121 105
122 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
123 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
124 if (NULL == fh) { 108 if (NULL == fh)
125 unlock_kernel();
126 return -ENOMEM; 109 return -ENOMEM;
127 } 110
111 lock_kernel();
112
128 file->private_data = fh; 113 file->private_data = fh;
129 fh->dev = dev; 114 fh->dev = dev;
130 fh->type = type; 115 fh->type = type;
@@ -443,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
443struct video_device cx25821_video_template5 = { 428struct video_device cx25821_video_template5 = {
444 .name = "cx25821-video", 429 .name = "cx25821-video",
445 .fops = &video_fops, 430 .fops = &video_fops,
446 .minor = -1,
447 .ioctl_ops = &video_ioctl_ops, 431 .ioctl_ops = &video_ioctl_ops,
448 .tvnorms = CX25821_NORMS, 432 .tvnorms = CX25821_NORMS,
449 .current_norm = V4L2_STD_NTSC_M, 433 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video6.c b/drivers/staging/cx25821/cx25821-video6.c
index 3c41b49e2ea9..2938ad3ad3c5 100644
--- a/drivers/staging/cx25821/cx25821-video6.c
+++ b/drivers/staging/cx25821/cx25821-video6.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->video_dev[SRAM_CH06]
109 && h->video_dev[SRAM_CH06]->minor == minor) {
110 dev = h;
111 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
112 }
113 }
114
115 if (NULL == dev) {
116 unlock_kernel();
117 return -ENODEV;
118 }
119
120 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
121 105
122 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
123 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
124 if (NULL == fh) { 108 if (NULL == fh)
125 unlock_kernel();
126 return -ENOMEM; 109 return -ENOMEM;
127 } 110
111 lock_kernel();
112
128 file->private_data = fh; 113 file->private_data = fh;
129 fh->dev = dev; 114 fh->dev = dev;
130 fh->type = type; 115 fh->type = type;
@@ -443,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
443struct video_device cx25821_video_template6 = { 428struct video_device cx25821_video_template6 = {
444 .name = "cx25821-video", 429 .name = "cx25821-video",
445 .fops = &video_fops, 430 .fops = &video_fops,
446 .minor = -1,
447 .ioctl_ops = &video_ioctl_ops, 431 .ioctl_ops = &video_ioctl_ops,
448 .tvnorms = CX25821_NORMS, 432 .tvnorms = CX25821_NORMS,
449 .current_norm = V4L2_STD_NTSC_M, 433 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video7.c b/drivers/staging/cx25821/cx25821-video7.c
index 625c9b78a9cf..458e525d72af 100644
--- a/drivers/staging/cx25821/cx25821-video7.c
+++ b/drivers/staging/cx25821/cx25821-video7.c
@@ -93,37 +93,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
93 93
94static int video_open(struct file *file) 94static int video_open(struct file *file)
95{ 95{
96 int minor = video_devdata(file)->minor; 96 struct video_device *vdev = video_devdata(file);
97 struct cx25821_dev *h, *dev = NULL; 97 struct cx25821_dev *dev = video_drvdata(file);
98 struct cx25821_fh *fh; 98 struct cx25821_fh *fh;
99 struct list_head *list; 99 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
100 enum v4l2_buf_type type = 0;
101 u32 pix_format; 100 u32 pix_format;
102 101
103 lock_kernel(); 102 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
104 list_for_each(list, &cx25821_devlist) { 103 v4l2_type_names[type]);
105 h = list_entry(list, struct cx25821_dev, devlist);
106
107 if (h->video_dev[SRAM_CH07]
108 && h->video_dev[SRAM_CH07]->minor == minor) {
109 dev = h;
110 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
111 }
112 }
113
114 if (NULL == dev) {
115 unlock_kernel();
116 return -ENODEV;
117 }
118
119 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
120 104
121 /* allocate + initialize per filehandle data */ 105 /* allocate + initialize per filehandle data */
122 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 106 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
123 if (NULL == fh) { 107 if (NULL == fh)
124 unlock_kernel();
125 return -ENOMEM; 108 return -ENOMEM;
126 } 109
110 lock_kernel();
111
127 file->private_data = fh; 112 file->private_data = fh;
128 fh->dev = dev; 113 fh->dev = dev;
129 fh->type = type; 114 fh->type = type;
@@ -442,7 +427,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
442struct video_device cx25821_video_template7 = { 427struct video_device cx25821_video_template7 = {
443 .name = "cx25821-video", 428 .name = "cx25821-video",
444 .fops = &video_fops, 429 .fops = &video_fops,
445 .minor = -1,
446 .ioctl_ops = &video_ioctl_ops, 430 .ioctl_ops = &video_ioctl_ops,
447 .tvnorms = CX25821_NORMS, 431 .tvnorms = CX25821_NORMS,
448 .current_norm = V4L2_STD_NTSC_M, 432 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-videoioctl.c b/drivers/staging/cx25821/cx25821-videoioctl.c
index 2a312ce78c63..1da52b54a454 100644
--- a/drivers/staging/cx25821/cx25821-videoioctl.c
+++ b/drivers/staging/cx25821/cx25821-videoioctl.c
@@ -94,36 +94,21 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 u32 pix_format; 101 u32 pix_format;
103 102
104 lock_kernel(); 103 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
105 list_for_each(list, &cx25821_devlist) { 104 v4l2_type_names[type]);
106 h = list_entry(list, struct cx25821_dev, devlist);
107
108 if (h->ioctl_dev && h->ioctl_dev->minor == minor) {
109 dev = h;
110 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
111 }
112 }
113
114 if (NULL == dev) {
115 unlock_kernel();
116 return -ENODEV;
117 }
118
119 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
120 105
121 /* allocate + initialize per filehandle data */ 106 /* allocate + initialize per filehandle data */
122 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 107 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
123 if (NULL == fh) { 108 if (NULL == fh)
124 unlock_kernel();
125 return -ENOMEM; 109 return -ENOMEM;
126 } 110
111 lock_kernel();
127 112
128 file->private_data = fh; 113 file->private_data = fh;
129 fh->dev = dev; 114 fh->dev = dev;
@@ -489,7 +474,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
489struct video_device cx25821_videoioctl_template = { 474struct video_device cx25821_videoioctl_template = {
490 .name = "cx25821-videoioctl", 475 .name = "cx25821-videoioctl",
491 .fops = &video_fops, 476 .fops = &video_fops,
492 .minor = -1,
493 .ioctl_ops = &video_ioctl_ops, 477 .ioctl_ops = &video_ioctl_ops,
494 .tvnorms = CX25821_NORMS, 478 .tvnorms = CX25821_NORMS,
495 .current_norm = V4L2_STD_NTSC_M, 479 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-vidups10.c b/drivers/staging/cx25821/cx25821-vidups10.c
index 77b63b060405..b76d9f62c3d1 100644
--- a/drivers/staging/cx25821/cx25821-vidups10.c
+++ b/drivers/staging/cx25821/cx25821-vidups10.c
@@ -94,36 +94,20 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 101
103 lock_kernel(); 102 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
104 list_for_each(list, &cx25821_devlist) { 103 v4l2_type_names[type]);
105 h = list_entry(list, struct cx25821_dev, devlist);
106
107 if (h->video_dev[SRAM_CH10]
108 && h->video_dev[SRAM_CH10]->minor == minor) {
109 dev = h;
110 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
111 }
112 }
113
114 if (NULL == dev) {
115 unlock_kernel();
116 return -ENODEV;
117 }
118
119 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
120 104
121 /* allocate + initialize per filehandle data */ 105 /* allocate + initialize per filehandle data */
122 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 106 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
123 if (NULL == fh) { 107 if (NULL == fh)
124 unlock_kernel();
125 return -ENOMEM; 108 return -ENOMEM;
126 } 109
110 lock_kernel();
127 111
128 file->private_data = fh; 112 file->private_data = fh;
129 fh->dev = dev; 113 fh->dev = dev;
@@ -428,7 +412,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
428struct video_device cx25821_video_template10 = { 412struct video_device cx25821_video_template10 = {
429 .name = "cx25821-upstream10", 413 .name = "cx25821-upstream10",
430 .fops = &video_fops, 414 .fops = &video_fops,
431 .minor = -1,
432 .ioctl_ops = &video_ioctl_ops, 415 .ioctl_ops = &video_ioctl_ops,
433 .tvnorms = CX25821_NORMS, 416 .tvnorms = CX25821_NORMS,
434 .current_norm = V4L2_STD_NTSC_M, 417 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-vidups9.c b/drivers/staging/cx25821/cx25821-vidups9.c
index 75c8c1eed2da..1580da3b29aa 100644
--- a/drivers/staging/cx25821/cx25821-vidups9.c
+++ b/drivers/staging/cx25821/cx25821-vidups9.c
@@ -94,36 +94,20 @@ static struct videobuf_queue_ops cx25821_video_qops = {
94 94
95static int video_open(struct file *file) 95static int video_open(struct file *file)
96{ 96{
97 int minor = video_devdata(file)->minor; 97 struct video_device *vdev = video_devdata(file);
98 struct cx25821_dev *h, *dev = NULL; 98 struct cx25821_dev *dev = video_drvdata(file);
99 struct cx25821_fh *fh; 99 struct cx25821_fh *fh;
100 struct list_head *list; 100 enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
101 enum v4l2_buf_type type = 0;
102 101
103 lock_kernel(); 102 printk("open dev=%s type=%s\n", video_device_node_name(vdev),
104 list_for_each(list, &cx25821_devlist) { 103 v4l2_type_names[type]);
105 h = list_entry(list, struct cx25821_dev, devlist);
106
107 if (h->video_dev[SRAM_CH09]
108 && h->video_dev[SRAM_CH09]->minor == minor) {
109 dev = h;
110 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
111 }
112 }
113
114 if (NULL == dev) {
115 unlock_kernel();
116 return -ENODEV;
117 }
118
119 printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
120 104
121 /* allocate + initialize per filehandle data */ 105 /* allocate + initialize per filehandle data */
122 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 106 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
123 if (NULL == fh) { 107 if (NULL == fh)
124 unlock_kernel();
125 return -ENOMEM; 108 return -ENOMEM;
126 } 109
110 lock_kernel();
127 111
128 file->private_data = fh; 112 file->private_data = fh;
129 fh->dev = dev; 113 fh->dev = dev;
@@ -426,7 +410,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
426struct video_device cx25821_video_template9 = { 410struct video_device cx25821_video_template9 = {
427 .name = "cx25821-upstream9", 411 .name = "cx25821-upstream9",
428 .fops = &video_fops, 412 .fops = &video_fops,
429 .minor = -1,
430 .ioctl_ops = &video_ioctl_ops, 413 .ioctl_ops = &video_ioctl_ops,
431 .tvnorms = CX25821_NORMS, 414 .tvnorms = CX25821_NORMS,
432 .current_norm = V4L2_STD_NTSC_M, 415 .current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
index b18d8e2d4c5e..3af79242313e 100644
--- a/drivers/staging/go7007/go7007-v4l2.c
+++ b/drivers/staging/go7007/go7007-v4l2.c
@@ -1787,7 +1787,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
1787static struct video_device go7007_template = { 1787static struct video_device go7007_template = {
1788 .name = "go7007", 1788 .name = "go7007",
1789 .fops = &go7007_fops, 1789 .fops = &go7007_fops,
1790 .minor = -1,
1791 .release = go7007_vfl_release, 1790 .release = go7007_vfl_release,
1792 .ioctl_ops = &video_ioctl_ops, 1791 .ioctl_ops = &video_ioctl_ops,
1793 .tvnorms = V4L2_STD_ALL, 1792 .tvnorms = V4L2_STD_ALL,
@@ -1817,8 +1816,8 @@ int go7007_v4l2_init(struct go7007 *go)
1817 } 1816 }
1818 video_set_drvdata(go->video_dev, go); 1817 video_set_drvdata(go->video_dev, go);
1819 ++go->ref_count; 1818 ++go->ref_count;
1820 printk(KERN_INFO "%s: registered device video%d [v4l2]\n", 1819 printk(KERN_INFO "%s: registered device %s [v4l2]\n",
1821 go->video_dev->name, go->video_dev->num); 1820 go->video_dev->name, video_device_node_name(go->video_dev));
1822 1821
1823 return 0; 1822 return 0;
1824} 1823}
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index aa53db9f2e88..1ef3b8fc50b3 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -210,7 +210,7 @@ static int uio_pdrv_genirq_runtime_nop(struct device *dev)
210 return 0; 210 return 0;
211} 211}
212 212
213static struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { 213static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
214 .runtime_suspend = uio_pdrv_genirq_runtime_nop, 214 .runtime_suspend = uio_pdrv_genirq_runtime_nop,
215 .runtime_resume = uio_pdrv_genirq_runtime_nop, 215 .runtime_resume = uio_pdrv_genirq_runtime_nop,
216}; 216};
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 91f2885b6ee1..2dcf906df569 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -363,7 +363,7 @@ static int hcd_pci_restore(struct device *dev)
363 return resume_common(dev, true); 363 return resume_common(dev, true);
364} 364}
365 365
366struct dev_pm_ops usb_hcd_pci_pm_ops = { 366const struct dev_pm_ops usb_hcd_pci_pm_ops = {
367 .suspend = hcd_pci_suspend, 367 .suspend = hcd_pci_suspend,
368 .suspend_noirq = hcd_pci_suspend_noirq, 368 .suspend_noirq = hcd_pci_suspend_noirq,
369 .resume_noirq = hcd_pci_resume_noirq, 369 .resume_noirq = hcd_pci_resume_noirq,
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index d8b43aee581e..bbe2b924aae8 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -330,7 +330,7 @@ extern void usb_hcd_pci_remove(struct pci_dev *dev);
330extern void usb_hcd_pci_shutdown(struct pci_dev *dev); 330extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
331 331
332#ifdef CONFIG_PM_SLEEP 332#ifdef CONFIG_PM_SLEEP
333extern struct dev_pm_ops usb_hcd_pci_pm_ops; 333extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
334#endif 334#endif
335#endif /* CONFIG_PCI */ 335#endif /* CONFIG_PCI */
336 336
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 4e2c6df8d3cc..2fb42043b305 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -167,18 +167,23 @@ struct usb_host_interface *usb_altnum_to_altsetting(
167} 167}
168EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); 168EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
169 169
170struct find_interface_arg {
171 int minor;
172 struct device_driver *drv;
173};
174
170static int __find_interface(struct device *dev, void *data) 175static int __find_interface(struct device *dev, void *data)
171{ 176{
172 int *minor = data; 177 struct find_interface_arg *arg = data;
173 struct usb_interface *intf; 178 struct usb_interface *intf;
174 179
175 if (!is_usb_interface(dev)) 180 if (!is_usb_interface(dev))
176 return 0; 181 return 0;
177 182
183 if (dev->driver != arg->drv)
184 return 0;
178 intf = to_usb_interface(dev); 185 intf = to_usb_interface(dev);
179 if (intf->minor != -1 && intf->minor == *minor) 186 return intf->minor == arg->minor;
180 return 1;
181 return 0;
182} 187}
183 188
184/** 189/**
@@ -187,14 +192,18 @@ static int __find_interface(struct device *dev, void *data)
187 * @minor: the minor number of the desired device 192 * @minor: the minor number of the desired device
188 * 193 *
189 * This walks the bus device list and returns a pointer to the interface 194 * This walks the bus device list and returns a pointer to the interface
190 * with the matching minor. Note, this only works for devices that share the 195 * with the matching minor and driver. Note, this only works for devices
191 * USB major number. 196 * that share the USB major number.
192 */ 197 */
193struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) 198struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
194{ 199{
200 struct find_interface_arg argb;
195 struct device *dev; 201 struct device *dev;
196 202
197 dev = bus_find_device(&usb_bus_type, NULL, &minor, __find_interface); 203 argb.minor = minor;
204 argb.drv = &drv->drvwrap.driver;
205
206 dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
198 207
199 /* Drop reference count from bus_find_device */ 208 /* Drop reference count from bus_find_device */
200 put_device(dev); 209 put_device(dev);
@@ -320,7 +329,7 @@ static int usb_dev_restore(struct device *dev)
320 return usb_resume(dev, PMSG_RESTORE); 329 return usb_resume(dev, PMSG_RESTORE);
321} 330}
322 331
323static struct dev_pm_ops usb_device_pm_ops = { 332static const struct dev_pm_ops usb_device_pm_ops = {
324 .prepare = usb_dev_prepare, 333 .prepare = usb_dev_prepare,
325 .complete = usb_dev_complete, 334 .complete = usb_dev_complete,
326 .suspend = usb_dev_suspend, 335 .suspend = usb_dev_suspend,
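
usb_find_interface() now passes a small argument struct through bus_find_device(), so the match callback can check both the minor and the owning driver. A minimal sketch of that bus_find_device() idiom for a hypothetical "widget" bus; widget_minor() is a placeholder accessor, not a real API.

#include <linux/device.h>

struct widget_match {
	int minor;
	struct device_driver *drv;
};

static int widget_minor(struct device *dev);	/* hypothetical accessor */

static int widget_match_fn(struct device *dev, void *data)
{
	struct widget_match *m = data;

	if (dev->driver != m->drv)		/* bound to a different driver */
		return 0;
	return widget_minor(dev) == m->minor;
}

static struct device *widget_find(struct bus_type *bus,
				  struct device_driver *drv, int minor)
{
	struct widget_match m = { .minor = minor, .drv = drv };

	/* first match is returned with a reference held; caller must put_device() */
	return bus_find_device(bus, NULL, &m, widget_match_fn);
}
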
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index ed77be76d6bb..dbfb482a94e3 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -297,7 +297,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
297 return 0; 297 return 0;
298} 298}
299 299
300static struct dev_pm_ops au1xxx_ehci_pmops = { 300static const struct dev_pm_ops au1xxx_ehci_pmops = {
301 .suspend = ehci_hcd_au1xxx_drv_suspend, 301 .suspend = ehci_hcd_au1xxx_drv_suspend,
302 .resume = ehci_hcd_au1xxx_drv_resume, 302 .resume = ehci_hcd_au1xxx_drv_resume,
303}; 303};
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 5c774ab98252..73352f3739b5 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -80,7 +80,7 @@
80#include <linux/platform_device.h> 80#include <linux/platform_device.h>
81#include <linux/pm.h> 81#include <linux/pm.h>
82#include <linux/io.h> 82#include <linux/io.h>
83#include <linux/bitops.h> 83#include <linux/bitmap.h>
84 84
85#include <asm/irq.h> 85#include <asm/irq.h>
86#include <asm/system.h> 86#include <asm/system.h>
@@ -190,10 +190,8 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
190 struct isp1362_ep *ep, u16 len) 190 struct isp1362_ep *ep, u16 len)
191{ 191{
192 int ptd_offset = -EINVAL; 192 int ptd_offset = -EINVAL;
193 int index;
194 int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; 193 int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
195 int found = -1; 194 int found;
196 int last = -1;
197 195
198 BUG_ON(len > epq->buf_size); 196 BUG_ON(len > epq->buf_size);
199 197
@@ -205,20 +203,9 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
205 epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); 203 epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
206 BUG_ON(ep->num_ptds != 0); 204 BUG_ON(ep->num_ptds != 0);
207 205
208 for (index = 0; index <= epq->buf_count - num_ptds; index++) { 206 found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
209 if (test_bit(index, &epq->buf_map)) 207 num_ptds, 0);
210 continue; 208 if (found >= epq->buf_count)
211 found = index;
212 for (last = index + 1; last < index + num_ptds; last++) {
213 if (test_bit(last, &epq->buf_map)) {
214 found = -1;
215 break;
216 }
217 }
218 if (found >= 0)
219 break;
220 }
221 if (found < 0)
222 return -EOVERFLOW; 209 return -EOVERFLOW;
223 210
224 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, 211 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
@@ -230,8 +217,7 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
230 epq->buf_avail -= num_ptds; 217 epq->buf_avail -= num_ptds;
231 BUG_ON(epq->buf_avail > epq->buf_count); 218 BUG_ON(epq->buf_avail > epq->buf_count);
232 ep->ptd_index = found; 219 ep->ptd_index = found;
233 for (index = found; index < last; index++) 220 bitmap_set(&epq->buf_map, found, num_ptds);
234 __set_bit(index, &epq->buf_map);
235 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", 221 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
236 __func__, epq->name, ep->ptd_index, ep->ptd_offset, 222 __func__, epq->name, ep->ptd_index, ep->ptd_offset,
237 epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); 223 epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
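
The isp1362 hunk replaces an open-coded search for a run of free PTD buffers with the lib/bitmap helpers. A minimal sketch of the same claim-and-mark pattern; buf_map, buf_count and the slot counts are illustrative only.

#include <linux/bitmap.h>
#include <linux/errno.h>

static unsigned long buf_map;		/* one bit per PTD buffer slot */

static int claim_slots(unsigned int buf_count, unsigned int num_ptds)
{
	unsigned long start;

	/* find num_ptds consecutive clear bits, no alignment constraint */
	start = bitmap_find_next_zero_area(&buf_map, buf_count, 0,
					   num_ptds, 0);
	if (start >= buf_count)
		return -EOVERFLOW;	/* no free run large enough */

	bitmap_set(&buf_map, start, num_ptds);	/* mark the run busy */
	return start;			/* a later free would use bitmap_clear() */
}
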
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index e4380082ebb1..17a6043c1fa0 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
294 return 0; 294 return 0;
295} 295}
296 296
297static struct dev_pm_ops au1xxx_ohci_pmops = { 297static const struct dev_pm_ops au1xxx_ohci_pmops = {
298 .suspend = ohci_hcd_au1xxx_drv_suspend, 298 .suspend = ohci_hcd_au1xxx_drv_suspend,
299 .resume = ohci_hcd_au1xxx_drv_resume, 299 .resume = ohci_hcd_au1xxx_drv_resume,
300}; 300};
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index f1c06202fdf2..a18debdd79b8 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
518 return 0; 518 return 0;
519} 519}
520 520
521static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { 521static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
522 .suspend = ohci_hcd_pxa27x_drv_suspend, 522 .suspend = ohci_hcd_pxa27x_drv_suspend,
523 .resume = ohci_hcd_pxa27x_drv_resume, 523 .resume = ohci_hcd_pxa27x_drv_resume,
524}; 524};
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 41dbc70ae752..b7a661c02bcd 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2353,7 +2353,7 @@ static int r8a66597_resume(struct device *dev)
2353 return 0; 2353 return 0;
2354} 2354}
2355 2355
2356static struct dev_pm_ops r8a66597_dev_pm_ops = { 2356static const struct dev_pm_ops r8a66597_dev_pm_ops = {
2357 .suspend = r8a66597_suspend, 2357 .suspend = r8a66597_suspend,
2358 .resume = r8a66597_resume, 2358 .resume = r8a66597_resume,
2359 .poweroff = r8a66597_suspend, 2359 .poweroff = r8a66597_suspend,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 49f2346afad3..bfe08f4975a3 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2214,7 +2214,7 @@ static int musb_resume_noirq(struct device *dev)
2214 return 0; 2214 return 0;
2215} 2215}
2216 2216
2217static struct dev_pm_ops musb_dev_pm_ops = { 2217static const struct dev_pm_ops musb_dev_pm_ops = {
2218 .suspend = musb_suspend, 2218 .suspend = musb_suspend,
2219 .resume_noirq = musb_resume_noirq, 2219 .resume_noirq = musb_resume_noirq,
2220}; 2220};
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 99c0df1c7ebf..5a5c303a6373 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -614,6 +614,21 @@ config FB_BFIN_T350MCQB
614 This display is a QVGA 320x240 24-bit RGB display interfaced by an 8-bit wide PPI 614 This display is a QVGA 320x240 24-bit RGB display interfaced by an 8-bit wide PPI
615 It uses PPI[0..7] PPI_FS1, PPI_FS2 and PPI_CLK. 615 It uses PPI[0..7] PPI_FS1, PPI_FS2 and PPI_CLK.
616 616
617config FB_BFIN_LQ035Q1
618 tristate "SHARP LQ035Q1DH02 TFT LCD"
619 depends on FB && BLACKFIN && SPI
620 select FB_CFB_FILLRECT
621 select FB_CFB_COPYAREA
622 select FB_CFB_IMAGEBLIT
623 select BFIN_GPTIMERS
624 help
625 This is the framebuffer device driver for a SHARP LQ035Q1DH02 TFT display found on
626 the Blackfin Landscape LCD EZ-Extender Card.
 627	  This display is a QVGA 320x240 18-bit RGB display interfaced by a 16-bit wide PPI
628 It uses PPI[0..15] PPI_FS1, PPI_FS2 and PPI_CLK.
629
630 To compile this driver as a module, choose M here: the
631 module will be called bfin-lq035q1-fb.
617 632
618config FB_STI 633config FB_STI
619 tristate "HP STI frame buffer device support" 634 tristate "HP STI frame buffer device support"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 0f8da331ba0f..4ecb30c4f3f2 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -137,6 +137,7 @@ obj-$(CONFIG_FB_EFI) += efifb.o
137obj-$(CONFIG_FB_VGA16) += vga16fb.o 137obj-$(CONFIG_FB_VGA16) += vga16fb.o
138obj-$(CONFIG_FB_OF) += offb.o 138obj-$(CONFIG_FB_OF) += offb.o
139obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o 139obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
140obj-$(CONFIG_FB_BFIN_LQ035Q1) += bfin-lq035q1-fb.o
140obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o 141obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
141obj-$(CONFIG_FB_MX3) += mx3fb.o 142obj-$(CONFIG_FB_MX3) += mx3fb.o
142obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o 143obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index b7687c55fe16..2051c9dc813b 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2245,6 +2245,9 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
2245 if (regno > 255) 2245 if (regno > 255)
2246 return 1; 2246 return 1;
2247 2247
2248 if (regno > 255)
2249 return 1;
2250
2248 switch (external_card_type) { 2251 switch (external_card_type) {
2249 case IS_VGA: 2252 case IS_VGA:
2250 OUTB(0x3c8, regno); 2253 OUTB(0x3c8, regno);
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 7fcb0eb54c60..f2d76dae1eb3 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -177,7 +177,7 @@ static int da903x_backlight_resume(struct device *dev)
177 return 0; 177 return 0;
178} 178}
179 179
180static struct dev_pm_ops da903x_backlight_pm_ops = { 180static const struct dev_pm_ops da903x_backlight_pm_ops = {
181 .suspend = da903x_backlight_suspend, 181 .suspend = da903x_backlight_suspend,
182 .resume = da903x_backlight_resume, 182 .resume = da903x_backlight_resume,
183}; 183};
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index a482dd7b0311..9b3be74cee5a 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -101,7 +101,7 @@ static ssize_t lcd_store_power(struct device *dev,
101 int power = simple_strtoul(buf, &endp, 0); 101 int power = simple_strtoul(buf, &endp, 0);
102 size_t size = endp - buf; 102 size_t size = endp - buf;
103 103
104 if (*endp && isspace(*endp)) 104 if (isspace(*endp))
105 size++; 105 size++;
106 if (size != count) 106 if (size != count)
107 return -EINVAL; 107 return -EINVAL;
@@ -140,7 +140,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
140 int contrast = simple_strtoul(buf, &endp, 0); 140 int contrast = simple_strtoul(buf, &endp, 0);
141 size_t size = endp - buf; 141 size_t size = endp - buf;
142 142
143 if (*endp && isspace(*endp)) 143 if (isspace(*endp))
144 size++; 144 size++;
145 if (size != count) 145 if (size != count)
146 return -EINVAL; 146 return -EINVAL;
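
The lcd sysfs stores can drop the redundant '*endp &&' test because isspace('\0') is already false, so checking the terminating character alone is sufficient. The surrounding parse-and-validate idiom, sketched for a hypothetical attribute; foo_set_power() is a placeholder consumer.

#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/kernel.h>

static void foo_set_power(struct device *dev, unsigned long power);	/* hypothetical */

static ssize_t foo_store_power(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	char *endp;
	unsigned long power = simple_strtoul(buf, &endp, 0);
	size_t size = endp - buf;

	if (isspace(*endp))		/* allow one trailing '\n' from echo */
		size++;
	if (size != count)		/* reject trailing garbage */
		return -EINVAL;

	foo_set_power(dev, power);
	return count;
}
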
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
new file mode 100644
index 000000000000..b690c269784a
--- /dev/null
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -0,0 +1,826 @@
1/*
2 * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
3 *
4 * Copyright 2008-2009 Analog Devices Inc.
5 * Licensed under the GPL-2 or later.
6 */
7
8#define DRIVER_NAME "bfin-lq035q1"
9#define pr_fmt(fmt) DRIVER_NAME ": " fmt
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/string.h>
15#include <linux/fb.h>
16#include <linux/init.h>
17#include <linux/types.h>
18#include <linux/interrupt.h>
19#include <linux/device.h>
20#include <linux/backlight.h>
21#include <linux/lcd.h>
22#include <linux/dma-mapping.h>
23#include <linux/platform_device.h>
24#include <linux/spi/spi.h>
25#include <linux/dma-mapping.h>
26
27#include <asm/blackfin.h>
28#include <asm/irq.h>
29#include <asm/dma.h>
30#include <asm/portmux.h>
31#include <asm/gptimers.h>
32
33#include <asm/bfin-lq035q1.h>
34
35#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
36#define TIMER_HSYNC_id TIMER1_id
37#define TIMER_HSYNCbit TIMER1bit
38#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
39#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
40#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
41
42#define TIMER_VSYNC_id TIMER2_id
43#define TIMER_VSYNCbit TIMER2bit
44#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN2
45#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL2
46#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF2
47#else
48#define TIMER_HSYNC_id TIMER0_id
49#define TIMER_HSYNCbit TIMER0bit
50#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN0
51#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL0
52#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF0
53
54#define TIMER_VSYNC_id TIMER1_id
55#define TIMER_VSYNCbit TIMER1bit
56#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
57#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
58#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
59#endif
60
61#define LCD_X_RES 320 /* Horizontal Resolution */
62#define LCD_Y_RES 240 /* Vertical Resolution */
63#define DMA_BUS_SIZE 16
64
65#define USE_RGB565_16_BIT_PPI
66
67#ifdef USE_RGB565_16_BIT_PPI
68#define LCD_BPP 16 /* Bit Per Pixel */
69#define CLOCKS_PER_PIX 1
 70#define CPLD_PIPELINE_DELAY_COR 0 /* no CPLD */
71#endif
72
73/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD)
74 * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
75 */
76
77#ifdef USE_RGB565_8_BIT_PPI
78#define LCD_BPP 16 /* Bit Per Pixel */
79#define CLOCKS_PER_PIX 2
80#define CPLD_PIPELINE_DELAY_COR 3 /* RGB565 */
81#endif
82
83#ifdef USE_RGB888_8_BIT_PPI
84#define LCD_BPP 24 /* Bit Per Pixel */
85#define CLOCKS_PER_PIX 3
86#define CPLD_PIPELINE_DELAY_COR 5 /* RGB888 */
87#endif
88
89 /*
90 * HS and VS timing parameters (all in number of PPI clk ticks)
91 */
92
93#define U_LINE 4 /* Blanking Lines */
94
95#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */
96#define H_PERIOD (336 * CLOCKS_PER_PIX) /* HS period */
97#define H_PULSE (2 * CLOCKS_PER_PIX) /* HS pulse width */
98#define H_START (7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR) /* first valid pixel */
99
100#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */
101#define V_PULSE (2 * CLOCKS_PER_PIX) /* VS pulse width (1-5 H_PERIODs) */
102#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */
103
104#define ACTIVE_VIDEO_MEM_OFFSET ((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8))
105
106#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
107
108#define PPI_TX_MODE 0x2
109#define PPI_XFER_TYPE_11 0xC
110#define PPI_PORT_CFG_01 0x10
111#define PPI_POLS_1 0x8000
112
113#if (CLOCKS_PER_PIX > 1)
114#define PPI_PMODE (DLEN_8 | PACK_EN)
115#else
116#define PPI_PMODE (DLEN_16)
117#endif
118
119#define LQ035_INDEX 0x74
120#define LQ035_DATA 0x76
121
122#define LQ035_DRIVER_OUTPUT_CTL 0x1
123#define LQ035_SHUT_CTL 0x11
124
125#define LQ035_DRIVER_OUTPUT_MASK (LQ035_LR | LQ035_TB | LQ035_BGR | LQ035_REV)
126#define LQ035_DRIVER_OUTPUT_DEFAULT (0x2AEF & ~LQ035_DRIVER_OUTPUT_MASK)
127
128#define LQ035_SHUT (1 << 0) /* Shutdown */
 129#define LQ035_ON (0 << 0) /* Normal operation */
130
131struct bfin_lq035q1fb_info {
132 struct fb_info *fb;
133 struct device *dev;
134 struct spi_driver spidrv;
135 struct bfin_lq035q1fb_disp_info *disp_info;
136 unsigned char *fb_buffer; /* RGB Buffer */
137 dma_addr_t dma_handle;
138 int lq035_open_cnt;
139 int irq;
140 spinlock_t lock; /* lock */
141 u32 pseudo_pal[16];
142};
143
144static int nocursor;
145module_param(nocursor, int, 0644);
146MODULE_PARM_DESC(nocursor, "cursor enable/disable");
147
148struct spi_control {
149 unsigned short mode;
150};
151
152static int lq035q1_control(struct spi_device *spi, unsigned char reg, unsigned short value)
153{
154 int ret;
155 u8 regs[3] = { LQ035_INDEX, 0, 0 };
156 u8 dat[3] = { LQ035_DATA, 0, 0 };
157
158 if (!spi)
159 return -ENODEV;
160
161 regs[2] = reg;
162 dat[1] = value >> 8;
163 dat[2] = value & 0xFF;
164
165 ret = spi_write(spi, regs, ARRAY_SIZE(regs));
166 ret |= spi_write(spi, dat, ARRAY_SIZE(dat));
167 return ret;
168}
169
170static int __devinit lq035q1_spidev_probe(struct spi_device *spi)
171{
172 int ret;
173 struct spi_control *ctl;
174 struct bfin_lq035q1fb_info *info = container_of(spi->dev.driver,
175 struct bfin_lq035q1fb_info,
176 spidrv.driver);
177
178 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
179
180 if (!ctl)
181 return -ENOMEM;
182
183 ctl->mode = (info->disp_info->mode &
184 LQ035_DRIVER_OUTPUT_MASK) | LQ035_DRIVER_OUTPUT_DEFAULT;
185
186 ret = lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
187 ret |= lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
188 if (ret)
189 return ret;
190
191 spi_set_drvdata(spi, ctl);
192
193 return 0;
194}
195
196static int lq035q1_spidev_remove(struct spi_device *spi)
197{
198 return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
199}
200
201#ifdef CONFIG_PM
202static int lq035q1_spidev_suspend(struct spi_device *spi, pm_message_t state)
203{
204 return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
205}
206
207static int lq035q1_spidev_resume(struct spi_device *spi)
208{
209 int ret;
210 struct spi_control *ctl = spi_get_drvdata(spi);
211
212 ret = lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
213 if (ret)
214 return ret;
215
216 return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
217}
218#else
219# define lq035q1_spidev_suspend NULL
220# define lq035q1_spidev_resume NULL
221#endif
222
223/* Power down all displays on reboot, poweroff or halt */
224static void lq035q1_spidev_shutdown(struct spi_device *spi)
225{
226 lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
227}
228
229static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg)
230{
231 if (info->disp_info->use_bl)
232 gpio_set_value(info->disp_info->gpio_bl, arg);
233
234 return 0;
235}
236
237static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
238{
239 bfin_write_PPI_DELAY(H_START);
240 bfin_write_PPI_COUNT(H_ACTPIX - 1);
241 bfin_write_PPI_FRAME(V_LINES);
242
243 bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */
244 PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
245 PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */
246 PPI_PMODE | /* 8/16 bit data length / PACK_EN? */
 247 PPI_POLS_1); /* falling edge syncs POLS */
248}
249
250static inline void bfin_lq035q1_disable_ppi(void)
251{
252 bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN);
253}
254
255static inline void bfin_lq035q1_enable_ppi(void)
256{
257 bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
258}
259
260static void bfin_lq035q1_start_timers(void)
261{
262 enable_gptimers(TIMER_VSYNCbit | TIMER_HSYNCbit);
263}
264
265static void bfin_lq035q1_stop_timers(void)
266{
267 disable_gptimers(TIMER_HSYNCbit | TIMER_VSYNCbit);
268
269 set_gptimer_status(0, TIMER_HSYNC_STATUS_TRUN | TIMER_VSYNC_STATUS_TRUN |
270 TIMER_HSYNC_STATUS_TIMIL | TIMER_VSYNC_STATUS_TIMIL |
271 TIMER_HSYNC_STATUS_TOVF | TIMER_VSYNC_STATUS_TOVF);
272
273}
274
275static void bfin_lq035q1_init_timers(void)
276{
277
278 bfin_lq035q1_stop_timers();
279
280 set_gptimer_period(TIMER_HSYNC_id, H_PERIOD);
281 set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE);
282 set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
283 TIMER_TIN_SEL | TIMER_CLK_SEL|
284 TIMER_EMU_RUN);
285
286 set_gptimer_period(TIMER_VSYNC_id, V_PERIOD);
287 set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE);
288 set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
289 TIMER_TIN_SEL | TIMER_CLK_SEL |
290 TIMER_EMU_RUN);
291
292}
293
294static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
295{
296
297 set_dma_config(CH_PPI,
298 set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
299 INTR_DISABLE, DIMENSION_2D,
300 DATA_SIZE_16,
301 DMA_NOSYNC_KEEP_DMA_BUF));
302 set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
303 set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
304 set_dma_y_count(CH_PPI, V_LINES);
305
306 set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
307 set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
308
309}
310
311#if (CLOCKS_PER_PIX == 1)
312static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
313 P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
314 P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
315 P_PPI0_D6, P_PPI0_D7, P_PPI0_D8,
316 P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
317 P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
318 P_PPI0_D15, 0};
319#else
320static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
321 P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
322 P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
323 P_PPI0_D6, P_PPI0_D7, 0};
324#endif
325
326static inline void bfin_lq035q1_free_ports(void)
327{
328 peripheral_free_list(ppi0_req_16);
329 if (ANOMALY_05000400)
330 gpio_free(P_IDENT(P_PPI0_FS3));
331}
332
333static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
334{
335 /* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
336 * Drive PPI_FS3 Low
337 */
338 if (ANOMALY_05000400) {
339 int ret = gpio_request(P_IDENT(P_PPI0_FS3), "PPI_FS3");
340 if (ret)
341 return ret;
342 gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
343 }
344
345 if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) {
346 dev_err(&pdev->dev, "requesting peripherals failed\n");
347 return -EFAULT;
348 }
349
350 return 0;
351}
352
353static int bfin_lq035q1_fb_open(struct fb_info *info, int user)
354{
355 struct bfin_lq035q1fb_info *fbi = info->par;
356
357 spin_lock(&fbi->lock);
358 fbi->lq035_open_cnt++;
359
360 if (fbi->lq035_open_cnt <= 1) {
361
362 bfin_lq035q1_disable_ppi();
363 SSYNC();
364
365 bfin_lq035q1_config_dma(fbi);
366 bfin_lq035q1_config_ppi(fbi);
367 bfin_lq035q1_init_timers();
368
369 /* start dma */
370 enable_dma(CH_PPI);
371 bfin_lq035q1_enable_ppi();
372 bfin_lq035q1_start_timers();
373 lq035q1_backlight(fbi, 1);
374 }
375
376 spin_unlock(&fbi->lock);
377
378 return 0;
379}
380
381static int bfin_lq035q1_fb_release(struct fb_info *info, int user)
382{
383 struct bfin_lq035q1fb_info *fbi = info->par;
384
385 spin_lock(&fbi->lock);
386
387 fbi->lq035_open_cnt--;
388
389 if (fbi->lq035_open_cnt <= 0) {
390 lq035q1_backlight(fbi, 0);
391 bfin_lq035q1_disable_ppi();
392 SSYNC();
393 disable_dma(CH_PPI);
394 bfin_lq035q1_stop_timers();
395 }
396
397 spin_unlock(&fbi->lock);
398
399 return 0;
400}
401
402static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
403 struct fb_info *info)
404{
405 switch (var->bits_per_pixel) {
406#if (LCD_BPP == 24)
407 case 24:/* TRUECOLOUR, 16m */
408#else
409 case 16:/* DIRECTCOLOUR, 64k */
410#endif
411 var->red.offset = info->var.red.offset;
412 var->green.offset = info->var.green.offset;
413 var->blue.offset = info->var.blue.offset;
414 var->red.length = info->var.red.length;
415 var->green.length = info->var.green.length;
416 var->blue.length = info->var.blue.length;
417 var->transp.offset = 0;
418 var->transp.length = 0;
419 var->transp.msb_right = 0;
420 var->red.msb_right = 0;
421 var->green.msb_right = 0;
422 var->blue.msb_right = 0;
423 break;
424 default:
425 pr_debug("%s: depth not supported: %u BPP\n", __func__,
426 var->bits_per_pixel);
427 return -EINVAL;
428 }
429
430 if (info->var.xres != var->xres || info->var.yres != var->yres ||
431 info->var.xres_virtual != var->xres_virtual ||
432 info->var.yres_virtual != var->yres_virtual) {
433 pr_debug("%s: Resolution not supported: X%u x Y%u\n",
434 __func__, var->xres, var->yres);
435 return -EINVAL;
436 }
437
438 /*
439 * Memory limit
440 */
441
442 if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
443 pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
444 __func__, var->yres_virtual);
445 return -ENOMEM;
446 }
447
448
449 return 0;
450}
451
452int bfin_lq035q1_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
453{
454 if (nocursor)
455 return 0;
456 else
457 return -EINVAL; /* just to force soft_cursor() call */
458}
459
460static int bfin_lq035q1_fb_setcolreg(u_int regno, u_int red, u_int green,
461 u_int blue, u_int transp,
462 struct fb_info *info)
463{
464 if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES)
465 return -EINVAL;
466
467 if (info->var.grayscale) {
468 /* grayscale = 0.30*R + 0.59*G + 0.11*B */
469 red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
470 }
471
472 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
473
474 u32 value;
475 /* Place color in the pseudopalette */
476 if (regno >= 16)
477 return -EINVAL;
478
479 red >>= (16 - info->var.red.length);
480 green >>= (16 - info->var.green.length);
481 blue >>= (16 - info->var.blue.length);
482
483 value = (red << info->var.red.offset) |
484 (green << info->var.green.offset) |
485 (blue << info->var.blue.offset);
486 value &= 0xFFFFFF;
487
488 ((u32 *) (info->pseudo_palette))[regno] = value;
489
490 }
491
492 return 0;
493}
494
495static struct fb_ops bfin_lq035q1_fb_ops = {
496 .owner = THIS_MODULE,
497 .fb_open = bfin_lq035q1_fb_open,
498 .fb_release = bfin_lq035q1_fb_release,
499 .fb_check_var = bfin_lq035q1_fb_check_var,
500 .fb_fillrect = cfb_fillrect,
501 .fb_copyarea = cfb_copyarea,
502 .fb_imageblit = cfb_imageblit,
503 .fb_cursor = bfin_lq035q1_fb_cursor,
504 .fb_setcolreg = bfin_lq035q1_fb_setcolreg,
505};
506
507static irqreturn_t bfin_lq035q1_irq_error(int irq, void *dev_id)
508{
509 /*struct bfin_lq035q1fb_info *info = (struct bfin_lq035q1fb_info *)dev_id;*/
510
511 u16 status = bfin_read_PPI_STATUS();
512 bfin_write_PPI_STATUS(-1);
513
514 if (status) {
515 bfin_lq035q1_disable_ppi();
516 disable_dma(CH_PPI);
517
518 /* start dma */
519 enable_dma(CH_PPI);
520 bfin_lq035q1_enable_ppi();
521 bfin_write_PPI_STATUS(-1);
522 }
523
524 return IRQ_HANDLED;
525}
526
527static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
528{
529 struct bfin_lq035q1fb_info *info;
530 struct fb_info *fbinfo;
531 int ret;
532
533 ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
534 if (ret < 0) {
535 dev_err(&pdev->dev, "PPI DMA unavailable\n");
536 goto out1;
537 }
538
539 fbinfo = framebuffer_alloc(sizeof(*info), &pdev->dev);
540 if (!fbinfo) {
541 ret = -ENOMEM;
542 goto out2;
543 }
544
545 info = fbinfo->par;
546 info->fb = fbinfo;
547 info->dev = &pdev->dev;
548
549 info->disp_info = pdev->dev.platform_data;
550
551 platform_set_drvdata(pdev, fbinfo);
552
553 strcpy(fbinfo->fix.id, DRIVER_NAME);
554
555 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
556 fbinfo->fix.type_aux = 0;
557 fbinfo->fix.xpanstep = 0;
558 fbinfo->fix.ypanstep = 0;
559 fbinfo->fix.ywrapstep = 0;
560 fbinfo->fix.accel = FB_ACCEL_NONE;
561 fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;
562
563 fbinfo->var.nonstd = 0;
564 fbinfo->var.activate = FB_ACTIVATE_NOW;
565 fbinfo->var.height = -1;
566 fbinfo->var.width = -1;
567 fbinfo->var.accel_flags = 0;
568 fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
569
570 fbinfo->var.xres = LCD_X_RES;
571 fbinfo->var.xres_virtual = LCD_X_RES;
572 fbinfo->var.yres = LCD_Y_RES;
573 fbinfo->var.yres_virtual = LCD_Y_RES;
574 fbinfo->var.bits_per_pixel = LCD_BPP;
575
576 if (info->disp_info->mode & LQ035_BGR) {
577#if (LCD_BPP == 24)
578 fbinfo->var.red.offset = 0;
579 fbinfo->var.green.offset = 8;
580 fbinfo->var.blue.offset = 16;
581#else
582 fbinfo->var.red.offset = 0;
583 fbinfo->var.green.offset = 5;
584 fbinfo->var.blue.offset = 11;
585#endif
586 } else {
587#if (LCD_BPP == 24)
588 fbinfo->var.red.offset = 16;
589 fbinfo->var.green.offset = 8;
590 fbinfo->var.blue.offset = 0;
591#else
592 fbinfo->var.red.offset = 11;
593 fbinfo->var.green.offset = 5;
594 fbinfo->var.blue.offset = 0;
595#endif
596 }
597
598 fbinfo->var.transp.offset = 0;
599
600#if (LCD_BPP == 24)
601 fbinfo->var.red.length = 8;
602 fbinfo->var.green.length = 8;
603 fbinfo->var.blue.length = 8;
604#else
605 fbinfo->var.red.length = 5;
606 fbinfo->var.green.length = 6;
607 fbinfo->var.blue.length = 5;
608#endif
609
610 fbinfo->var.transp.length = 0;
611
612 fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8
613 + ACTIVE_VIDEO_MEM_OFFSET;
614
615 fbinfo->fix.line_length = fbinfo->var.xres_virtual *
616 fbinfo->var.bits_per_pixel / 8;
617
618
619 fbinfo->fbops = &bfin_lq035q1_fb_ops;
620 fbinfo->flags = FBINFO_FLAG_DEFAULT;
621
622 info->fb_buffer =
623 dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
624 GFP_KERNEL);
625
626 if (NULL == info->fb_buffer) {
627 dev_err(&pdev->dev, "couldn't allocate dma buffer\n");
628 ret = -ENOMEM;
629 goto out3;
630 }
631
632 fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
633 fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
634
635 fbinfo->fbops = &bfin_lq035q1_fb_ops;
636
637 fbinfo->pseudo_palette = &info->pseudo_pal;
638
639 ret = fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0);
640 if (ret < 0) {
641 dev_err(&pdev->dev, "failed to allocate colormap (%d entries)\n",
642 BFIN_LCD_NBR_PALETTE_ENTRIES);
643 goto out4;
644 }
645
646 ret = bfin_lq035q1_request_ports(pdev);
647 if (ret) {
648 dev_err(&pdev->dev, "couldn't request gpio port\n");
649 goto out6;
650 }
651
652 info->irq = platform_get_irq(pdev, 0);
653 if (info->irq < 0) {
654 ret = -EINVAL;
655 goto out7;
656 }
657
658 ret = request_irq(info->irq, bfin_lq035q1_irq_error, IRQF_DISABLED,
659 DRIVER_NAME" PPI ERROR", info);
660 if (ret < 0) {
661 dev_err(&pdev->dev, "unable to request PPI ERROR IRQ\n");
662 goto out7;
663 }
664
665 info->spidrv.driver.name = DRIVER_NAME"-spi";
666 info->spidrv.probe = lq035q1_spidev_probe;
667 info->spidrv.remove = __devexit_p(lq035q1_spidev_remove);
668 info->spidrv.shutdown = lq035q1_spidev_shutdown;
669 info->spidrv.suspend = lq035q1_spidev_suspend;
670 info->spidrv.resume = lq035q1_spidev_resume;
671
672 ret = spi_register_driver(&info->spidrv);
673 if (ret < 0) {
674 dev_err(&pdev->dev, "couldn't register SPI Interface\n");
675 goto out8;
676 }
677
678 if (info->disp_info->use_bl) {
679 ret = gpio_request(info->disp_info->gpio_bl, "LQ035 Backlight");
680
681 if (ret) {
682 dev_err(&pdev->dev, "failed to request GPIO %d\n",
683 info->disp_info->gpio_bl);
684 goto out9;
685 }
686 gpio_direction_output(info->disp_info->gpio_bl, 0);
687 }
688
689 ret = register_framebuffer(fbinfo);
690 if (ret < 0) {
691 dev_err(&pdev->dev, "unable to register framebuffer\n");
692 goto out10;
693 }
694
695 dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
696 LCD_X_RES, LCD_Y_RES, LCD_BPP);
697
698 return 0;
699
700 out10:
701 if (info->disp_info->use_bl)
702 gpio_free(info->disp_info->gpio_bl);
703 out9:
704 spi_unregister_driver(&info->spidrv);
705 out8:
706 free_irq(info->irq, info);
707 out7:
708 bfin_lq035q1_free_ports();
709 out6:
710 fb_dealloc_cmap(&fbinfo->cmap);
711 out4:
712 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
713 info->dma_handle);
714 out3:
715 framebuffer_release(fbinfo);
716 out2:
717 free_dma(CH_PPI);
718 out1:
719 platform_set_drvdata(pdev, NULL);
720
721 return ret;
722}
723
724static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
725{
726 struct fb_info *fbinfo = platform_get_drvdata(pdev);
727 struct bfin_lq035q1fb_info *info = fbinfo->par;
728
729 if (info->disp_info->use_bl)
730 gpio_free(info->disp_info->gpio_bl);
731
732 spi_unregister_driver(&info->spidrv);
733
734 unregister_framebuffer(fbinfo);
735
736 free_dma(CH_PPI);
737 free_irq(info->irq, info);
738
739 if (info->fb_buffer != NULL)
740 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
741 info->dma_handle);
742
743 fb_dealloc_cmap(&fbinfo->cmap);
744
745 bfin_lq035q1_free_ports();
746
747 platform_set_drvdata(pdev, NULL);
748 framebuffer_release(fbinfo);
749
750 dev_info(&pdev->dev, "unregistered LCD driver\n");
751
752 return 0;
753}
754
755#ifdef CONFIG_PM
756static int bfin_lq035q1_suspend(struct device *dev)
757{
758 struct fb_info *fbinfo = dev_get_drvdata(dev);
759 struct bfin_lq035q1fb_info *info = fbinfo->par;
760
761 if (info->lq035_open_cnt) {
762 lq035q1_backlight(info, 0);
763 bfin_lq035q1_disable_ppi();
764 SSYNC();
765 disable_dma(CH_PPI);
766 bfin_lq035q1_stop_timers();
767 bfin_write_PPI_STATUS(-1);
768 }
769
770 return 0;
771}
772
773static int bfin_lq035q1_resume(struct device *dev)
774{
775 struct fb_info *fbinfo = dev_get_drvdata(dev);
776 struct bfin_lq035q1fb_info *info = fbinfo->par;
777
778 if (info->lq035_open_cnt) {
779 bfin_lq035q1_disable_ppi();
780 SSYNC();
781
782 bfin_lq035q1_config_dma(info);
783 bfin_lq035q1_config_ppi(info);
784 bfin_lq035q1_init_timers();
785
786 /* start dma */
787 enable_dma(CH_PPI);
788 bfin_lq035q1_enable_ppi();
789 bfin_lq035q1_start_timers();
790 lq035q1_backlight(info, 1);
791 }
792
793 return 0;
794}
795
796static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = {
797 .suspend = bfin_lq035q1_suspend,
798 .resume = bfin_lq035q1_resume,
799};
800#endif
801
802static struct platform_driver bfin_lq035q1_driver = {
803 .probe = bfin_lq035q1_probe,
804 .remove = __devexit_p(bfin_lq035q1_remove),
805 .driver = {
806 .name = DRIVER_NAME,
807#ifdef CONFIG_PM
808 .pm = &bfin_lq035q1_dev_pm_ops,
809#endif
810 },
811};
812
813static int __init bfin_lq035q1_driver_init(void)
814{
815 return platform_driver_register(&bfin_lq035q1_driver);
816}
817module_init(bfin_lq035q1_driver_init);
818
819static void __exit bfin_lq035q1_driver_cleanup(void)
820{
821 platform_driver_unregister(&bfin_lq035q1_driver);
822}
823module_exit(bfin_lq035q1_driver_cleanup);
824
825MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
826MODULE_LICENSE("GPL");
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 5cc36cfbf07b..2549c53b26a0 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -487,8 +487,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
487 487
488 fbinfo->var.nonstd = 0; 488 fbinfo->var.nonstd = 0;
489 fbinfo->var.activate = FB_ACTIVATE_NOW; 489 fbinfo->var.activate = FB_ACTIVATE_NOW;
490 fbinfo->var.height = -1; 490 fbinfo->var.height = 53;
491 fbinfo->var.width = -1; 491 fbinfo->var.width = 70;
492 fbinfo->var.accel_flags = 0; 492 fbinfo->var.accel_flags = 0;
493 fbinfo->var.vmode = FB_VMODE_NONINTERLACED; 493 fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
494 494
@@ -634,17 +634,35 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
634#ifdef CONFIG_PM 634#ifdef CONFIG_PM
635static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state) 635static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
636{ 636{
637 bfin_t350mcqb_disable_ppi(); 637 struct fb_info *fbinfo = platform_get_drvdata(pdev);
638 disable_dma(CH_PPI); 638 struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
639 bfin_write_PPI_STATUS(0xFFFF); 639
640 if (fbi->lq043_open_cnt) {
641 bfin_t350mcqb_disable_ppi();
642 disable_dma(CH_PPI);
643 bfin_t350mcqb_stop_timers();
644 bfin_write_PPI_STATUS(-1);
645 }
646
640 647
641 return 0; 648 return 0;
642} 649}
643 650
644static int bfin_t350mcqb_resume(struct platform_device *pdev) 651static int bfin_t350mcqb_resume(struct platform_device *pdev)
645{ 652{
646 enable_dma(CH_PPI); 653 struct fb_info *fbinfo = platform_get_drvdata(pdev);
647 bfin_t350mcqb_enable_ppi(); 654 struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
655
656 if (fbi->lq043_open_cnt) {
657 bfin_t350mcqb_config_dma(fbi);
658 bfin_t350mcqb_config_ppi(fbi);
659 bfin_t350mcqb_init_timers();
660
661 /* start dma */
662 enable_dma(CH_PPI);
663 bfin_t350mcqb_enable_ppi();
664 bfin_t350mcqb_start_timers();
665 }
648 666
649 return 0; 667 return 0;
650} 668}
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 16f5db471ab5..99b354b8e257 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -19,8 +19,10 @@
19 * 19 *
20 * Framebuffer driver for the CLPS7111 and EP7212 processors. 20 * Framebuffer driver for the CLPS7111 and EP7212 processors.
21 */ 21 */
22#include <linux/mm.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/seq_file.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25#include <linux/fb.h> 27#include <linux/fb.h>
26#include <linux/init.h> 28#include <linux/init.h>
@@ -38,14 +40,6 @@ struct fb_info *cfb;
38 40
39#define CMAP_MAX_SIZE 16 41#define CMAP_MAX_SIZE 16
40 42
41/* The /proc entry for the backlight. */
42static struct proc_dir_entry *clps7111fb_backlight_proc_entry = NULL;
43
44static int clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
45 int count, int *eof, void *data);
46static int clps7111fb_proc_backlight_write(struct file *file,
47 const char *buffer, unsigned long count, void *data);
48
49/* 43/*
50 * LCD AC Prescale. This comes from the LCD panel manufacturers specifications. 44 * LCD AC Prescale. This comes from the LCD panel manufacturers specifications.
51 * This determines how many clocks + 1 of CL1 before the M signal toggles. 45 * This determines how many clocks + 1 of CL1 before the M signal toggles.
@@ -221,26 +215,23 @@ static struct fb_ops clps7111fb_ops = {
221 .fb_imageblit = cfb_imageblit, 215 .fb_imageblit = cfb_imageblit,
222}; 216};
223 217
224static int 218static int backlight_proc_show(struct seq_file *m, void *v)
225clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
226 int count, int *eof, void *data)
227{ 219{
228 /* We need at least two characters, one for the digit, and one for
229 * the terminating NULL. */
230 if (count < 2)
231 return -EINVAL;
232
233 if (machine_is_edb7211()) { 220 if (machine_is_edb7211()) {
234 return sprintf(page, "%d\n", 221 seq_printf(m, "%d\n",
235 (clps_readb(PDDR) & EDB_PD3_LCDBL) ? 1 : 0); 222 (clps_readb(PDDR) & EDB_PD3_LCDBL) ? 1 : 0);
236 } 223 }
237 224
238 return 0; 225 return 0;
239} 226}
240 227
241static int 228static int backlight_proc_open(struct inode *inode, struct file *file)
242clps7111fb_proc_backlight_write(struct file *file, const char *buffer, 229{
243 unsigned long count, void *data) 230 return single_open(file, backlight_proc_show, NULL);
231}
232
233static ssize_t backlight_proc_write(struct file *file, const char *buffer,
234 size_t count, loff_t *pos)
244{ 235{
245 unsigned char char_value; 236 unsigned char char_value;
246 int value; 237 int value;
@@ -271,6 +262,15 @@ clps7111fb_proc_backlight_write(struct file *file, const char *buffer,
271 return count; 262 return count;
272} 263}
273 264
265static const struct file_operations backlight_proc_fops = {
266 .owner = THIS_MODULE,
267 .open = backlight_proc_open,
268 .read = seq_read,
269 .llseek = seq_lseek,
270 .release = single_release,
271 .write = backlight_proc_write,
272};
273
274static void __init clps711x_guess_lcd_params(struct fb_info *info) 274static void __init clps711x_guess_lcd_params(struct fb_info *info)
275{ 275{
276 unsigned int lcdcon, syscon, size; 276 unsigned int lcdcon, syscon, size;
@@ -379,19 +379,11 @@ int __init clps711xfb_init(void)
379 379
380 fb_alloc_cmap(&cfb->cmap, CMAP_MAX_SIZE, 0); 380 fb_alloc_cmap(&cfb->cmap, CMAP_MAX_SIZE, 0);
381 381
382 /* Register the /proc entries. */ 382 if (!proc_create("backlight", 0444, NULL, &backlight_proc_fops)) {
383 clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444,
384 NULL);
385 if (clps7111fb_backlight_proc_entry == NULL) {
386 printk("Couldn't create the /proc entry for the backlight.\n"); 383 printk("Couldn't create the /proc entry for the backlight.\n");
387 return -EINVAL; 384 return -EINVAL;
388 } 385 }
389 386
390 clps7111fb_backlight_proc_entry->read_proc =
391 &clps7111fb_proc_backlight_read;
392 clps7111fb_backlight_proc_entry->write_proc =
393 &clps7111fb_proc_backlight_write;
394
395 /* 387 /*
396 * Power up the LCD 388 * Power up the LCD
397 */ 389 */
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index ea1fd3f47511..369a5b3ac649 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -28,6 +28,8 @@
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/cpufreq.h>
32#include <linux/console.h>
31#include <video/da8xx-fb.h> 33#include <video/da8xx-fb.h>
32 34
33#define DRIVER_NAME "da8xx_lcdc" 35#define DRIVER_NAME "da8xx_lcdc"
@@ -113,6 +115,12 @@ struct da8xx_fb_par {
113 unsigned short pseudo_palette[16]; 115 unsigned short pseudo_palette[16];
114 unsigned int databuf_sz; 116 unsigned int databuf_sz;
115 unsigned int palette_sz; 117 unsigned int palette_sz;
118 unsigned int pxl_clk;
119 int blank;
120#ifdef CONFIG_CPU_FREQ
121 struct notifier_block freq_transition;
122#endif
123 void (*panel_power_ctrl)(int);
116}; 124};
117 125
118/* Variable Screen Information */ 126/* Variable Screen Information */
@@ -155,7 +163,7 @@ struct da8xx_panel {
155 int vfp; /* Vertical front porch */ 163 int vfp; /* Vertical front porch */
156 int vbp; /* Vertical back porch */ 164 int vbp; /* Vertical back porch */
157 int vsw; /* Vertical Sync Pulse Width */ 165 int vsw; /* Vertical Sync Pulse Width */
158 int pxl_clk; /* Pixel clock */ 166 unsigned int pxl_clk; /* Pixel clock */
159 unsigned char invert_pxl_clk; /* Invert Pixel clock */ 167 unsigned char invert_pxl_clk; /* Invert Pixel clock */
160}; 168};
161 169
@@ -171,7 +179,7 @@ static struct da8xx_panel known_lcd_panels[] = {
171 .vfp = 2, 179 .vfp = 2,
172 .vbp = 2, 180 .vbp = 2,
173 .vsw = 0, 181 .vsw = 0,
174 .pxl_clk = 0x10, 182 .pxl_clk = 4608000,
175 .invert_pxl_clk = 1, 183 .invert_pxl_clk = 1,
176 }, 184 },
177 /* Sharp LK043T1DG01 */ 185 /* Sharp LK043T1DG01 */
@@ -185,13 +193,23 @@ static struct da8xx_panel known_lcd_panels[] = {
185 .vfp = 2, 193 .vfp = 2,
186 .vbp = 2, 194 .vbp = 2,
187 .vsw = 10, 195 .vsw = 10,
188 .pxl_clk = 0x12, 196 .pxl_clk = 7833600,
189 .invert_pxl_clk = 0, 197 .invert_pxl_clk = 0,
190 }, 198 },
191}; 199};
192 200
201/* Enable the Raster Engine of the LCD Controller */
202static inline void lcd_enable_raster(void)
203{
204 u32 reg;
205
206 reg = lcdc_read(LCD_RASTER_CTRL_REG);
207 if (!(reg & LCD_RASTER_ENABLE))
208 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
209}
210
193/* Disable the Raster Engine of the LCD Controller */ 211/* Disable the Raster Engine of the LCD Controller */
194static void lcd_disable_raster(struct da8xx_fb_par *par) 212static inline void lcd_disable_raster(void)
195{ 213{
196 u32 reg; 214 u32 reg;
197 215
@@ -443,14 +461,25 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
443static void lcd_reset(struct da8xx_fb_par *par) 461static void lcd_reset(struct da8xx_fb_par *par)
444{ 462{
445 /* Disable the Raster if previously Enabled */ 463 /* Disable the Raster if previously Enabled */
446 if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE) 464 lcd_disable_raster();
447 lcd_disable_raster(par);
448 465
449 /* DMA has to be disabled */ 466 /* DMA has to be disabled */
450 lcdc_write(0, LCD_DMA_CTRL_REG); 467 lcdc_write(0, LCD_DMA_CTRL_REG);
451 lcdc_write(0, LCD_RASTER_CTRL_REG); 468 lcdc_write(0, LCD_RASTER_CTRL_REG);
452} 469}
453 470
471static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
472{
473 unsigned int lcd_clk, div;
474
475 lcd_clk = clk_get_rate(par->lcdc_clk);
476 div = lcd_clk / par->pxl_clk;
477
478 /* Configure the LCD clock divisor. */
479 lcdc_write(LCD_CLK_DIVISOR(div) |
480 (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
481}
482
454static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, 483static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
455 struct da8xx_panel *panel) 484 struct da8xx_panel *panel)
456{ 485{
@@ -459,9 +488,8 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
459 488
460 lcd_reset(par); 489 lcd_reset(par);
461 490
462 /* Configure the LCD clock divisor. */ 491 /* Calculate the divider */
463 lcdc_write(LCD_CLK_DIVISOR(panel->pxl_clk) | 492 lcd_calc_clk_divider(par);
464 (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
465 493
466 if (panel->invert_pxl_clk) 494 if (panel->invert_pxl_clk)
467 lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) | 495 lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
@@ -513,13 +541,11 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
513static irqreturn_t lcdc_irq_handler(int irq, void *arg) 541static irqreturn_t lcdc_irq_handler(int irq, void *arg)
514{ 542{
515 u32 stat = lcdc_read(LCD_STAT_REG); 543 u32 stat = lcdc_read(LCD_STAT_REG);
516 u32 reg;
517 544
518 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { 545 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
519 reg = lcdc_read(LCD_RASTER_CTRL_REG); 546 lcd_disable_raster();
520 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
521 lcdc_write(stat, LCD_STAT_REG); 547 lcdc_write(stat, LCD_STAT_REG);
522 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 548 lcd_enable_raster();
523 } else 549 } else
524 lcdc_write(stat, LCD_STAT_REG); 550 lcdc_write(stat, LCD_STAT_REG);
525 551
@@ -574,6 +600,38 @@ static int fb_check_var(struct fb_var_screeninfo *var,
574 return err; 600 return err;
575} 601}
576 602
603#ifdef CONFIG_CPU_FREQ
604static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
605 unsigned long val, void *data)
606{
607 struct da8xx_fb_par *par;
608
609 par = container_of(nb, struct da8xx_fb_par, freq_transition);
610 if (val == CPUFREQ_PRECHANGE) {
611 lcd_disable_raster();
612 } else if (val == CPUFREQ_POSTCHANGE) {
613 lcd_calc_clk_divider(par);
614 lcd_enable_raster();
615 }
616
617 return 0;
618}
619
620static inline int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par)
621{
622 par->freq_transition.notifier_call = lcd_da8xx_cpufreq_transition;
623
624 return cpufreq_register_notifier(&par->freq_transition,
625 CPUFREQ_TRANSITION_NOTIFIER);
626}
627
628static inline void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
629{
630 cpufreq_unregister_notifier(&par->freq_transition,
631 CPUFREQ_TRANSITION_NOTIFIER);
632}
633#endif
634
577static int __devexit fb_remove(struct platform_device *dev) 635static int __devexit fb_remove(struct platform_device *dev)
578{ 636{
579 struct fb_info *info = dev_get_drvdata(&dev->dev); 637 struct fb_info *info = dev_get_drvdata(&dev->dev);
@@ -581,8 +639,13 @@ static int __devexit fb_remove(struct platform_device *dev)
581 if (info) { 639 if (info) {
582 struct da8xx_fb_par *par = info->par; 640 struct da8xx_fb_par *par = info->par;
583 641
584 if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE) 642#ifdef CONFIG_CPU_FREQ
585 lcd_disable_raster(par); 643 lcd_da8xx_cpufreq_deregister(par);
644#endif
645 if (par->panel_power_ctrl)
646 par->panel_power_ctrl(0);
647
648 lcd_disable_raster();
586 lcdc_write(0, LCD_RASTER_CTRL_REG); 649 lcdc_write(0, LCD_RASTER_CTRL_REG);
587 650
588 /* disable DMA */ 651 /* disable DMA */
@@ -639,6 +702,35 @@ static int fb_ioctl(struct fb_info *info, unsigned int cmd,
639 return 0; 702 return 0;
640} 703}
641 704
705static int cfb_blank(int blank, struct fb_info *info)
706{
707 struct da8xx_fb_par *par = info->par;
708 int ret = 0;
709
710 if (par->blank == blank)
711 return 0;
712
713 par->blank = blank;
714 switch (blank) {
715 case FB_BLANK_UNBLANK:
716 if (par->panel_power_ctrl)
717 par->panel_power_ctrl(1);
718
719 lcd_enable_raster();
720 break;
721 case FB_BLANK_POWERDOWN:
722 if (par->panel_power_ctrl)
723 par->panel_power_ctrl(0);
724
725 lcd_disable_raster();
726 break;
727 default:
728 ret = -EINVAL;
729 }
730
731 return ret;
732}
733
642static struct fb_ops da8xx_fb_ops = { 734static struct fb_ops da8xx_fb_ops = {
643 .owner = THIS_MODULE, 735 .owner = THIS_MODULE,
644 .fb_check_var = fb_check_var, 736 .fb_check_var = fb_check_var,
@@ -647,6 +739,7 @@ static struct fb_ops da8xx_fb_ops = {
647 .fb_fillrect = cfb_fillrect, 739 .fb_fillrect = cfb_fillrect,
648 .fb_copyarea = cfb_copyarea, 740 .fb_copyarea = cfb_copyarea,
649 .fb_imageblit = cfb_imageblit, 741 .fb_imageblit = cfb_imageblit,
742 .fb_blank = cfb_blank,
650}; 743};
651 744
652static int __init fb_probe(struct platform_device *device) 745static int __init fb_probe(struct platform_device *device)
@@ -721,6 +814,12 @@ static int __init fb_probe(struct platform_device *device)
721 } 814 }
722 815
723 par = da8xx_fb_info->par; 816 par = da8xx_fb_info->par;
817 par->lcdc_clk = fb_clk;
818 par->pxl_clk = lcdc_info->pxl_clk;
819 if (fb_pdata->panel_power_ctrl) {
820 par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
821 par->panel_power_ctrl(1);
822 }
724 823
725 if (lcd_init(par, lcd_cfg, lcdc_info) < 0) { 824 if (lcd_init(par, lcd_cfg, lcdc_info) < 0) {
726 dev_err(&device->dev, "lcd_init failed\n"); 825 dev_err(&device->dev, "lcd_init failed\n");
@@ -754,8 +853,6 @@ static int __init fb_probe(struct platform_device *device)
754 da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz; 853 da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
755 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; 854 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
756 855
757 par->lcdc_clk = fb_clk;
758
759 par->irq = platform_get_irq(device, 0); 856 par->irq = platform_get_irq(device, 0);
760 if (par->irq < 0) { 857 if (par->irq < 0) {
761 ret = -ENOENT; 858 ret = -ENOENT;
@@ -814,12 +911,24 @@ static int __init fb_probe(struct platform_device *device)
814 goto err_dealloc_cmap; 911 goto err_dealloc_cmap;
815 } 912 }
816 913
914#ifdef CONFIG_CPU_FREQ
915 ret = lcd_da8xx_cpufreq_register(par);
916 if (ret) {
917 dev_err(&device->dev, "failed to register cpufreq\n");
918 goto err_cpu_freq;
919 }
920#endif
921
817 /* enable raster engine */ 922 /* enable raster engine */
818 lcdc_write(lcdc_read(LCD_RASTER_CTRL_REG) | 923 lcd_enable_raster();
819 LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
820 924
821 return 0; 925 return 0;
822 926
927#ifdef CONFIG_CPU_FREQ
928err_cpu_freq:
929 unregister_framebuffer(da8xx_fb_info);
930#endif
931
823err_dealloc_cmap: 932err_dealloc_cmap:
824 fb_dealloc_cmap(&da8xx_fb_info->cmap); 933 fb_dealloc_cmap(&da8xx_fb_info->cmap);
825 934
@@ -852,11 +961,35 @@ err_request_mem:
852#ifdef CONFIG_PM 961#ifdef CONFIG_PM
853static int fb_suspend(struct platform_device *dev, pm_message_t state) 962static int fb_suspend(struct platform_device *dev, pm_message_t state)
854{ 963{
855 return -EBUSY; 964 struct fb_info *info = platform_get_drvdata(dev);
965 struct da8xx_fb_par *par = info->par;
966
967 acquire_console_sem();
968 if (par->panel_power_ctrl)
969 par->panel_power_ctrl(0);
970
971 fb_set_suspend(info, 1);
972 lcd_disable_raster();
973 clk_disable(par->lcdc_clk);
974 release_console_sem();
975
976 return 0;
856} 977}
857static int fb_resume(struct platform_device *dev) 978static int fb_resume(struct platform_device *dev)
858{ 979{
859 return -EBUSY; 980 struct fb_info *info = platform_get_drvdata(dev);
981 struct da8xx_fb_par *par = info->par;
982
983 acquire_console_sem();
984 if (par->panel_power_ctrl)
985 par->panel_power_ctrl(1);
986
987 clk_enable(par->lcdc_clk);
988 lcd_enable_raster();
989 fb_set_suspend(info, 0);
990 release_console_sem();
991
992 return 0;
860} 993}
861#else 994#else
862#define fb_suspend NULL 995#define fb_suspend NULL
diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c
index 4830b1bf51e5..80abbf323b99 100644
--- a/drivers/video/display/display-sysfs.c
+++ b/drivers/video/display/display-sysfs.c
@@ -67,7 +67,7 @@ static ssize_t display_store_contrast(struct device *dev,
67 contrast = simple_strtoul(buf, &endp, 0); 67 contrast = simple_strtoul(buf, &endp, 0);
68 size = endp - buf; 68 size = endp - buf;
69 69
70 if (*endp && isspace(*endp)) 70 if (isspace(*endp))
71 size++; 71 size++;
72 72
73 if (size != count) 73 if (size != count)
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index bd9d46f95291..27aab4a06198 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -358,6 +358,8 @@ static int ep93xxfb_setcolreg(unsigned int regno, unsigned int red,
358 358
359 switch (info->fix.visual) { 359 switch (info->fix.visual) {
360 case FB_VISUAL_PSEUDOCOLOR: 360 case FB_VISUAL_PSEUDOCOLOR:
361 if (regno > 255)
362 return 1;
361 rgb = ((red & 0xff00) << 8) | (green & 0xff00) | 363 rgb = ((red & 0xff00) << 8) | (green & 0xff00) |
362 ((blue & 0xff00) >> 8); 364 ((blue & 0xff00) >> 8);
363 365
diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c
index e759895bf3d3..f0af911a096d 100644
--- a/drivers/video/geode/display_gx.c
+++ b/drivers/video/geode/display_gx.c
@@ -17,7 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18#include <asm/div64.h> 18#include <asm/div64.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
20#include <asm/geode.h> 20#include <linux/cs5535.h>
21 21
22#include "gxfb.h" 22#include "gxfb.h"
23 23
@@ -25,7 +25,7 @@ unsigned int gx_frame_buffer_size(void)
25{ 25{
26 unsigned int val; 26 unsigned int val;
27 27
28 if (!geode_has_vsa2()) { 28 if (!cs5535_has_vsa2()) {
29 uint32_t hi, lo; 29 uint32_t hi, lo;
30 30
31 /* The number of pages is (PMAX - PMIN)+1 */ 31 /* The number of pages is (PMAX - PMIN)+1 */
diff --git a/drivers/video/geode/gxfb.h b/drivers/video/geode/gxfb.h
index 16a96f8fd8c5..d19e9378b0c0 100644
--- a/drivers/video/geode/gxfb.h
+++ b/drivers/video/geode/gxfb.h
@@ -340,7 +340,7 @@ static inline void write_fp(struct gxfb_par *par, int reg, uint32_t val)
340} 340}
341 341
342 342
343/* MSRs are defined in asm/geode.h; their bitfields are here */ 343/* MSRs are defined in linux/cs5535.h; their bitfields are here */
344 344
345#define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (1 << 3) 345#define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (1 << 3)
346#define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (1 << 2) 346#define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (1 << 2)
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 2552cac39e1c..b3e639d1e12c 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -32,7 +32,7 @@
32#include <linux/suspend.h> 32#include <linux/suspend.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <asm/geode.h> 35#include <linux/cs5535.h>
36 36
37#include "gxfb.h" 37#include "gxfb.h"
38 38
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index 6a51448fd3f7..cc781c00f75d 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -1,3 +1,13 @@
1/* Geode LX framebuffer driver
2 *
3 * Copyright (C) 2006-2007, Advanced Micro Devices,Inc.
4 * Copyright (c) 2008 Andres Salomon <dilinger@debian.org>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
1#ifndef _LXFB_H_ 11#ifndef _LXFB_H_
2#define _LXFB_H_ 12#define _LXFB_H_
3 13
@@ -409,7 +419,7 @@ static inline void write_fp(struct lxfb_par *par, int reg, uint32_t val)
409} 419}
410 420
411 421
412/* MSRs are defined in asm/geode.h; their bitfields are here */ 422/* MSRs are defined in linux/cs5535.h; their bitfields are here */
413 423
414#define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */ 424#define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */
415#define MSR_GLCP_DOTPLL_HALFPIX (1 << 24) 425#define MSR_GLCP_DOTPLL_HALFPIX (1 << 24)
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index b1cd49c99356..0e5d8c7c3eba 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -13,7 +13,7 @@
13#include <linux/fb.h> 13#include <linux/fb.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <asm/geode.h> 16#include <linux/cs5535.h>
17 17
18#include "lxfb.h" 18#include "lxfb.h"
19 19
@@ -307,7 +307,7 @@ unsigned int lx_framebuffer_size(void)
307{ 307{
308 unsigned int val; 308 unsigned int val;
309 309
310 if (!geode_has_vsa2()) { 310 if (!cs5535_has_vsa2()) {
311 uint32_t hi, lo; 311 uint32_t hi, lo;
312 312
313 /* The number of pages is (PMAX - PMIN)+1 */ 313 /* The number of pages is (PMAX - PMIN)+1 */
diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/geode/suspend_gx.c
index 9aff32ef8bb6..1bb043d70c64 100644
--- a/drivers/video/geode/suspend_gx.c
+++ b/drivers/video/geode/suspend_gx.c
@@ -10,7 +10,7 @@
10#include <linux/fb.h> 10#include <linux/fb.h>
11#include <asm/io.h> 11#include <asm/io.h>
12#include <asm/msr.h> 12#include <asm/msr.h>
13#include <asm/geode.h> 13#include <linux/cs5535.h>
14#include <asm/delay.h> 14#include <asm/delay.h>
15 15
16#include "gxfb.h" 16#include "gxfb.h"
diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c
index b8d52a8360db..6082f653c68a 100644
--- a/drivers/video/geode/video_gx.c
+++ b/drivers/video/geode/video_gx.c
@@ -16,7 +16,7 @@
16#include <asm/io.h> 16#include <asm/io.h>
17#include <asm/delay.h> 17#include <asm/delay.h>
18#include <asm/msr.h> 18#include <asm/msr.h>
19#include <asm/geode.h> 19#include <linux/cs5535.h>
20 20
21#include "gxfb.h" 21#include "gxfb.h"
22 22
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index e7116a6d82d3..73c83a8de2d3 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -456,7 +456,7 @@ static int hitfb_resume(struct device *dev)
456 return 0; 456 return 0;
457} 457}
458 458
459static struct dev_pm_ops hitfb_dev_pm_ops = { 459static const struct dev_pm_ops hitfb_dev_pm_ops = {
460 .suspend = hitfb_suspend, 460 .suspend = hitfb_suspend,
461 .resume = hitfb_resume, 461 .resume = hitfb_resume,
462}; 462};
diff --git a/drivers/video/i810/i810_dvt.c b/drivers/video/i810/i810_dvt.c
index 27fa703a2e0a..b4b3670667ab 100644
--- a/drivers/video/i810/i810_dvt.c
+++ b/drivers/video/i810/i810_dvt.c
@@ -212,24 +212,29 @@ inline void round_off_yres(u32 *xres, u32 *yres)
212 *yres = (*xres * 3) >> 2; 212 *yres = (*xres * 3) >> 2;
213} 213}
214 214
215void i810fb_encode_registers(const struct fb_var_screeninfo *var, 215static int i810fb_find_best_mode(u32 xres, u32 yres, u32 pixclock)
216 struct i810fb_par *par, u32 xres, u32 yres)
217{ 216{
218 u32 diff = 0, diff_best = 0xFFFFFFFF, i = 0, i_best = 0; 217 u32 diff = 0, diff_best = 0xFFFFFFFF, i = 0, i_best = 0;
219 u8 hfl; 218 u8 hfl = (u8) ((xres >> 3) - 1);
220 219
221 hfl = (u8) ((xres >> 3) - 1);
222 for (i = 0; i < ARRAY_SIZE(std_modes); i++) { 220 for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
223 if (std_modes[i].cr01 == hfl) { 221 if (std_modes[i].cr01 == hfl) {
224 if (std_modes[i].pixclock <= par->regs.pixclock) 222 if (std_modes[i].pixclock <= pixclock)
225 diff = par->regs.pixclock - 223 diff = pixclock - std_modes[i].pixclock;
226 std_modes[i].pixclock;
227 if (diff < diff_best) { 224 if (diff < diff_best) {
228 i_best = i; 225 i_best = i;
229 diff_best = diff; 226 diff_best = diff;
230 } 227 }
231 } 228 }
232 } 229 }
230 return i_best;
231}
232
233void i810fb_encode_registers(const struct fb_var_screeninfo *var,
234 struct i810fb_par *par, u32 xres, u32 yres)
235{
236 u32 i_best = i810fb_find_best_mode(xres, yres, par->regs.pixclock);
237
233 par->regs = std_modes[i_best]; 238 par->regs = std_modes[i_best];
234 239
235 /* overlay */ 240 /* overlay */
@@ -239,36 +244,36 @@ void i810fb_encode_registers(const struct fb_var_screeninfo *var,
239 244
240void i810fb_fill_var_timings(struct fb_var_screeninfo *var) 245void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
241{ 246{
242 struct i810fb_par par;
243 u32 total, xres, yres; 247 u32 total, xres, yres;
248 u32 mode, pixclock;
244 249
245 xres = var->xres; 250 xres = var->xres;
246 yres = var->yres; 251 yres = var->yres;
247 252
248 par.regs.pixclock = 1000000000/var->pixclock; 253 pixclock = 1000000000 / var->pixclock;
249 i810fb_encode_registers(var, &par, xres, yres); 254 mode = i810fb_find_best_mode(xres, yres, pixclock);
250 255
251 total = ((par.regs.cr00 | (par.regs.cr35 & 1) << 8) + 3) << 3; 256 total = (std_modes[mode].cr00 | (std_modes[mode].cr35 & 1) << 8) + 3;
257 total <<= 3;
252 258
253 var->pixclock = 1000000000/par.regs.pixclock; 259 var->pixclock = 1000000000 / std_modes[mode].pixclock;
254 var->right_margin = (par.regs.cr04 << 3) - xres; 260 var->right_margin = (std_modes[mode].cr04 << 3) - xres;
255 var->hsync_len = ((par.regs.cr05 & 0x1F) - 261 var->hsync_len = ((std_modes[mode].cr05 & 0x1F) -
256 (par.regs.cr04 & 0x1F)) << 3; 262 (std_modes[mode].cr04 & 0x1F)) << 3;
257 var->left_margin = (total - (xres + var->right_margin + 263 var->left_margin = (total - (xres + var->right_margin +
258 var->hsync_len)); 264 var->hsync_len));
259 var->sync = FB_SYNC_ON_GREEN; 265 var->sync = FB_SYNC_ON_GREEN;
260 if (~(par.regs.msr & (1 << 6))) 266 if (~(std_modes[mode].msr & (1 << 6)))
261 var->sync |= FB_SYNC_HOR_HIGH_ACT; 267 var->sync |= FB_SYNC_HOR_HIGH_ACT;
262 if (~(par.regs.msr & (1 << 7))) 268 if (~(std_modes[mode].msr & (1 << 7)))
263 var->sync |= FB_SYNC_VERT_HIGH_ACT; 269 var->sync |= FB_SYNC_VERT_HIGH_ACT;
264 270
265 271 total = (std_modes[mode].cr06 | (std_modes[mode].cr30 & 0xF) << 8) + 2;
266 total = ((par.regs.cr06 | (par.regs.cr30 & 0x0F) << 8)) + 2; 272 var->lower_margin = (std_modes[mode].cr10 |
267 var->lower_margin = (par.regs.cr10 | 273 (std_modes[mode].cr32 & 0x0F) << 8) - yres;
268 (par.regs.cr32 & 0x0F) << 8) - yres; 274 var->vsync_len = (std_modes[mode].cr11 & 0x0F) -
269 var->vsync_len = (par.regs.cr11 & 0x0F) - (var->lower_margin & 0x0F); 275 (var->lower_margin & 0x0F);
270 var->upper_margin = total - (yres + var->lower_margin + 276 var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
271 var->vsync_len);
272} 277}
273 278
274u32 i810_get_watermark(struct fb_var_screeninfo *var, 279u32 i810_get_watermark(struct fb_var_screeninfo *var,
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 0cafd642fbc0..5ba399991050 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -874,6 +874,9 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
874 if (bailearly == 18) 874 if (bailearly == 18)
875 bailout(dinfo); 875 bailout(dinfo);
876 876
877 /* read active pipe */
878 dinfo->pipe = intelfbhw_active_pipe(&dinfo->save_state);
879
877 /* Cursor initialisation */ 880 /* Cursor initialisation */
878 if (dinfo->hwcursor) { 881 if (dinfo->hwcursor) {
879 intelfbhw_cursor_init(dinfo); 882 intelfbhw_cursor_init(dinfo);
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
index 0689f97c5238..81627466804e 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/intelfb/intelfbhw.c
@@ -469,6 +469,32 @@ void intelfbhw_do_blank(int blank, struct fb_info *info)
469} 469}
470 470
471 471
472/* Check which pipe is connected to an active display plane. */
473int intelfbhw_active_pipe(const struct intelfb_hwstate *hw)
474{
475 int pipe = -1;
476
477 /* keep old default behaviour - prefer PIPE_A */
478 if (hw->disp_b_ctrl & DISPPLANE_PLANE_ENABLE) {
479 pipe = (hw->disp_b_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
480 pipe &= PIPE_MASK;
481 if (unlikely(pipe == PIPE_A))
482 return PIPE_A;
483 }
484 if (hw->disp_a_ctrl & DISPPLANE_PLANE_ENABLE) {
485 pipe = (hw->disp_a_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
486 pipe &= PIPE_MASK;
487 if (likely(pipe == PIPE_A))
488 return PIPE_A;
489 }
490 /* Impossible that no pipe is selected - return PIPE_A */
491 WARN_ON(pipe == -1);
492 if (unlikely(pipe == -1))
493 pipe = PIPE_A;
494
495 return pipe;
496}
497
472void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno, 498void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno,
473 unsigned red, unsigned green, unsigned blue, 499 unsigned red, unsigned green, unsigned blue,
474 unsigned transp) 500 unsigned transp)
@@ -1019,7 +1045,7 @@ int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
1019 struct intelfb_hwstate *hw, 1045 struct intelfb_hwstate *hw,
1020 struct fb_var_screeninfo *var) 1046 struct fb_var_screeninfo *var)
1021{ 1047{
1022 int pipe = PIPE_A; 1048 int pipe = intelfbhw_active_pipe(hw);
1023 u32 *dpll, *fp0, *fp1; 1049 u32 *dpll, *fp0, *fp1;
1024 u32 m1, m2, n, p1, p2, clock_target, clock; 1050 u32 m1, m2, n, p1, p2, clock_target, clock;
1025 u32 hsync_start, hsync_end, hblank_start, hblank_end, htotal, hactive; 1051 u32 hsync_start, hsync_end, hblank_start, hblank_end, htotal, hactive;
@@ -1033,12 +1059,6 @@ int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
1033 /* Disable VGA */ 1059 /* Disable VGA */
1034 hw->vgacntrl |= VGA_DISABLE; 1060 hw->vgacntrl |= VGA_DISABLE;
1035 1061
1036 /* Check whether pipe A or pipe B is enabled. */
1037 if (hw->pipe_a_conf & PIPECONF_ENABLE)
1038 pipe = PIPE_A;
1039 else if (hw->pipe_b_conf & PIPECONF_ENABLE)
1040 pipe = PIPE_B;
1041
1042 /* Set which pipe's registers will be set. */ 1062 /* Set which pipe's registers will be set. */
1043 if (pipe == PIPE_B) { 1063 if (pipe == PIPE_B) {
1044 dpll = &hw->dpll_b; 1064 dpll = &hw->dpll_b;
@@ -1262,7 +1282,6 @@ int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
1262int intelfbhw_program_mode(struct intelfb_info *dinfo, 1282int intelfbhw_program_mode(struct intelfb_info *dinfo,
1263 const struct intelfb_hwstate *hw, int blank) 1283 const struct intelfb_hwstate *hw, int blank)
1264{ 1284{
1265 int pipe = PIPE_A;
1266 u32 tmp; 1285 u32 tmp;
1267 const u32 *dpll, *fp0, *fp1, *pipe_conf; 1286 const u32 *dpll, *fp0, *fp1, *pipe_conf;
1268 const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss; 1287 const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss;
@@ -1272,7 +1291,7 @@ int intelfbhw_program_mode(struct intelfb_info *dinfo,
1272 u32 src_size_reg; 1291 u32 src_size_reg;
1273 u32 count, tmp_val[3]; 1292 u32 count, tmp_val[3];
1274 1293
1275 /* Assume single pipe, display plane A, analog CRT. */ 1294 /* Assume single pipe */
1276 1295
1277#if VERBOSE > 0 1296#if VERBOSE > 0
1278 DBG_MSG("intelfbhw_program_mode\n"); 1297 DBG_MSG("intelfbhw_program_mode\n");
@@ -1283,15 +1302,9 @@ int intelfbhw_program_mode(struct intelfb_info *dinfo,
1283 tmp |= VGA_DISABLE; 1302 tmp |= VGA_DISABLE;
1284 OUTREG(VGACNTRL, tmp); 1303 OUTREG(VGACNTRL, tmp);
1285 1304
1286 /* Check whether pipe A or pipe B is enabled. */ 1305 dinfo->pipe = intelfbhw_active_pipe(hw);
1287 if (hw->pipe_a_conf & PIPECONF_ENABLE)
1288 pipe = PIPE_A;
1289 else if (hw->pipe_b_conf & PIPECONF_ENABLE)
1290 pipe = PIPE_B;
1291
1292 dinfo->pipe = pipe;
1293 1306
1294 if (pipe == PIPE_B) { 1307 if (dinfo->pipe == PIPE_B) {
1295 dpll = &hw->dpll_b; 1308 dpll = &hw->dpll_b;
1296 fp0 = &hw->fpb0; 1309 fp0 = &hw->fpb0;
1297 fp1 = &hw->fpb1; 1310 fp1 = &hw->fpb1;
diff --git a/drivers/video/intelfb/intelfbhw.h b/drivers/video/intelfb/intelfbhw.h
index 0b076bac321b..216ca20f259f 100644
--- a/drivers/video/intelfb/intelfbhw.h
+++ b/drivers/video/intelfb/intelfbhw.h
@@ -604,5 +604,6 @@ extern void intelfbhw_cursor_reset(struct intelfb_info *dinfo);
604extern int intelfbhw_enable_irq(struct intelfb_info *dinfo); 604extern int intelfbhw_enable_irq(struct intelfb_info *dinfo);
605extern void intelfbhw_disable_irq(struct intelfb_info *dinfo); 605extern void intelfbhw_disable_irq(struct intelfb_info *dinfo);
606extern int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe); 606extern int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe);
607extern int intelfbhw_active_pipe(const struct intelfb_hwstate *hw);
607 608
608#endif /* _INTELFBHW_H */ 609#endif /* _INTELFBHW_H */
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index 09f6e045d5be..c15f8a57498e 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -368,7 +368,8 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
368 M1064_XDVICLKCTRL_C1DVICLKEN | 368 M1064_XDVICLKCTRL_C1DVICLKEN |
369 M1064_XDVICLKCTRL_DVILOOPCTL | 369 M1064_XDVICLKCTRL_DVILOOPCTL |
370 M1064_XDVICLKCTRL_P1LOOPBWDTCTL; 370 M1064_XDVICLKCTRL_P1LOOPBWDTCTL;
371 matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); 371 /* Setting this breaks PC systems so don't do it */
372 /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */
372 matroxfb_DAC_out(minfo, M1064_XPWRCTRL, 373 matroxfb_DAC_out(minfo, M1064_XPWRCTRL,
373 xpwrctrl); 374 xpwrctrl);
374 375
diff --git a/drivers/video/maxinefb.c b/drivers/video/maxinefb.c
index 5e91c2b30af9..7854c7a37dc5 100644
--- a/drivers/video/maxinefb.c
+++ b/drivers/video/maxinefb.c
@@ -92,6 +92,9 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
92 /* value to be written into the palette reg. */ 92 /* value to be written into the palette reg. */
93 unsigned long hw_colorvalue = 0; 93 unsigned long hw_colorvalue = 0;
94 94
95 if (regno > 255)
96 return 1;
97
95 red >>= 8; /* The cmap fields are 16 bits */ 98 red >>= 8; /* The cmap fields are 16 bits */
96 green >>= 8; /* wide, but the hardware colormap */ 99 green >>= 8; /* wide, but the hardware colormap */
97 blue >>= 8; /* registers are only 8 bits wide */ 100 blue >>= 8; /* registers are only 8 bits wide */
diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/mb862xx/Makefile
index 07664814bb1d..d7777714166b 100644
--- a/drivers/video/mb862xx/Makefile
+++ b/drivers/video/mb862xx/Makefile
@@ -2,4 +2,4 @@
2# Makefile for the MB862xx framebuffer driver 2# Makefile for the MB862xx framebuffer driver
3# 3#
4 4
5obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o 5obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o mb862xxfb_accel.o
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index a28e3cfbbf70..fabb0c59a211 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -214,6 +214,8 @@ static int mb862xxfb_set_par(struct fb_info *fbi)
214 unsigned long reg, sc; 214 unsigned long reg, sc;
215 215
216 dev_dbg(par->dev, "%s\n", __func__); 216 dev_dbg(par->dev, "%s\n", __func__);
217 if (par->type == BT_CORALP)
218 mb862xxfb_init_accel(fbi, fbi->var.xres);
217 219
218 if (par->pre_init) 220 if (par->pre_init)
219 return 0; 221 return 0;
@@ -453,6 +455,18 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
453 ptr += sprintf(ptr, "%08x = %08x\n", 455 ptr += sprintf(ptr, "%08x = %08x\n",
454 reg, inreg(disp, reg)); 456 reg, inreg(disp, reg));
455 457
458 for (reg = 0x400; reg <= 0x410; reg += 4)
459 ptr += sprintf(ptr, "geo %08x = %08x\n",
460 reg, inreg(geo, reg));
461
462 for (reg = 0x400; reg <= 0x410; reg += 4)
463 ptr += sprintf(ptr, "draw %08x = %08x\n",
464 reg, inreg(draw, reg));
465
466 for (reg = 0x440; reg <= 0x450; reg += 4)
467 ptr += sprintf(ptr, "draw %08x = %08x\n",
468 reg, inreg(draw, reg));
469
456 return ptr - buf; 470 return ptr - buf;
457} 471}
458 472
diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/mb862xx/mb862xxfb.h
index c4c8f4dd2217..d7e7cb76bbf2 100644
--- a/drivers/video/mb862xx/mb862xxfb.h
+++ b/drivers/video/mb862xx/mb862xxfb.h
@@ -61,6 +61,8 @@ struct mb862xxfb_par {
61 u32 pseudo_palette[16]; 61 u32 pseudo_palette[16];
62}; 62};
63 63
64extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
65
64#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC) 66#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC)
65#error "Select Lime GDC or CoralP/Carmine support, but not both together" 67#error "Select Lime GDC or CoralP/Carmine support, but not both together"
66#endif 68#endif
diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
new file mode 100644
index 000000000000..049256052b1a
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
@@ -0,0 +1,331 @@
1/*
2 * drivers/mb862xx/mb862xxfb_accel.c
3 *
4 * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support
5 *
6 * (C) 2007 Alexander Shishkin <virtuoso@slind.org>
7 * (C) 2009 Valentin Sitdikov <valentin.sitdikov@siemens.com>
8 * (C) 2009 Siemens AG
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#include <linux/fb.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/pci.h>
20#if defined(CONFIG_OF)
21#include <linux/of_platform.h>
22#endif
23#include "mb862xxfb.h"
24#include "mb862xx_reg.h"
25#include "mb862xxfb_accel.h"
26
27static void mb862xxfb_write_fifo(u32 count, u32 *data, struct fb_info *info)
28{
29 struct mb862xxfb_par *par = info->par;
30 static u32 free;
31
32 u32 total = 0;
33 while (total < count) {
34 if (free) {
35 outreg(geo, GDC_GEO_REG_INPUT_FIFO, data[total]);
36 total++;
37 free--;
38 } else {
39 free = (u32) inreg(draw, GDC_REG_FIFO_COUNT);
40 }
41 }
42}
43
44static void mb86290fb_copyarea(struct fb_info *info,
45 const struct fb_copyarea *area)
46{
47 __u32 cmd[6];
48
49 cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
50 /* Set raster operation */
51 cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
52 cmd[2] = GDC_TYPE_BLTCOPYP << 24;
53
54 if (area->sx >= area->dx && area->sy >= area->dy)
55 cmd[2] |= GDC_CMD_BLTCOPY_TOP_LEFT << 16;
56 else if (area->sx >= area->dx && area->sy <= area->dy)
57 cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_LEFT << 16;
58 else if (area->sx <= area->dx && area->sy >= area->dy)
59 cmd[2] |= GDC_CMD_BLTCOPY_TOP_RIGHT << 16;
60 else
61 cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_RIGHT << 16;
62
63 cmd[3] = (area->sy << 16) | area->sx;
64 cmd[4] = (area->dy << 16) | area->dx;
65 cmd[5] = (area->height << 16) | area->width;
66 mb862xxfb_write_fifo(6, cmd, info);
67}
68
69/*
70 * Fill in the cmd array /GDC FIFO commands/ to draw a 1bit image.
71 * Make sure cmd has enough room!
72 */
73static void mb86290fb_imageblit1(u32 *cmd, u16 step, u16 dx, u16 dy,
74 u16 width, u16 height, u32 fgcolor,
75 u32 bgcolor, const struct fb_image *image,
76 struct fb_info *info)
77{
78 int i;
79 unsigned const char *line;
80 u16 bytes;
81
82 /* set colors and raster operation regs */
83 cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
84 /* Set raster operation */
85 cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
86 cmd[2] =
87 (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16);
88 cmd[3] = fgcolor;
89 cmd[4] =
90 (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_BACK_COLOR << 16);
91 cmd[5] = bgcolor;
92
93 i = 0;
94 line = image->data;
95 bytes = (image->width + 7) >> 3;
96
97 /* and the image */
98 cmd[6] = (GDC_TYPE_DRAWBITMAPP << 24) |
99 (GDC_CMD_BITMAP << 16) | (2 + (step * height));
100 cmd[7] = (dy << 16) | dx;
101 cmd[8] = (height << 16) | width;
102
103 while (i < height) {
104 memcpy(&cmd[9 + i * step], line, step << 2);
105#ifdef __LITTLE_ENDIAN
106 {
107 int k = 0;
108 for (k = 0; k < step; k++)
109 cmd[9 + i * step + k] =
110 cpu_to_be32(cmd[9 + i * step + k]);
111 }
112#endif
113 line += bytes;
114 i++;
115 }
116}
117
118/*
119 * Fill in the cmd array /GDC FIFO commands/ to draw a 8bit image.
120 * Make sure cmd has enough room!
121 */
122static void mb86290fb_imageblit8(u32 *cmd, u16 step, u16 dx, u16 dy,
123 u16 width, u16 height, u32 fgcolor,
124 u32 bgcolor, const struct fb_image *image,
125 struct fb_info *info)
126{
127 int i, j;
128 unsigned const char *line, *ptr;
129 u16 bytes;
130
131 cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) |
132 (GDC_CMD_BLT_DRAW << 16) | (2 + (height * step));
133 cmd[1] = (dy << 16) | dx;
134 cmd[2] = (height << 16) | width;
135
136 i = 0;
137 line = ptr = image->data;
138 bytes = image->width;
139
140 while (i < height) {
141 ptr = line;
142 for (j = 0; j < step; j++) {
143 cmd[3 + i * step + j] =
144 (((u32 *) (info->pseudo_palette))[*ptr]) & 0xffff;
145 ptr++;
146 cmd[3 + i * step + j] |=
147 ((((u32 *) (info->
148 pseudo_palette))[*ptr]) & 0xffff) << 16;
149 ptr++;
150 }
151
152 line += bytes;
153 i++;
154 }
155}
156
157/*
158 * Fill in the cmd array /GDC FIFO commands/ to draw a 16bit image.
159 * Make sure cmd has enough room!
160 */
161static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy,
162 u16 width, u16 height, u32 fgcolor,
163 u32 bgcolor, const struct fb_image *image,
164 struct fb_info *info)
165{
166 int i;
167 unsigned const char *line;
168 u16 bytes;
169
170 i = 0;
171 line = image->data;
172 bytes = image->width << 1;
173
174 cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) |
175 (GDC_CMD_BLT_DRAW << 16) | (2 + step * height);
176 cmd[1] = (dy << 16) | dx;
177 cmd[2] = (height << 16) | width;
178
179 while (i < height) {
180 memcpy(&cmd[3 + i * step], line, step);
181 line += bytes;
182 i++;
183 }
184}
185
186static void mb86290fb_imageblit(struct fb_info *info,
187 const struct fb_image *image)
188{
189 int mdr;
190 u32 *cmd = NULL;
191 void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32,
192 const struct fb_image *, struct fb_info *) = NULL;
193 u32 cmdlen;
194 u32 fgcolor = 0, bgcolor = 0;
195 u16 step;
196
197 u16 width = image->width, height = image->height;
198 u16 dx = image->dx, dy = image->dy;
199 int x2, y2, vxres, vyres;
200
201 mdr = (GDC_ROP_COPY << 9);
202 x2 = image->dx + image->width;
203 y2 = image->dy + image->height;
204 vxres = info->var.xres_virtual;
205 vyres = info->var.yres_virtual;
206 x2 = min(x2, vxres);
207 y2 = min(y2, vyres);
208 width = x2 - dx;
209 height = y2 - dy;
210
211 switch (image->depth) {
212 case 1:
213 step = (width + 31) >> 5;
214 cmdlen = 9 + height * step;
215 cmdfn = mb86290fb_imageblit1;
216 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
217 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
218 fgcolor =
219 ((u32 *) (info->pseudo_palette))[image->fg_color];
220 bgcolor =
221 ((u32 *) (info->pseudo_palette))[image->bg_color];
222 } else {
223 fgcolor = image->fg_color;
224 bgcolor = image->bg_color;
225 }
226
227 break;
228
229 case 8:
230 step = (width + 1) >> 1;
231 cmdlen = 3 + height * step;
232 cmdfn = mb86290fb_imageblit8;
233 break;
234
235 case 16:
236 step = (width + 1) >> 1;
237 cmdlen = 3 + height * step;
238 cmdfn = mb86290fb_imageblit16;
239 break;
240
241 default:
242 cfb_imageblit(info, image);
243 return;
244 }
245
246 cmd = kmalloc(cmdlen * 4, GFP_DMA);
247 if (!cmd)
248 return cfb_imageblit(info, image);
249 cmdfn(cmd, step, dx, dy, width, height, fgcolor, bgcolor, image, info);
250 mb862xxfb_write_fifo(cmdlen, cmd, info);
251 kfree(cmd);
252}
253
254static void mb86290fb_fillrect(struct fb_info *info,
255 const struct fb_fillrect *rect)
256{
257
258 u32 x2, y2, vxres, vyres, height, width, fg;
259 u32 cmd[7];
260
261 vxres = info->var.xres_virtual;
262 vyres = info->var.yres_virtual;
263
264 if (!rect->width || !rect->height || rect->dx > vxres
265 || rect->dy > vyres)
266 return;
267
268 /* We could use hardware clipping but on many cards you get around
269 * hardware clipping by writing to framebuffer directly. */
270 x2 = rect->dx + rect->width;
271 y2 = rect->dy + rect->height;
272 x2 = min(x2, vxres);
273 y2 = min(y2, vyres);
274 width = x2 - rect->dx;
275 height = y2 - rect->dy;
276 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
277 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
278 fg = ((u32 *) (info->pseudo_palette))[rect->color];
279 else
280 fg = rect->color;
281
282 switch (rect->rop) {
283
284 case ROP_XOR:
285 /* Set raster operation */
286 cmd[1] = (2 << 7) | (GDC_ROP_XOR << 9);
287 break;
288
289 case ROP_COPY:
290 /* Set raster operation */
291 cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
292 break;
293
294 }
295
296 cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
297 /* cmd[1] set earlier */
298 cmd[2] =
299 (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16);
300 cmd[3] = fg;
301 cmd[4] = (GDC_TYPE_DRAWRECTP << 24) | (GDC_CMD_BLT_FILL << 16);
302 cmd[5] = (rect->dy << 16) | (rect->dx);
303 cmd[6] = (height << 16) | width;
304
305 mb862xxfb_write_fifo(7, cmd, info);
306}
307
308void mb862xxfb_init_accel(struct fb_info *info, int xres)
309{
310 struct mb862xxfb_par *par = info->par;
311
312 if (info->var.bits_per_pixel == 32) {
313 info->fbops->fb_fillrect = cfb_fillrect;
314 info->fbops->fb_copyarea = cfb_copyarea;
315 info->fbops->fb_imageblit = cfb_imageblit;
316 } else {
317 outreg(disp, GC_L0EM, 3);
318 info->fbops->fb_fillrect = mb86290fb_fillrect;
319 info->fbops->fb_copyarea = mb86290fb_copyarea;
320 info->fbops->fb_imageblit = mb86290fb_imageblit;
321 }
322 outreg(draw, GDC_REG_DRAW_BASE, 0);
323 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
324 outreg(draw, GDC_REG_X_RESOLUTION, xres);
325
326 info->flags |=
327 FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
328 FBINFO_HWACCEL_IMAGEBLIT;
329 info->fix.accel = 0xff; /*FIXME: add right define */
330}
331EXPORT_SYMBOL(mb862xxfb_init_accel);
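
A minimal sketch (plain C, not part of the patch; the 8x16 glyph size is an assumed example) of the command-buffer sizing used by the 1bpp imageblit path above: step is the number of 32-bit words per scanline and cmdlen the total words handed to mb862xxfb_write_fifo().

#include <stdio.h>

int main(void)
{
	unsigned int width = 8, height = 16;       /* assumed 8x16 font glyph */
	unsigned int step = (width + 31) >> 5;     /* 32-bit words per scanline -> 1 */
	unsigned int cmdlen = 9 + height * step;   /* header words + bitmap words -> 25 */

	/* the driver's kmalloc(cmdlen * 4, GFP_DMA) would ask for 100 bytes here */
	printf("step=%u cmdlen=%u bytes=%u\n", step, cmdlen, cmdlen * 4);
	return 0;
}
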
diff --git a/drivers/video/mb862xx/mb862xxfb_accel.h b/drivers/video/mb862xx/mb862xxfb_accel.h
new file mode 100644
index 000000000000..96a2dfef0f60
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb_accel.h
@@ -0,0 +1,203 @@
1#ifndef __MB826XXFB_ACCEL_H__
2#define __MB826XXFB_ACCEL_H__
3
4/* registers */
5#define GDC_GEO_REG_INPUT_FIFO 0x00000400L
6
7/* Special Registers */
8#define GDC_REG_CTRL 0x00000400L
9#define GDC_REG_FIFO_STATUS 0x00000404L
10#define GDC_REG_FIFO_COUNT 0x00000408L
11#define GDC_REG_SETUP_STATUS 0x0000040CL
12#define GDC_REG_DDA_STATUS 0x00000410L
13#define GDC_REG_ENGINE_STATUS 0x00000414L
14#define GDC_REG_ERROR_STATUS 0x00000418L
15#define GDC_REG_MODE_MISC 0x00000420L /* MDR0 */
16#define GDC_REG_MODE_LINE 0x00000424L /* MDR1 */
17#define GDC_REG_MODE_POLYGON 0x00000428L /* MDR2 */
18#define GDC_REG_MODE_TEXTURE 0x0000042CL /* MDR3 */
19#define GDC_REG_MODE_BITMAP 0x00000430L /* MDR4 */
20#define GDC_REG_MODE_EXTENSION 0x0000043CL /* MDR7 */
21
22/* Configuration Registers */
23#define GDC_REG_DRAW_BASE 0x00000440L
24#define GDC_REG_X_RESOLUTION 0x00000444L
25#define GDC_REG_Z_BASE 0x00000448L
26#define GDC_REG_TEXTURE_BASE 0x0000044CL
27#define GDC_REG_POLYGON_FLAG_BASE 0x00000450L
28#define GDC_REG_CLIP_XMIN 0x00000454L
29#define GDC_REG_CLIP_XMAX 0x00000458L
30#define GDC_REG_CLIP_YMIN 0x0000045CL
31#define GDC_REG_CLIP_YMAX 0x00000460L
32#define GDC_REG_TEXURE_SIZE 0x00000464L
33#define GDC_REG_TILE_SIZE 0x00000468L
34#define GDC_REG_TEX_BUF_OFFSET 0x0000046CL
35
36/* for MB86293 or later */
37#define GDC_REG_ALPHA_MAP_BASE 0x00000474L /* ABR */
38
39/* Constant Registers */
40#define GDC_REG_FOREGROUND_COLOR 0x00000480L
41#define GDC_REG_BACKGROUND_COLOR 0x00000484L
42#define GDC_REG_ALPHA 0x00000488L
43#define GDC_REG_LINE_PATTERN 0x0000048CL
44#define GDC_REG_TEX_BORDER_COLOR 0x00000494L
45#define GDC_REG_LINE_PATTERN_OFFSET 0x000003E0L
46
47/* Command Code */
48#define GDC_CMD_PIXEL 0x00000000L
49#define GDC_CMD_PIXEL_Z 0x00000001L
50
51#define GDC_CMD_X_VECTOR 0x00000020L
52#define GDC_CMD_Y_VECTOR 0x00000021L
53#define GDC_CMD_X_VECTOR_NOEND 0x00000022L
54#define GDC_CMD_Y_VECTOR_NOEND 0x00000023L
55#define GDC_CMD_X_VECTOR_BLPO 0x00000024L
56#define GDC_CMD_Y_VECTOR_BLPO 0x00000025L
57#define GDC_CMD_X_VECTOR_NOEND_BLPO 0x00000026L
58#define GDC_CMD_Y_VECTOR_NOEND_BLPO 0x00000027L
59#define GDC_CMD_AA_X_VECTOR 0x00000028L
60#define GDC_CMD_AA_Y_VECTOR 0x00000029L
61#define GDC_CMD_AA_X_VECTOR_NOEND 0x0000002AL
62#define GDC_CMD_AA_Y_VECTOR_NOEND 0x0000002BL
63#define GDC_CMD_AA_X_VECTOR_BLPO 0x0000002CL
64#define GDC_CMD_AA_Y_VECTOR_BLPO 0x0000002DL
65#define GDC_CMD_AA_X_VECTOR_NOEND_BLPO 0x0000002EL
66#define GDC_CMD_AA_Y_VECTOR_NOEND_BLPO 0x0000002FL
67
68#define GDC_CMD_0_VECTOR 0x00000030L
69#define GDC_CMD_1_VECTOR 0x00000031L
70#define GDC_CMD_0_VECTOR_NOEND 0x00000032L
71#define GDC_CMD_1_VECTOR_NOEND 0x00000033L
72#define GDC_CMD_0_VECTOR_BLPO 0x00000034L
73#define GDC_CMD_1_VECTOR_BLPO 0x00000035L
74#define GDC_CMD_0_VECTOR_NOEND_BLPO 0x00000036L
75#define GDC_CMD_1_VECTOR_NOEND_BLPO 0x00000037L
76#define GDC_CMD_AA_0_VECTOR 0x00000038L
77#define GDC_CMD_AA_1_VECTOR 0x00000039L
78#define GDC_CMD_AA_0_VECTOR_NOEND 0x0000003AL
79#define GDC_CMD_AA_1_VECTOR_NOEND 0x0000003BL
80#define GDC_CMD_AA_0_VECTOR_BLPO 0x0000003CL
81#define GDC_CMD_AA_1_VECTOR_BLPO 0x0000003DL
82#define GDC_CMD_AA_0_VECTOR_NOEND_BLPO 0x0000003EL
83#define GDC_CMD_AA_1_VECTOR_NOEND_BLPO 0x0000003FL
84
85#define GDC_CMD_BLT_FILL 0x00000041L
86#define GDC_CMD_BLT_DRAW 0x00000042L
87#define GDC_CMD_BITMAP 0x00000043L
88#define GDC_CMD_BLTCOPY_TOP_LEFT 0x00000044L
89#define GDC_CMD_BLTCOPY_TOP_RIGHT 0x00000045L
90#define GDC_CMD_BLTCOPY_BOTTOM_LEFT 0x00000046L
91#define GDC_CMD_BLTCOPY_BOTTOM_RIGHT 0x00000047L
92#define GDC_CMD_LOAD_TEXTURE 0x00000048L
93#define GDC_CMD_LOAD_TILE 0x00000049L
94
95#define GDC_CMD_TRAP_RIGHT 0x00000060L
96#define GDC_CMD_TRAP_LEFT 0x00000061L
97#define GDC_CMD_TRIANGLE_FAN 0x00000062L
98#define GDC_CMD_FLAG_TRIANGLE_FAN 0x00000063L
99
100#define GDC_CMD_FLUSH_FB 0x000000C1L
101#define GDC_CMD_FLUSH_Z 0x000000C2L
102
103#define GDC_CMD_POLYGON_BEGIN 0x000000E0L
104#define GDC_CMD_POLYGON_END 0x000000E1L
105#define GDC_CMD_CLEAR_POLY_FLAG 0x000000E2L
106#define GDC_CMD_NORMAL 0x000000FFL
107
108#define GDC_CMD_VECTOR_BLPO_FLAG 0x00040000L
109#define GDC_CMD_FAST_VECTOR_BLPO_FLAG 0x00000004L
110
111/* for MB86293 or later */
112#define GDC_CMD_MDR1 0x00000000L
113#define GDC_CMD_MDR1S 0x00000002L
114#define GDC_CMD_MDR1B 0x00000004L
115#define GDC_CMD_MDR2 0x00000001L
116#define GDC_CMD_MDR2S 0x00000003L
117#define GDC_CMD_MDR2TL 0x00000007L
118#define GDC_CMD_GMDR1E 0x00000010L
119#define GDC_CMD_GMDR2E 0x00000020L
120#define GDC_CMD_OVERLAP_SHADOW_XY 0x00000000L
121#define GDC_CMD_OVERLAP_SHADOW_XY_COMPOSITION 0x00000001L
122#define GDC_CMD_OVERLAP_Z_PACKED_ONBS 0x00000007L
123#define GDC_CMD_OVERLAP_Z_ORIGIN 0x00000000L
124#define GDC_CMD_OVERLAP_Z_NON_TOPLEFT 0x00000001L
125#define GDC_CMD_OVERLAP_Z_BORDER 0x00000002L
126#define GDC_CMD_OVERLAP_Z_SHADOW 0x00000003L
127#define GDC_CMD_BLTCOPY_ALT_ALPHA 0x00000000L /* Reserved */
128#define GDC_CMD_DC_LOGOUT 0x00000000L /* Reserved */
129#define GDC_CMD_BODY_FORE_COLOR 0x00000000L
130#define GDC_CMD_BODY_BACK_COLOR 0x00000001L
131#define GDC_CMD_SHADOW_FORE_COLOR 0x00000002L
132#define GDC_CMD_SHADOW_BACK_COLOR 0x00000003L
133#define GDC_CMD_BORDER_FORE_COLOR 0x00000004L
134#define GDC_CMD_BORDER_BACK_COLOR 0x00000005L
135
136/* Type Code Table */
137#define GDC_TYPE_G_NOP 0x00000020L
138#define GDC_TYPE_G_BEGIN 0x00000021L
139#define GDC_TYPE_G_BEGINCONT 0x00000022L
140#define GDC_TYPE_G_END 0x00000023L
141#define GDC_TYPE_G_VERTEX 0x00000030L
142#define GDC_TYPE_G_VERTEXLOG 0x00000032L
143#define GDC_TYPE_G_VERTEXNOPLOG 0x00000033L
144#define GDC_TYPE_G_INIT 0x00000040L
145#define GDC_TYPE_G_VIEWPORT 0x00000041L
146#define GDC_TYPE_G_DEPTHRANGE 0x00000042L
147#define GDC_TYPE_G_LOADMATRIX 0x00000043L
148#define GDC_TYPE_G_VIEWVOLUMEXYCLIP 0x00000044L
149#define GDC_TYPE_G_VIEWVOLUMEZCLIP 0x00000045L
150#define GDC_TYPE_G_VIEWVOLUMEWCLIP 0x00000046L
151#define GDC_TYPE_SETLVERTEX2I 0x00000072L
152#define GDC_TYPE_SETLVERTEX2IP 0x00000073L
153#define GDC_TYPE_SETMODEREGISTER 0x000000C0L
154#define GDC_TYPE_SETGMODEREGISTER 0x000000C1L
155#define GDC_TYPE_OVERLAPXYOFFT 0x000000C8L
156#define GDC_TYPE_OVERLAPZOFFT 0x000000C9L
157#define GDC_TYPE_DC_LOGOUTADDR 0x000000CCL
158#define GDC_TYPE_SETCOLORREGISTER 0x000000CEL
159#define GDC_TYPE_G_BEGINE 0x000000E1L
160#define GDC_TYPE_G_BEGINCONTE 0x000000E2L
161#define GDC_TYPE_G_ENDE 0x000000E3L
162#define GDC_TYPE_DRAWPIXEL 0x00000000L
163#define GDC_TYPE_DRAWPIXELZ 0x00000001L
164#define GDC_TYPE_DRAWLINE 0x00000002L
165#define GDC_TYPE_DRAWLINE2I 0x00000003L
166#define GDC_TYPE_DRAWLINE2IP 0x00000004L
167#define GDC_TYPE_DRAWTRAP 0x00000005L
168#define GDC_TYPE_DRAWVERTEX2I 0x00000006L
169#define GDC_TYPE_DRAWVERTEX2IP 0x00000007L
170#define GDC_TYPE_DRAWRECTP 0x00000009L
171#define GDC_TYPE_DRAWBITMAPP 0x0000000BL
172#define GDC_TYPE_BLTCOPYP 0x0000000DL
173#define GDC_TYPE_BLTCOPYALTERNATEP 0x0000000FL
174#define GDC_TYPE_LOADTEXTUREP 0x00000011L
175#define GDC_TYPE_BLTTEXTUREP 0x00000013L
176#define GDC_TYPE_BLTCOPYALTALPHABLENDP 0x0000001FL
177#define GDC_TYPE_SETVERTEX2I 0x00000070L
178#define GDC_TYPE_SETVERTEX2IP 0x00000071L
179#define GDC_TYPE_DRAW 0x000000F0L
180#define GDC_TYPE_SETREGISTER 0x000000F1L
181#define GDC_TYPE_SYNC 0x000000FCL
182#define GDC_TYPE_INTERRUPT 0x000000FDL
183#define GDC_TYPE_NOP 0x0
184
185/* Raster operation */
186#define GDC_ROP_CLEAR 0x0000
187#define GDC_ROP_AND 0x0001
188#define GDC_ROP_AND_REVERSE 0x0002
189#define GDC_ROP_COPY 0x0003
190#define GDC_ROP_AND_INVERTED 0x0004
191#define GDC_ROP_NOP 0x0005
192#define GDC_ROP_XOR 0x0006
193#define GDC_ROP_OR 0x0007
194#define GDC_ROP_NOR 0x0008
195#define GDC_ROP_EQUIV 0x0009
196#define GDC_ROP_INVERT 0x000A
197#define GDC_ROP_OR_REVERSE 0x000B
198#define GDC_ROP_COPY_INVERTED 0x000C
199#define GDC_ROP_OR_INVERTED 0x000D
200#define GDC_ROP_NAND 0x000E
201#define GDC_ROP_SET 0x000F
202
203#endif
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 34e4e7995169..0129f1bc3522 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/kernel.h>
16 17
17#undef DEBUG 18#undef DEBUG
18 19
@@ -402,21 +403,6 @@ const struct fb_videomode vesa_modes[] = {
402EXPORT_SYMBOL(vesa_modes); 403EXPORT_SYMBOL(vesa_modes);
403#endif /* CONFIG_FB_MODE_HELPERS */ 404#endif /* CONFIG_FB_MODE_HELPERS */
404 405
405static int my_atoi(const char *name)
406{
407 int val = 0;
408
409 for (;; name++) {
410 switch (*name) {
411 case '0' ... '9':
412 val = 10*val+(*name-'0');
413 break;
414 default:
415 return val;
416 }
417 }
418}
419
420/** 406/**
421 * fb_try_mode - test a video mode 407 * fb_try_mode - test a video mode
422 * @var: frame buffer user defined part of display 408 * @var: frame buffer user defined part of display
@@ -539,7 +525,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
539 namelen = i; 525 namelen = i;
540 if (!refresh_specified && !bpp_specified && 526 if (!refresh_specified && !bpp_specified &&
541 !yres_specified) { 527 !yres_specified) {
542 refresh = my_atoi(&name[i+1]); 528 refresh = simple_strtol(&name[i+1], NULL, 10);
543 refresh_specified = 1; 529 refresh_specified = 1;
544 if (cvt || rb) 530 if (cvt || rb)
545 cvt = 0; 531 cvt = 0;
@@ -549,7 +535,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
549 case '-': 535 case '-':
550 namelen = i; 536 namelen = i;
551 if (!bpp_specified && !yres_specified) { 537 if (!bpp_specified && !yres_specified) {
552 bpp = my_atoi(&name[i+1]); 538 bpp = simple_strtol(&name[i+1], NULL, 10);
553 bpp_specified = 1; 539 bpp_specified = 1;
554 if (cvt || rb) 540 if (cvt || rb)
555 cvt = 0; 541 cvt = 0;
@@ -558,7 +544,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
558 break; 544 break;
559 case 'x': 545 case 'x':
560 if (!yres_specified) { 546 if (!yres_specified) {
561 yres = my_atoi(&name[i+1]); 547 yres = simple_strtol(&name[i+1], NULL, 10);
562 yres_specified = 1; 548 yres_specified = 1;
563 } else 549 } else
564 goto done; 550 goto done;
@@ -586,7 +572,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
586 } 572 }
587 } 573 }
588 if (i < 0 && yres_specified) { 574 if (i < 0 && yres_specified) {
589 xres = my_atoi(name); 575 xres = simple_strtol(name, NULL, 10);
590 res_specified = 1; 576 res_specified = 1;
591 } 577 }
592done: 578done:
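
The modedb.c hunks above replace the driver-local my_atoi() with simple_strtol(&name[i+1], NULL, 10). A minimal userspace sketch, using the standard strtol() as a stand-in for the kernel's simple_strtol() (the mode strings are assumed examples), shows why the two parse the leading digits identically:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical mode-option fragments as fb_find_mode() would see them */
	const char *refresh_str = "60", *res_str = "1024x768-16@60";

	/* strtol(str, NULL, 10) stops at the first non-digit, like my_atoi() did */
	long refresh = strtol(refresh_str, NULL, 10);   /* 60 */
	long xres = strtol(res_str, NULL, 10);          /* 1024, parsing stops at 'x' */

	printf("refresh=%ld xres=%ld\n", refresh, xres);
	return 0;
}
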
diff --git a/drivers/video/output.c b/drivers/video/output.c
index 5e6439ae7394..5137aa016b83 100644
--- a/drivers/video/output.c
+++ b/drivers/video/output.c
@@ -50,7 +50,7 @@ static ssize_t video_output_store_state(struct device *dev,
50 int request_state = simple_strtoul(buf,&endp,0); 50 int request_state = simple_strtoul(buf,&endp,0);
51 size_t size = endp - buf; 51 size_t size = endp - buf;
52 52
53 if (*endp && isspace(*endp)) 53 if (isspace(*endp))
54 size++; 54 size++;
55 if (size != count) 55 if (size != count)
56 return -EINVAL; 56 return -EINVAL;
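
The output.c hunk drops the redundant *endp test: isspace('\0') is false, so checking the terminator alone is enough. A minimal sketch (plain C, assuming an "echo 1 > state" style write so buf is "1\n") of the length accounting the store handler performs:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *buf = "1\n";                 /* hypothetical sysfs write payload */
	size_t count = strlen(buf);
	char *endp;

	long request_state = strtoul(buf, &endp, 0);
	size_t size = endp - buf;                /* digits consumed: 1 */

	if (isspace((unsigned char)*endp))       /* '\n' counts, '\0' does not */
		size++;

	printf("state=%ld size=%zu count=%zu ok=%d\n",
	       request_state, size, count, size == count);
	return 0;
}
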
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index 0573ec685a57..0f361b6100d2 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -98,7 +98,8 @@ static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
98{ 98{
99 struct pmagbafb_par *par = info->par; 99 struct pmagbafb_par *par = info->par;
100 100
101 BUG_ON(regno >= info->cmap.len); 101 if (regno >= info->cmap.len)
102 return 1;
102 103
103 red >>= 8; /* The cmap fields are 16 bits */ 104 red >>= 8; /* The cmap fields are 16 bits */
104 green >>= 8; /* wide, but the hardware colormap */ 105 green >>= 8; /* wide, but the hardware colormap */
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 98748723af9f..2de0806421b4 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -102,7 +102,8 @@ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red,
102{ 102{
103 struct pmagbbfb_par *par = info->par; 103 struct pmagbbfb_par *par = info->par;
104 104
105 BUG_ON(regno >= info->cmap.len); 105 if (regno >= info->cmap.len)
106 return 1;
106 107
107 red >>= 8; /* The cmap fields are 16 bits */ 108 red >>= 8; /* The cmap fields are 16 bits */
108 green >>= 8; /* wide, but the hardware colormap */ 109 green >>= 8; /* wide, but the hardware colormap */
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index f58a3aae6ea6..415858b421b3 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1221,13 +1221,14 @@ static void setup_smart_timing(struct pxafb_info *fbi,
1221static int pxafb_smart_thread(void *arg) 1221static int pxafb_smart_thread(void *arg)
1222{ 1222{
1223 struct pxafb_info *fbi = arg; 1223 struct pxafb_info *fbi = arg;
1224 struct pxafb_mach_info *inf = fbi->dev->platform_data; 1224 struct pxafb_mach_info *inf;
1225 1225
1226 if (!fbi || !inf->smart_update) { 1226 if (!fbi || !fbi->dev->platform_data->smart_update) {
1227 pr_err("%s: not properly initialized, thread terminated\n", 1227 pr_err("%s: not properly initialized, thread terminated\n",
1228 __func__); 1228 __func__);
1229 return -EINVAL; 1229 return -EINVAL;
1230 } 1230 }
1231 inf = fbi->dev->platform_data;
1231 1232
1232 pr_debug("%s(): task starting\n", __func__); 1233 pr_debug("%s(): task starting\n", __func__);
1233 1234
@@ -1667,7 +1668,7 @@ static int pxafb_resume(struct device *dev)
1667 return 0; 1668 return 0;
1668} 1669}
1669 1670
1670static struct dev_pm_ops pxafb_pm_ops = { 1671static const struct dev_pm_ops pxafb_pm_ops = {
1671 .suspend = pxafb_suspend, 1672 .suspend = pxafb_suspend,
1672 .resume = pxafb_resume, 1673 .resume = pxafb_resume,
1673}; 1674};
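
In the pxafb hunk above, the platform_data dereference is deferred so it only happens after the !fbi test; because || short-circuits, the smart_update check never touches a NULL fbi. A minimal sketch with stand-in structure names (mach_info, fb_dev and fb_ctx are assumptions, not the driver's types):

#include <stdio.h>
#include <stddef.h>

/* hypothetical stand-ins for the driver structures */
struct mach_info { int (*smart_update)(void *); };
struct fb_dev    { struct mach_info *platform_data; };
struct fb_ctx    { struct fb_dev *dev; };

static int thread_init(struct fb_ctx *fbi)
{
	struct mach_info *inf;

	/* || short-circuits, so platform_data is only read when fbi != NULL */
	if (!fbi || !fbi->dev->platform_data->smart_update)
		return -1;

	inf = fbi->dev->platform_data;   /* safe to cache only after the check */
	(void)inf;
	return 0;
}

int main(void)
{
	printf("%d\n", thread_init(NULL));   /* -1, no dereference of NULL */
	return 0;
}
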
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index b4b5de930cf5..a69830d26f7f 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -281,6 +281,7 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
281 struct list_head *pagelist) 281 struct list_head *pagelist)
282{ 282{
283 struct sh_mobile_lcdc_chan *ch = info->par; 283 struct sh_mobile_lcdc_chan *ch = info->par;
284 struct sh_mobile_lcdc_board_cfg *bcfg = &ch->cfg.board_cfg;
284 285
285 /* enable clocks before accessing hardware */ 286 /* enable clocks before accessing hardware */
286 sh_mobile_lcdc_clk_on(ch->lcdc); 287 sh_mobile_lcdc_clk_on(ch->lcdc);
@@ -305,10 +306,17 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
305 306
306 /* trigger panel update */ 307 /* trigger panel update */
307 dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); 308 dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
309 if (bcfg->start_transfer)
310 bcfg->start_transfer(bcfg->board_data, ch,
311 &sh_mobile_lcdc_sys_bus_ops);
308 lcdc_write_chan(ch, LDSM2R, 1); 312 lcdc_write_chan(ch, LDSM2R, 1);
309 dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); 313 dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
310 } else 314 } else {
315 if (bcfg->start_transfer)
316 bcfg->start_transfer(bcfg->board_data, ch,
317 &sh_mobile_lcdc_sys_bus_ops);
311 lcdc_write_chan(ch, LDSM2R, 1); 318 lcdc_write_chan(ch, LDSM2R, 1);
319 }
312} 320}
313 321
314static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) 322static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
@@ -890,7 +898,7 @@ static int sh_mobile_lcdc_runtime_resume(struct device *dev)
890 return 0; 898 return 0;
891} 899}
892 900
893static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { 901static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
894 .suspend = sh_mobile_lcdc_suspend, 902 .suspend = sh_mobile_lcdc_suspend,
895 .resume = sh_mobile_lcdc_resume, 903 .resume = sh_mobile_lcdc_resume,
896 .runtime_suspend = sh_mobile_lcdc_runtime_suspend, 904 .runtime_suspend = sh_mobile_lcdc_runtime_suspend,
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index a4e05e4d7501..9d2b6bc49036 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -2115,7 +2115,7 @@ sisfb_detect_VB_connect(struct sis_video_info *ivideo)
2115 if( (!(ivideo->vbflags2 & VB2_SISBRIDGE)) && 2115 if( (!(ivideo->vbflags2 & VB2_SISBRIDGE)) &&
2116 (!((ivideo->sisvga_engine == SIS_315_VGA) && 2116 (!((ivideo->sisvga_engine == SIS_315_VGA) &&
2117 (ivideo->vbflags2 & VB2_CHRONTEL))) ) { 2117 (ivideo->vbflags2 & VB2_CHRONTEL))) ) {
2118 if(ivideo->sisfb_tvstd & (TV_PALN | TV_PALN | TV_NTSCJ)) { 2118 if(ivideo->sisfb_tvstd & (TV_PALM | TV_PALN | TV_NTSCJ)) {
2119 ivideo->sisfb_tvstd = -1; 2119 ivideo->sisfb_tvstd = -1;
2120 printk(KERN_ERR "sisfb: PALM/PALN/NTSCJ not supported\n"); 2120 printk(KERN_ERR "sisfb: PALM/PALN/NTSCJ not supported\n");
2121 } 2121 }
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 924d79462780..35370d0ecf03 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -29,8 +29,8 @@
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/console.h> 31#include <linux/console.h>
32#include <linux/io.h>
32 33
33#include <asm/io.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <asm/div64.h> 35#include <asm/div64.h>
36 36
@@ -66,6 +66,7 @@ struct sm501fb_info {
66 struct fb_info *fb[2]; /* fb info for both heads */ 66 struct fb_info *fb[2]; /* fb info for both heads */
67 struct resource *fbmem_res; /* framebuffer resource */ 67 struct resource *fbmem_res; /* framebuffer resource */
68 struct resource *regs_res; /* registers resource */ 68 struct resource *regs_res; /* registers resource */
69 struct resource *regs2d_res; /* 2d registers resource */
69 struct sm501_platdata_fb *pdata; /* our platform data */ 70 struct sm501_platdata_fb *pdata; /* our platform data */
70 71
71 unsigned long pm_crt_ctrl; /* pm: crt ctrl save */ 72 unsigned long pm_crt_ctrl; /* pm: crt ctrl save */
@@ -73,6 +74,7 @@ struct sm501fb_info {
73 int irq; 74 int irq;
74 int swap_endian; /* set to swap rgb=>bgr */ 75 int swap_endian; /* set to swap rgb=>bgr */
75 void __iomem *regs; /* remapped registers */ 76 void __iomem *regs; /* remapped registers */
77 void __iomem *regs2d; /* 2d remapped registers */
76 void __iomem *fbmem; /* remapped framebuffer */ 78 void __iomem *fbmem; /* remapped framebuffer */
77 size_t fbmem_len; /* length of remapped region */ 79 size_t fbmem_len; /* length of remapped region */
78}; 80};
@@ -123,9 +125,9 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
123 * This is an attempt to lay out memory for the two framebuffers and 125 * This is an attempt to lay out memory for the two framebuffers and
124 * everything else 126 * everything else
125 * 127 *
126 * |fbmem_res->start fbmem_res->end| 128 * |fbmem_res->start fbmem_res->end|
127 * | | 129 * | |
128 * |fb[0].fix.smem_start | |fb[1].fix.smem_start | 2K | 130 * |fb[0].fix.smem_start | |fb[1].fix.smem_start | 2K |
129 * |-> fb[0].fix.smem_len <-| spare |-> fb[1].fix.smem_len <-|-> cursors <-| 131 * |-> fb[0].fix.smem_len <-| spare |-> fb[1].fix.smem_len <-|-> cursors <-|
130 * 132 *
131 * The "spare" space is for the 2d engine data 133 * The "spare" space is for the 2d engine data
@@ -1246,7 +1248,173 @@ static ssize_t sm501fb_debug_show_pnl(struct device *dev,
1246 1248
1247static DEVICE_ATTR(fbregs_pnl, 0444, sm501fb_debug_show_pnl, NULL); 1249static DEVICE_ATTR(fbregs_pnl, 0444, sm501fb_debug_show_pnl, NULL);
1248 1250
1249/* framebuffer ops */ 1251/* acceleration operations */
1252static int sm501fb_sync(struct fb_info *info)
1253{
1254 int count = 1000000;
1255 struct sm501fb_par *par = info->par;
1256 struct sm501fb_info *fbi = par->info;
1257
1258 /* wait for the 2d engine to be ready */
1259 while ((count > 0) &&
1260 (readl(fbi->regs + SM501_SYSTEM_CONTROL) &
1261 SM501_SYSCTRL_2D_ENGINE_STATUS) != 0)
1262 count--;
1263
1264 if (count <= 0) {
1265 dev_err(info->dev, "Timeout waiting for 2d engine sync\n");
1266 return 1;
1267 }
1268 return 0;
1269}
1270
1271static void sm501fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
1272{
1273 struct sm501fb_par *par = info->par;
1274 struct sm501fb_info *fbi = par->info;
1275 int width = area->width;
1276 int height = area->height;
1277 int sx = area->sx;
1278 int sy = area->sy;
1279 int dx = area->dx;
1280 int dy = area->dy;
1281 unsigned long rtl = 0;
1282
1283 /* source clip */
1284 if ((sx >= info->var.xres_virtual) ||
1285 (sy >= info->var.yres_virtual))
1286 /* source Area not within virtual screen, skipping */
1287 return;
1288 if ((sx + width) >= info->var.xres_virtual)
1289 width = info->var.xres_virtual - sx - 1;
1290 if ((sy + height) >= info->var.yres_virtual)
1291 height = info->var.yres_virtual - sy - 1;
1292
1293 /* dest clip */
1294 if ((dx >= info->var.xres_virtual) ||
1295 (dy >= info->var.yres_virtual))
1296 /* Destination Area not within virtual screen, skipping */
1297 return;
1298 if ((dx + width) >= info->var.xres_virtual)
1299 width = info->var.xres_virtual - dx - 1;
1300 if ((dy + height) >= info->var.yres_virtual)
1301 height = info->var.yres_virtual - dy - 1;
1302
1303 if ((sx < dx) || (sy < dy)) {
1304 rtl = 1 << 27;
1305 sx += width - 1;
1306 dx += width - 1;
1307 sy += height - 1;
1308 dy += height - 1;
1309 }
1310
1311 if (sm501fb_sync(info))
1312 return;
1313
1314 /* set the base addresses */
1315 writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
1316 writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
1317
1318 /* set the window width */
1319 writel((info->var.xres << 16) | info->var.xres,
1320 fbi->regs2d + SM501_2D_WINDOW_WIDTH);
1321
1322 /* set window stride */
1323 writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
1324 fbi->regs2d + SM501_2D_PITCH);
1325
1326 /* set data format */
1327 switch (info->var.bits_per_pixel) {
1328 case 8:
1329 writel(0, fbi->regs2d + SM501_2D_STRETCH);
1330 break;
1331 case 16:
1332 writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
1333 break;
1334 case 32:
1335 writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
1336 break;
1337 }
1338
1339 /* 2d compare mask */
1340 writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
1341
1342 /* 2d mask */
1343 writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
1344
1345 /* source and destination x y */
1346 writel((sx << 16) | sy, fbi->regs2d + SM501_2D_SOURCE);
1347 writel((dx << 16) | dy, fbi->regs2d + SM501_2D_DESTINATION);
1348
1349 /* w/h */
1350 writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
1351
1352 /* do area move */
1353 writel(0x800000cc | rtl, fbi->regs2d + SM501_2D_CONTROL);
1354}
1355
1356static void sm501fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
1357{
1358 struct sm501fb_par *par = info->par;
1359 struct sm501fb_info *fbi = par->info;
1360 int width = rect->width, height = rect->height;
1361
1362 if ((rect->dx >= info->var.xres_virtual) ||
1363 (rect->dy >= info->var.yres_virtual))
1364 /* Rectangle not within virtual screen, skipping */
1365 return;
1366 if ((rect->dx + width) >= info->var.xres_virtual)
1367 width = info->var.xres_virtual - rect->dx - 1;
1368 if ((rect->dy + height) >= info->var.yres_virtual)
1369 height = info->var.yres_virtual - rect->dy - 1;
1370
1371 if (sm501fb_sync(info))
1372 return;
1373
1374 /* set the base addresses */
1375 writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
1376 writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
1377
1378 /* set the window width */
1379 writel((info->var.xres << 16) | info->var.xres,
1380 fbi->regs2d + SM501_2D_WINDOW_WIDTH);
1381
1382 /* set window stride */
1383 writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
1384 fbi->regs2d + SM501_2D_PITCH);
1385
1386 /* set data format */
1387 switch (info->var.bits_per_pixel) {
1388 case 8:
1389 writel(0, fbi->regs2d + SM501_2D_STRETCH);
1390 break;
1391 case 16:
1392 writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
1393 break;
1394 case 32:
1395 writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
1396 break;
1397 }
1398
1399 /* 2d compare mask */
1400 writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
1401
1402 /* 2d mask */
1403 writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
1404
1405 /* colour */
1406 writel(rect->color, fbi->regs2d + SM501_2D_FOREGROUND);
1407
1408 /* x y */
1409 writel((rect->dx << 16) | rect->dy, fbi->regs2d + SM501_2D_DESTINATION);
1410
1411 /* w/h */
1412 writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
1413
1414 /* do rectangle fill */
1415 writel(0x800100cc, fbi->regs2d + SM501_2D_CONTROL);
1416}
1417
1250 1418
1251static struct fb_ops sm501fb_ops_crt = { 1419static struct fb_ops sm501fb_ops_crt = {
1252 .owner = THIS_MODULE, 1420 .owner = THIS_MODULE,
@@ -1256,9 +1424,10 @@ static struct fb_ops sm501fb_ops_crt = {
1256 .fb_setcolreg = sm501fb_setcolreg, 1424 .fb_setcolreg = sm501fb_setcolreg,
1257 .fb_pan_display = sm501fb_pan_crt, 1425 .fb_pan_display = sm501fb_pan_crt,
1258 .fb_cursor = sm501fb_cursor, 1426 .fb_cursor = sm501fb_cursor,
1259 .fb_fillrect = cfb_fillrect, 1427 .fb_fillrect = sm501fb_fillrect,
1260 .fb_copyarea = cfb_copyarea, 1428 .fb_copyarea = sm501fb_copyarea,
1261 .fb_imageblit = cfb_imageblit, 1429 .fb_imageblit = cfb_imageblit,
1430 .fb_sync = sm501fb_sync,
1262}; 1431};
1263 1432
1264static struct fb_ops sm501fb_ops_pnl = { 1433static struct fb_ops sm501fb_ops_pnl = {
@@ -1269,9 +1438,10 @@ static struct fb_ops sm501fb_ops_pnl = {
1269 .fb_blank = sm501fb_blank_pnl, 1438 .fb_blank = sm501fb_blank_pnl,
1270 .fb_setcolreg = sm501fb_setcolreg, 1439 .fb_setcolreg = sm501fb_setcolreg,
1271 .fb_cursor = sm501fb_cursor, 1440 .fb_cursor = sm501fb_cursor,
1272 .fb_fillrect = cfb_fillrect, 1441 .fb_fillrect = sm501fb_fillrect,
1273 .fb_copyarea = cfb_copyarea, 1442 .fb_copyarea = sm501fb_copyarea,
1274 .fb_imageblit = cfb_imageblit, 1443 .fb_imageblit = cfb_imageblit,
1444 .fb_sync = sm501fb_sync,
1275}; 1445};
1276 1446
1277/* sm501_init_cursor 1447/* sm501_init_cursor
@@ -1329,7 +1499,8 @@ static int sm501fb_start(struct sm501fb_info *info,
1329 dev_warn(dev, "no irq for device\n"); 1499 dev_warn(dev, "no irq for device\n");
1330 } 1500 }
1331 1501
1332 /* allocate, reserve and remap resources for registers */ 1502 /* allocate, reserve and remap resources for display
1503 * controller registers */
1333 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1504 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1334 if (res == NULL) { 1505 if (res == NULL) {
1335 dev_err(dev, "no resource definition for registers\n"); 1506 dev_err(dev, "no resource definition for registers\n");
@@ -1338,7 +1509,7 @@ static int sm501fb_start(struct sm501fb_info *info,
1338 } 1509 }
1339 1510
1340 info->regs_res = request_mem_region(res->start, 1511 info->regs_res = request_mem_region(res->start,
1341 res->end - res->start, 1512 resource_size(res),
1342 pdev->name); 1513 pdev->name);
1343 1514
1344 if (info->regs_res == NULL) { 1515 if (info->regs_res == NULL) {
@@ -1347,37 +1518,63 @@ static int sm501fb_start(struct sm501fb_info *info,
1347 goto err_release; 1518 goto err_release;
1348 } 1519 }
1349 1520
1350 info->regs = ioremap(res->start, (res->end - res->start)+1); 1521 info->regs = ioremap(res->start, resource_size(res));
1351 if (info->regs == NULL) { 1522 if (info->regs == NULL) {
1352 dev_err(dev, "cannot remap registers\n"); 1523 dev_err(dev, "cannot remap registers\n");
1353 ret = -ENXIO; 1524 ret = -ENXIO;
1354 goto err_regs_res; 1525 goto err_regs_res;
1355 } 1526 }
1356 1527
1528 /* allocate, reserve and remap resources for 2d
1529 * controller registers */
1530 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1531 if (res == NULL) {
1532 dev_err(dev, "no resource definition for 2d registers\n");
1533 ret = -ENOENT;
1534 goto err_regs_map;
1535 }
1536
1537 info->regs2d_res = request_mem_region(res->start,
1538 resource_size(res),
1539 pdev->name);
1540
1541 if (info->regs2d_res == NULL) {
1542 dev_err(dev, "cannot claim registers\n");
1543 ret = -ENXIO;
1544 goto err_regs_map;
1545 }
1546
1547 info->regs2d = ioremap(res->start, resource_size(res));
1548 if (info->regs2d == NULL) {
1549 dev_err(dev, "cannot remap registers\n");
1550 ret = -ENXIO;
1551 goto err_regs2d_res;
1552 }
1553
1357 /* allocate, reserve resources for framebuffer */ 1554 /* allocate, reserve resources for framebuffer */
1358 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 1555 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1359 if (res == NULL) { 1556 if (res == NULL) {
1360 dev_err(dev, "no memory resource defined\n"); 1557 dev_err(dev, "no memory resource defined\n");
1361 ret = -ENXIO; 1558 ret = -ENXIO;
1362 goto err_regs_map; 1559 goto err_regs2d_map;
1363 } 1560 }
1364 1561
1365 info->fbmem_res = request_mem_region(res->start, 1562 info->fbmem_res = request_mem_region(res->start,
1366 (res->end - res->start)+1, 1563 resource_size(res),
1367 pdev->name); 1564 pdev->name);
1368 if (info->fbmem_res == NULL) { 1565 if (info->fbmem_res == NULL) {
1369 dev_err(dev, "cannot claim framebuffer\n"); 1566 dev_err(dev, "cannot claim framebuffer\n");
1370 ret = -ENXIO; 1567 ret = -ENXIO;
1371 goto err_regs_map; 1568 goto err_regs2d_map;
1372 } 1569 }
1373 1570
1374 info->fbmem = ioremap(res->start, (res->end - res->start)+1); 1571 info->fbmem = ioremap(res->start, resource_size(res));
1375 if (info->fbmem == NULL) { 1572 if (info->fbmem == NULL) {
1376 dev_err(dev, "cannot remap framebuffer\n"); 1573 dev_err(dev, "cannot remap framebuffer\n");
1377 goto err_mem_res; 1574 goto err_mem_res;
1378 } 1575 }
1379 1576
1380 info->fbmem_len = (res->end - res->start)+1; 1577 info->fbmem_len = resource_size(res);
1381 1578
1382 /* clear framebuffer memory - avoids garbage data on unused fb */ 1579 /* clear framebuffer memory - avoids garbage data on unused fb */
1383 memset(info->fbmem, 0, info->fbmem_len); 1580 memset(info->fbmem, 0, info->fbmem_len);
@@ -1389,8 +1586,10 @@ static int sm501fb_start(struct sm501fb_info *info,
1389 /* enable display controller */ 1586 /* enable display controller */
1390 sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1); 1587 sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1);
1391 1588
1392 /* setup cursors */ 1589 /* enable 2d controller */
1590 sm501_unit_power(dev->parent, SM501_GATE_2D_ENGINE, 1);
1393 1591
1592 /* setup cursors */
1394 sm501_init_cursor(info->fb[HEAD_CRT], SM501_DC_CRT_HWC_ADDR); 1593 sm501_init_cursor(info->fb[HEAD_CRT], SM501_DC_CRT_HWC_ADDR);
1395 sm501_init_cursor(info->fb[HEAD_PANEL], SM501_DC_PANEL_HWC_ADDR); 1594 sm501_init_cursor(info->fb[HEAD_PANEL], SM501_DC_PANEL_HWC_ADDR);
1396 1595
@@ -1400,6 +1599,13 @@ static int sm501fb_start(struct sm501fb_info *info,
1400 release_resource(info->fbmem_res); 1599 release_resource(info->fbmem_res);
1401 kfree(info->fbmem_res); 1600 kfree(info->fbmem_res);
1402 1601
1602 err_regs2d_map:
1603 iounmap(info->regs2d);
1604
1605 err_regs2d_res:
1606 release_resource(info->regs2d_res);
1607 kfree(info->regs2d_res);
1608
1403 err_regs_map: 1609 err_regs_map:
1404 iounmap(info->regs); 1610 iounmap(info->regs);
1405 1611
@@ -1420,6 +1626,10 @@ static void sm501fb_stop(struct sm501fb_info *info)
1420 release_resource(info->fbmem_res); 1626 release_resource(info->fbmem_res);
1421 kfree(info->fbmem_res); 1627 kfree(info->fbmem_res);
1422 1628
1629 iounmap(info->regs2d);
1630 release_resource(info->regs2d_res);
1631 kfree(info->regs2d_res);
1632
1423 iounmap(info->regs); 1633 iounmap(info->regs);
1424 release_resource(info->regs_res); 1634 release_resource(info->regs_res);
1425 kfree(info->regs_res); 1635 kfree(info->regs_res);
@@ -1486,7 +1696,8 @@ static int sm501fb_init_fb(struct fb_info *fb,
1486 par->ops.fb_cursor = NULL; 1696 par->ops.fb_cursor = NULL;
1487 1697
1488 fb->fbops = &par->ops; 1698 fb->fbops = &par->ops;
1489 fb->flags = FBINFO_FLAG_DEFAULT | 1699 fb->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST |
1700 FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
1490 FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; 1701 FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
1491 1702
1492 /* fixed data */ 1703 /* fixed data */
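
sm501fb_sync() above is a bounded busy-wait on the 2D engine status, and wiring it into fb_ops.fb_sync gives the fb core a way to wait for engine idle before software drawing touches the framebuffer. A minimal userspace sketch of that polling pattern (read_engine_status() is only a stand-in for the SM501_SYSTEM_CONTROL read, with the busy period simulated):

#include <stdio.h>

/* hypothetical status read; bit 0 set means the 2D engine is still busy */
static int reads_left = 3;
static unsigned int read_engine_status(void)
{
	return reads_left-- > 0 ? 0x1 : 0x0;
}

/* bounded busy-wait in the style of sm501fb_sync() above */
static int engine_sync(void)
{
	int count = 1000000;

	while (count > 0 && (read_engine_status() & 0x1) != 0)
		count--;

	if (count <= 0) {
		fprintf(stderr, "Timeout waiting for 2d engine sync\n");
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("sync=%d\n", engine_sync());   /* 0: engine went idle in time */
	return 0;
}
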
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index e3e597f937a5..09353e2b92f6 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -1134,45 +1134,33 @@ static void integrated_lvds_enable(struct lvds_setting_information
1134 *plvds_setting_info, 1134 *plvds_setting_info,
1135 struct lvds_chip_information *plvds_chip_info) 1135 struct lvds_chip_information *plvds_chip_info)
1136{ 1136{
1137 bool turn_on_first_powersequence = false;
1138 bool turn_on_second_powersequence = false;
1139
1140 DEBUG_MSG(KERN_INFO "integrated_lvds_enable, out_interface:%d\n", 1137 DEBUG_MSG(KERN_INFO "integrated_lvds_enable, out_interface:%d\n",
1141 plvds_chip_info->output_interface); 1138 plvds_chip_info->output_interface);
1142 if (plvds_setting_info->lcd_mode == LCD_SPWG) 1139 if (plvds_setting_info->lcd_mode == LCD_SPWG)
1143 viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT0 + BIT1); 1140 viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT0 + BIT1);
1144 else 1141 else
1145 viafb_write_reg_mask(CRD2, VIACR, 0x03, BIT0 + BIT1); 1142 viafb_write_reg_mask(CRD2, VIACR, 0x03, BIT0 + BIT1);
1146 if (INTERFACE_LVDS0LVDS1 == plvds_chip_info->output_interface)
1147 turn_on_first_powersequence = true;
1148 if (INTERFACE_LVDS0 == plvds_chip_info->output_interface)
1149 turn_on_first_powersequence = true;
1150 if (INTERFACE_LVDS1 == plvds_chip_info->output_interface)
1151 turn_on_second_powersequence = true;
1152
1153 if (turn_on_second_powersequence) {
1154 /* Use second power sequence control: */
1155
1156 /* Use hardware control power sequence. */
1157 viafb_write_reg_mask(CRD3, VIACR, 0, BIT0);
1158
1159 /* Turn on back light. */
1160 viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7);
1161 1143
1162 /* Turn on hardware power sequence. */ 1144 switch (plvds_chip_info->output_interface) {
1163 viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1); 1145 case INTERFACE_LVDS0LVDS1:
1164 } 1146 case INTERFACE_LVDS0:
1165 if (turn_on_first_powersequence) {
1166 /* Use first power sequence control: */ 1147 /* Use first power sequence control: */
1167
1168 /* Use hardware control power sequence. */ 1148 /* Use hardware control power sequence. */
1169 viafb_write_reg_mask(CR91, VIACR, 0, BIT0); 1149 viafb_write_reg_mask(CR91, VIACR, 0, BIT0);
1170
1171 /* Turn on back light. */ 1150 /* Turn on back light. */
1172 viafb_write_reg_mask(CR91, VIACR, 0, BIT6 + BIT7); 1151 viafb_write_reg_mask(CR91, VIACR, 0, BIT6 + BIT7);
1173
1174 /* Turn on hardware power sequence. */ 1152 /* Turn on hardware power sequence. */
1175 viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3); 1153 viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
1154 break;
1155 case INTERFACE_LVDS1:
1156 /* Use second power sequence control: */
1157 /* Use hardware control power sequence. */
1158 viafb_write_reg_mask(CRD3, VIACR, 0, BIT0);
1159 /* Turn on back light. */
1160 viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7);
1161 /* Turn on hardware power sequence. */
1162 viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1);
1163 break;
1176 } 1164 }
1177 1165
1178 /* Turn DFP High/Low pad on. */ 1166 /* Turn DFP High/Low pad on. */
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 56ec696e8afa..10d8c4b4baeb 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1797,7 +1797,7 @@ static const struct file_operations viafb_vt1636_proc_fops = {
1797static void viafb_init_proc(struct proc_dir_entry **viafb_entry) 1797static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
1798{ 1798{
1799 *viafb_entry = proc_mkdir("viafb", NULL); 1799 *viafb_entry = proc_mkdir("viafb", NULL);
1800 if (viafb_entry) { 1800 if (*viafb_entry) {
1801 proc_create("dvp0", 0, *viafb_entry, &viafb_dvp0_proc_fops); 1801 proc_create("dvp0", 0, *viafb_entry, &viafb_dvp0_proc_fops);
1802 proc_create("dvp1", 0, *viafb_entry, &viafb_dvp1_proc_fops); 1802 proc_create("dvp1", 0, *viafb_entry, &viafb_dvp1_proc_fops);
1803 proc_create("dfph", 0, *viafb_entry, &viafb_dfph_proc_fops); 1803 proc_create("dfph", 0, *viafb_entry, &viafb_dfph_proc_fops);
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
index 77afb0acc500..9c6594473d3b 100644
--- a/drivers/watchdog/adx_wdt.c
+++ b/drivers/watchdog/adx_wdt.c
@@ -314,7 +314,7 @@ static int adx_wdt_resume(struct device *dev)
314 return 0; 314 return 0;
315} 315}
316 316
317static struct dev_pm_ops adx_wdt_pm_ops = { 317static const struct dev_pm_ops adx_wdt_pm_ops = {
318 .suspend = adx_wdt_suspend, 318 .suspend = adx_wdt_suspend,
319 .resume = adx_wdt_resume, 319 .resume = adx_wdt_resume,
320}; 320};
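
This is the third dev_pm_ops table in the series (after pxafb and sh_mobile_lcdcfb) to gain a const qualifier: the callback table is never written at runtime, so it can live in read-only data. A minimal sketch with a stand-in struct (pm_ops_stub is an assumption, not the kernel's dev_pm_ops layout):

#include <stdio.h>

/* hypothetical stand-in for a suspend/resume callback table */
struct pm_ops_stub {
	int (*suspend)(void *dev);
	int (*resume)(void *dev);
};

static int stub_suspend(void *dev) { (void)dev; return 0; }
static int stub_resume(void *dev)  { (void)dev; return 0; }

/* const: the pointer table is immutable, only the callbacks themselves run */
static const struct pm_ops_stub stub_pm_ops = {
	.suspend = stub_suspend,
	.resume  = stub_resume,
};

int main(void)
{
	printf("%d %d\n", stub_pm_ops.suspend(NULL), stub_pm_ops.resume(NULL));
	return 0;
}
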
diff --git a/fs/Kconfig b/fs/Kconfig
index 64d44efad7a5..f8fccaaad628 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -6,6 +6,10 @@ menu "File systems"
6 6
7if BLOCK 7if BLOCK
8 8
9config FS_JOURNAL_INFO
10 bool
11 default n
12
9source "fs/ext2/Kconfig" 13source "fs/ext2/Kconfig"
10source "fs/ext3/Kconfig" 14source "fs/ext3/Kconfig"
11source "fs/ext4/Kconfig" 15source "fs/ext4/Kconfig"
diff --git a/fs/aio.c b/fs/aio.c
index c30dfc006108..1cf12b3dd83a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -711,10 +711,8 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
711 */ 711 */
712 ret = retry(iocb); 712 ret = retry(iocb);
713 713
714 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) { 714 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
715 BUG_ON(!list_empty(&iocb->ki_wait.task_list));
716 aio_complete(iocb, ret, 0); 715 aio_complete(iocb, ret, 0);
717 }
718out: 716out:
719 spin_lock_irq(&ctx->ctx_lock); 717 spin_lock_irq(&ctx->ctx_lock);
720 718
@@ -866,13 +864,6 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
866 unsigned long flags; 864 unsigned long flags;
867 int run = 0; 865 int run = 0;
868 866
869 /* We're supposed to be the only path putting the iocb back on the run
870 * list. If we find that the iocb is *back* on a wait queue already
871 * than retry has happened before we could queue the iocb. This also
872 * means that the retry could have completed and freed our iocb, no
873 * good. */
874 BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
875
876 spin_lock_irqsave(&ctx->ctx_lock, flags); 867 spin_lock_irqsave(&ctx->ctx_lock, flags);
877 /* set this inside the lock so that we can't race with aio_run_iocb() 868 /* set this inside the lock so that we can't race with aio_run_iocb()
878 * testing it and putting the iocb on the run list under the lock */ 869 * testing it and putting the iocb on the run list under the lock */
@@ -886,7 +877,7 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
886/* 877/*
887 * kick_iocb: 878 * kick_iocb:
888 * Called typically from a wait queue callback context 879 * Called typically from a wait queue callback context
889 * (aio_wake_function) to trigger a retry of the iocb. 880 * to trigger a retry of the iocb.
890 * The retry is usually executed by aio workqueue 881 * The retry is usually executed by aio workqueue
891 * threads (See aio_kick_handler). 882 * threads (See aio_kick_handler).
892 */ 883 */
@@ -1520,31 +1511,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
1520 return 0; 1511 return 0;
1521} 1512}
1522 1513
1523/*
1524 * aio_wake_function:
1525 * wait queue callback function for aio notification,
1526 * Simply triggers a retry of the operation via kick_iocb.
1527 *
1528 * This callback is specified in the wait queue entry in
1529 * a kiocb.
1530 *
1531 * Note:
1532 * This routine is executed with the wait queue lock held.
1533 * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
1534 * the ioctx lock inside the wait queue lock. This is safe
1535 * because this callback isn't used for wait queues which
1536 * are nested inside ioctx lock (i.e. ctx->wait)
1537 */
1538static int aio_wake_function(wait_queue_t *wait, unsigned mode,
1539 int sync, void *key)
1540{
1541 struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
1542
1543 list_del_init(&wait->task_list);
1544 kick_iocb(iocb);
1545 return 1;
1546}
1547
1548static void aio_batch_add(struct address_space *mapping, 1514static void aio_batch_add(struct address_space *mapping,
1549 struct hlist_head *batch_hash) 1515 struct hlist_head *batch_hash)
1550{ 1516{
@@ -1642,8 +1608,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1642 req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; 1608 req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
1643 req->ki_left = req->ki_nbytes = iocb->aio_nbytes; 1609 req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
1644 req->ki_opcode = iocb->aio_lio_opcode; 1610 req->ki_opcode = iocb->aio_lio_opcode;
1645 init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
1646 INIT_LIST_HEAD(&req->ki_wait.task_list);
1647 1611
1648 ret = aio_setup_iocb(req); 1612 ret = aio_setup_iocb(req);
1649 1613
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 8f7cdde41733..0118d67221b2 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -60,6 +60,11 @@ do { \
60 current->pid, __func__, ##args); \ 60 current->pid, __func__, ##args); \
61} while (0) 61} while (0)
62 62
63struct rehash_entry {
64 struct task_struct *task;
65 struct list_head list;
66};
67
63/* Unified info structure. This is pointed to by both the dentry and 68/* Unified info structure. This is pointed to by both the dentry and
64 inode structures. Each file in the filesystem has an instance of this 69 inode structures. Each file in the filesystem has an instance of this
65 structure. It holds a reference to the dentry, so dentries are never 70 structure. It holds a reference to the dentry, so dentries are never
@@ -75,6 +80,9 @@ struct autofs_info {
75 struct completion expire_complete; 80 struct completion expire_complete;
76 81
77 struct list_head active; 82 struct list_head active;
83 int active_count;
84 struct list_head rehash_list;
85
78 struct list_head expiring; 86 struct list_head expiring;
79 87
80 struct autofs_sb_info *sbi; 88 struct autofs_sb_info *sbi;
@@ -95,6 +103,8 @@ struct autofs_info {
95 103
96#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ 104#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
97#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */ 105#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
106#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
107#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */
98 108
99struct autofs_wait_queue { 109struct autofs_wait_queue {
100 wait_queue_head_t queue; 110 wait_queue_head_t queue;
@@ -161,7 +171,7 @@ static inline int autofs4_ispending(struct dentry *dentry)
161{ 171{
162 struct autofs_info *inf = autofs4_dentry_ino(dentry); 172 struct autofs_info *inf = autofs4_dentry_ino(dentry);
163 173
164 if (dentry->d_flags & DCACHE_AUTOFS_PENDING) 174 if (inf->flags & AUTOFS_INF_PENDING)
165 return 1; 175 return 1;
166 176
167 if (inf->flags & AUTOFS_INF_EXPIRING) 177 if (inf->flags & AUTOFS_INF_EXPIRING)
@@ -264,5 +274,31 @@ out:
264 return ret; 274 return ret;
265} 275}
266 276
277static inline void autofs4_add_expiring(struct dentry *dentry)
278{
279 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
280 struct autofs_info *ino = autofs4_dentry_ino(dentry);
281 if (ino) {
282 spin_lock(&sbi->lookup_lock);
283 if (list_empty(&ino->expiring))
284 list_add(&ino->expiring, &sbi->expiring_list);
285 spin_unlock(&sbi->lookup_lock);
286 }
287 return;
288}
289
290static inline void autofs4_del_expiring(struct dentry *dentry)
291{
292 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
293 struct autofs_info *ino = autofs4_dentry_ino(dentry);
294 if (ino) {
295 spin_lock(&sbi->lookup_lock);
296 if (!list_empty(&ino->expiring))
297 list_del_init(&ino->expiring);
298 spin_unlock(&sbi->lookup_lock);
299 }
300 return;
301}
302
267void autofs4_dentry_release(struct dentry *); 303void autofs4_dentry_release(struct dentry *);
268extern void autofs4_kill_sb(struct super_block *); 304extern void autofs4_kill_sb(struct super_block *);
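
The new autofs4_add_expiring()/autofs4_del_expiring() helpers above test list_empty() on the entry itself, which works because the entry is always re-initialised (INIT_LIST_HEAD / list_del_init), making add and delete idempotent. A minimal userspace sketch with stand-in list primitives instead of <linux/list.h> (locking omitted):

#include <stdio.h>

/* minimal stand-ins for the kernel list primitives used by the helpers */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

int main(void)
{
	struct list_head expiring_list, ino_expiring;

	INIT_LIST_HEAD(&expiring_list);
	INIT_LIST_HEAD(&ino_expiring);

	/* add_expiring: the list_empty() test makes repeated adds idempotent */
	if (list_empty(&ino_expiring))
		list_add(&ino_expiring, &expiring_list);
	if (list_empty(&ino_expiring))          /* second call is a no-op */
		list_add(&ino_expiring, &expiring_list);

	/* del_expiring: only unlink when the entry is actually queued */
	if (!list_empty(&ino_expiring))
		list_del_init(&ino_expiring);

	printf("queued=%d\n", !list_empty(&expiring_list));   /* 0 */
	return 0;
}
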
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 3da18d453488..74bc9aa6df31 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -27,7 +27,7 @@ static inline int autofs4_can_expire(struct dentry *dentry,
27 return 0; 27 return 0;
28 28
29 /* No point expiring a pending mount */ 29 /* No point expiring a pending mount */
30 if (dentry->d_flags & DCACHE_AUTOFS_PENDING) 30 if (ino->flags & AUTOFS_INF_PENDING)
31 return 0; 31 return 0;
32 32
33 if (!do_now) { 33 if (!do_now) {
@@ -279,6 +279,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
279 root->d_mounted--; 279 root->d_mounted--;
280 } 280 }
281 ino->flags |= AUTOFS_INF_EXPIRING; 281 ino->flags |= AUTOFS_INF_EXPIRING;
282 autofs4_add_expiring(root);
282 init_completion(&ino->expire_complete); 283 init_completion(&ino->expire_complete);
283 spin_unlock(&sbi->fs_lock); 284 spin_unlock(&sbi->fs_lock);
284 return root; 285 return root;
@@ -406,6 +407,7 @@ found:
406 expired, (int)expired->d_name.len, expired->d_name.name); 407 expired, (int)expired->d_name.len, expired->d_name.name);
407 ino = autofs4_dentry_ino(expired); 408 ino = autofs4_dentry_ino(expired);
408 ino->flags |= AUTOFS_INF_EXPIRING; 409 ino->flags |= AUTOFS_INF_EXPIRING;
410 autofs4_add_expiring(expired);
409 init_completion(&ino->expire_complete); 411 init_completion(&ino->expire_complete);
410 spin_unlock(&sbi->fs_lock); 412 spin_unlock(&sbi->fs_lock);
411 spin_lock(&dcache_lock); 413 spin_lock(&dcache_lock);
@@ -433,7 +435,7 @@ int autofs4_expire_wait(struct dentry *dentry)
433 435
434 DPRINTK("expire done status=%d", status); 436 DPRINTK("expire done status=%d", status);
435 437
436 if (d_unhashed(dentry)) 438 if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode))
437 return -EAGAIN; 439 return -EAGAIN;
438 440
439 return status; 441 return status;
@@ -473,6 +475,7 @@ int autofs4_expire_run(struct super_block *sb,
473 spin_lock(&sbi->fs_lock); 475 spin_lock(&sbi->fs_lock);
474 ino = autofs4_dentry_ino(dentry); 476 ino = autofs4_dentry_ino(dentry);
475 ino->flags &= ~AUTOFS_INF_EXPIRING; 477 ino->flags &= ~AUTOFS_INF_EXPIRING;
478 autofs4_del_expiring(dentry);
476 complete_all(&ino->expire_complete); 479 complete_all(&ino->expire_complete);
477 spin_unlock(&sbi->fs_lock); 480 spin_unlock(&sbi->fs_lock);
478 481
@@ -503,6 +506,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
503 ino->flags &= ~AUTOFS_INF_MOUNTPOINT; 506 ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
504 } 507 }
505 ino->flags &= ~AUTOFS_INF_EXPIRING; 508 ino->flags &= ~AUTOFS_INF_EXPIRING;
509 autofs4_del_expiring(dentry);
506 complete_all(&ino->expire_complete); 510 complete_all(&ino->expire_complete);
507 spin_unlock(&sbi->fs_lock); 511 spin_unlock(&sbi->fs_lock);
508 dput(dentry); 512 dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 69c8142da838..d0a3de247458 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -49,6 +49,8 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
49 ino->dentry = NULL; 49 ino->dentry = NULL;
50 ino->size = 0; 50 ino->size = 0;
51 INIT_LIST_HEAD(&ino->active); 51 INIT_LIST_HEAD(&ino->active);
52 INIT_LIST_HEAD(&ino->rehash_list);
53 ino->active_count = 0;
52 INIT_LIST_HEAD(&ino->expiring); 54 INIT_LIST_HEAD(&ino->expiring);
53 atomic_set(&ino->count, 0); 55 atomic_set(&ino->count, 0);
54 } 56 }
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index b96a3c57359d..30cc9ddf4b70 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -72,6 +72,139 @@ const struct inode_operations autofs4_dir_inode_operations = {
72 .rmdir = autofs4_dir_rmdir, 72 .rmdir = autofs4_dir_rmdir,
73}; 73};
74 74
75static void autofs4_add_active(struct dentry *dentry)
76{
77 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
78 struct autofs_info *ino = autofs4_dentry_ino(dentry);
79 if (ino) {
80 spin_lock(&sbi->lookup_lock);
81 if (!ino->active_count) {
82 if (list_empty(&ino->active))
83 list_add(&ino->active, &sbi->active_list);
84 }
85 ino->active_count++;
86 spin_unlock(&sbi->lookup_lock);
87 }
88 return;
89}
90
91static void autofs4_del_active(struct dentry *dentry)
92{
93 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
94 struct autofs_info *ino = autofs4_dentry_ino(dentry);
95 if (ino) {
96 spin_lock(&sbi->lookup_lock);
97 ino->active_count--;
98 if (!ino->active_count) {
99 if (!list_empty(&ino->active))
100 list_del_init(&ino->active);
101 }
102 spin_unlock(&sbi->lookup_lock);
103 }
104 return;
105}
106
107static void autofs4_add_rehash_entry(struct autofs_info *ino,
108 struct rehash_entry *entry)
109{
110 entry->task = current;
111 INIT_LIST_HEAD(&entry->list);
112 list_add(&entry->list, &ino->rehash_list);
113 return;
114}
115
116static void autofs4_remove_rehash_entry(struct autofs_info *ino)
117{
118 struct list_head *head = &ino->rehash_list;
119 struct rehash_entry *entry;
120 list_for_each_entry(entry, head, list) {
121 if (entry->task == current) {
122 list_del(&entry->list);
123 kfree(entry);
124 break;
125 }
126 }
127 return;
128}
129
130static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
131{
132 struct autofs_sb_info *sbi = ino->sbi;
133 struct rehash_entry *entry, *next;
134 struct list_head *head;
135
136 spin_lock(&sbi->fs_lock);
137 spin_lock(&sbi->lookup_lock);
138 if (!(ino->flags & AUTOFS_INF_REHASH)) {
139 spin_unlock(&sbi->lookup_lock);
140 spin_unlock(&sbi->fs_lock);
141 return;
142 }
143 ino->flags &= ~AUTOFS_INF_REHASH;
144 head = &ino->rehash_list;
145 list_for_each_entry_safe(entry, next, head, list) {
146 list_del(&entry->list);
147 kfree(entry);
148 }
149 spin_unlock(&sbi->lookup_lock);
150 spin_unlock(&sbi->fs_lock);
151 dput(ino->dentry);
152
153 return;
154}
155
156static void autofs4_revalidate_drop(struct dentry *dentry,
157 struct rehash_entry *entry)
158{
159 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
160 struct autofs_info *ino = autofs4_dentry_ino(dentry);
161 /*
162 * Add to the active list so we can pick this up in
163 * ->lookup(). Also add an entry to a rehash list so
164 * we know when there are no dentries in flight, and
165 * hence when we can rehash the dentry.
166 */
167 spin_lock(&sbi->lookup_lock);
168 if (list_empty(&ino->active))
169 list_add(&ino->active, &sbi->active_list);
170 autofs4_add_rehash_entry(ino, entry);
171 spin_unlock(&sbi->lookup_lock);
172 if (!(ino->flags & AUTOFS_INF_REHASH)) {
173 ino->flags |= AUTOFS_INF_REHASH;
174 dget(dentry);
175 spin_lock(&dentry->d_lock);
176 __d_drop(dentry);
177 spin_unlock(&dentry->d_lock);
178 }
179 return;
180}
181
182static void autofs4_revalidate_rehash(struct dentry *dentry)
183{
184 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
185 struct autofs_info *ino = autofs4_dentry_ino(dentry);
186 if (ino->flags & AUTOFS_INF_REHASH) {
187 spin_lock(&sbi->lookup_lock);
188 autofs4_remove_rehash_entry(ino);
189 if (list_empty(&ino->rehash_list)) {
190 spin_unlock(&sbi->lookup_lock);
191 ino->flags &= ~AUTOFS_INF_REHASH;
192 d_rehash(dentry);
193 dput(ino->dentry);
194 } else
195 spin_unlock(&sbi->lookup_lock);
196 }
197 return;
198}
199
200static unsigned int autofs4_need_mount(unsigned int flags)
201{
202 unsigned int res = 0;
203 if (flags & (TRIGGER_FLAGS | TRIGGER_INTENTS))
204 res = 1;
205 return res;
206}
207
75static int autofs4_dir_open(struct inode *inode, struct file *file) 208static int autofs4_dir_open(struct inode *inode, struct file *file)
76{ 209{
77 struct dentry *dentry = file->f_path.dentry; 210 struct dentry *dentry = file->f_path.dentry;
@@ -93,7 +226,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
93 * it. 226 * it.
94 */ 227 */
95 spin_lock(&dcache_lock); 228 spin_lock(&dcache_lock);
96 if (!d_mountpoint(dentry) && __simple_empty(dentry)) { 229 if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
97 spin_unlock(&dcache_lock); 230 spin_unlock(&dcache_lock);
98 return -ENOENT; 231 return -ENOENT;
99 } 232 }
@@ -103,7 +236,7 @@ out:
103 return dcache_dir_open(inode, file); 236 return dcache_dir_open(inode, file);
104} 237}
105 238
106static int try_to_fill_dentry(struct dentry *dentry, int flags) 239static int try_to_fill_dentry(struct dentry *dentry)
107{ 240{
108 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 241 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
109 struct autofs_info *ino = autofs4_dentry_ino(dentry); 242 struct autofs_info *ino = autofs4_dentry_ino(dentry);
@@ -116,55 +249,17 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
116 * Wait for a pending mount, triggering one if there 249 * Wait for a pending mount, triggering one if there
117 * isn't one already 250 * isn't one already
118 */ 251 */
119 if (dentry->d_inode == NULL) { 252 DPRINTK("waiting for mount name=%.*s",
120 DPRINTK("waiting for mount name=%.*s", 253 dentry->d_name.len, dentry->d_name.name);
121 dentry->d_name.len, dentry->d_name.name);
122
123 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
124
125 DPRINTK("mount done status=%d", status);
126
127 /* Turn this into a real negative dentry? */
128 if (status == -ENOENT) {
129 spin_lock(&dentry->d_lock);
130 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
131 spin_unlock(&dentry->d_lock);
132 return status;
133 } else if (status) {
134 /* Return a negative dentry, but leave it "pending" */
135 return status;
136 }
137 /* Trigger mount for path component or follow link */
138 } else if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
139 flags & (TRIGGER_FLAGS | TRIGGER_INTENTS) ||
140 current->link_count) {
141 DPRINTK("waiting for mount name=%.*s",
142 dentry->d_name.len, dentry->d_name.name);
143
144 spin_lock(&dentry->d_lock);
145 dentry->d_flags |= DCACHE_AUTOFS_PENDING;
146 spin_unlock(&dentry->d_lock);
147 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
148 254
149 DPRINTK("mount done status=%d", status); 255 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
150 256
151 if (status) { 257 DPRINTK("mount done status=%d", status);
152 spin_lock(&dentry->d_lock);
153 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
154 spin_unlock(&dentry->d_lock);
155 return status;
156 }
157 }
158
159 /* Initialize expiry counter after successful mount */
160 if (ino)
161 ino->last_used = jiffies;
162 258
163 spin_lock(&dentry->d_lock); 259 /* Update expiry counter */
164 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 260 ino->last_used = jiffies;
165 spin_unlock(&dentry->d_lock);
166 261
167 return 0; 262 return status;
168} 263}
169 264
170/* For autofs direct mounts the follow link triggers the mount */ 265/* For autofs direct mounts the follow link triggers the mount */
@@ -202,27 +297,39 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
202 autofs4_expire_wait(dentry); 297 autofs4_expire_wait(dentry);
203 298
204 /* We trigger a mount for almost all flags */ 299 /* We trigger a mount for almost all flags */
205 lookup_type = nd->flags & (TRIGGER_FLAGS | TRIGGER_INTENTS); 300 lookup_type = autofs4_need_mount(nd->flags);
206 if (!(lookup_type || dentry->d_flags & DCACHE_AUTOFS_PENDING)) 301 spin_lock(&sbi->fs_lock);
302 spin_lock(&dcache_lock);
303 if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
304 spin_unlock(&dcache_lock);
305 spin_unlock(&sbi->fs_lock);
207 goto follow; 306 goto follow;
307 }
208 308
209 /* 309 /*
210 * If the dentry contains directories then it is an autofs 310 * If the dentry contains directories then it is an autofs
211 * multi-mount with no root mount offset. So don't try to 311 * multi-mount with no root mount offset. So don't try to
212 * mount it again. 312 * mount it again.
213 */ 313 */
214 spin_lock(&dcache_lock); 314 if (ino->flags & AUTOFS_INF_PENDING ||
215 if (dentry->d_flags & DCACHE_AUTOFS_PENDING || 315 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
216 (!d_mountpoint(dentry) && __simple_empty(dentry))) { 316 ino->flags |= AUTOFS_INF_PENDING;
217 spin_unlock(&dcache_lock); 317 spin_unlock(&dcache_lock);
318 spin_unlock(&sbi->fs_lock);
319
320 status = try_to_fill_dentry(dentry);
321
322 spin_lock(&sbi->fs_lock);
323 ino->flags &= ~AUTOFS_INF_PENDING;
324 spin_unlock(&sbi->fs_lock);
218 325
219 status = try_to_fill_dentry(dentry, 0);
220 if (status) 326 if (status)
221 goto out_error; 327 goto out_error;
222 328
223 goto follow; 329 goto follow;
224 } 330 }
225 spin_unlock(&dcache_lock); 331 spin_unlock(&dcache_lock);
332 spin_unlock(&sbi->fs_lock);
226follow: 333follow:
227 /* 334 /*
228 * If there is no root mount it must be an autofs 335 * If there is no root mount it must be an autofs
@@ -254,18 +361,47 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
254{ 361{
255 struct inode *dir = dentry->d_parent->d_inode; 362 struct inode *dir = dentry->d_parent->d_inode;
256 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 363 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
257 int oz_mode = autofs4_oz_mode(sbi); 364 struct autofs_info *ino = autofs4_dentry_ino(dentry);
365 struct rehash_entry *entry;
258 int flags = nd ? nd->flags : 0; 366 int flags = nd ? nd->flags : 0;
259 int status = 1; 367 unsigned int mutex_aquired;
368
369 DPRINTK("name = %.*s oz_mode = %d",
370 dentry->d_name.len, dentry->d_name.name, oz_mode);
371
372 /* Daemon never causes a mount to trigger */
373 if (autofs4_oz_mode(sbi))
374 return 1;
375
376 entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
377 if (!entry)
378 return -ENOMEM;
379
380 mutex_aquired = mutex_trylock(&dir->i_mutex);
260 381
261 /* Pending dentry */
262 spin_lock(&sbi->fs_lock); 382 spin_lock(&sbi->fs_lock);
383 spin_lock(&dcache_lock);
384 /* Pending dentry */
263 if (autofs4_ispending(dentry)) { 385 if (autofs4_ispending(dentry)) {
264 /* The daemon never causes a mount to trigger */ 386 int status;
265 spin_unlock(&sbi->fs_lock);
266 387
267 if (oz_mode) 388 /*
268 return 1; 389 * We can only unhash and send this to ->lookup() if
390 * the directory mutex is held over d_revalidate() and
391 * ->lookup(). This prevents the VFS from incorrectly
392 * seeing the dentry as non-existent.
393 */
394 ino->flags |= AUTOFS_INF_PENDING;
395 if (!mutex_aquired) {
396 autofs4_revalidate_drop(dentry, entry);
397 spin_unlock(&dcache_lock);
398 spin_unlock(&sbi->fs_lock);
399 return 0;
400 }
401 spin_unlock(&dcache_lock);
402 spin_unlock(&sbi->fs_lock);
403 mutex_unlock(&dir->i_mutex);
404 kfree(entry);
269 405
270 /* 406 /*
271 * If the directory has gone away due to an expire 407 * If the directory has gone away due to an expire
@@ -279,46 +415,82 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
279 * A zero status is success otherwise we have a 415 * A zero status is success otherwise we have a
280 * negative error code. 416 * negative error code.
281 */ 417 */
282 status = try_to_fill_dentry(dentry, flags); 418 status = try_to_fill_dentry(dentry);
419
420 spin_lock(&sbi->fs_lock);
421 ino->flags &= ~AUTOFS_INF_PENDING;
422 spin_unlock(&sbi->fs_lock);
423
283 if (status == 0) 424 if (status == 0)
284 return 1; 425 return 1;
285 426
286 return status; 427 return status;
287 } 428 }
288 spin_unlock(&sbi->fs_lock);
289
290 /* Negative dentry.. invalidate if "old" */
291 if (dentry->d_inode == NULL)
292 return 0;
293 429
294 /* Check for a non-mountpoint directory with no contents */ 430 /* Check for a non-mountpoint directory with no contents */
295 spin_lock(&dcache_lock);
296 if (S_ISDIR(dentry->d_inode->i_mode) && 431 if (S_ISDIR(dentry->d_inode->i_mode) &&
297 !d_mountpoint(dentry) && 432 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
298 __simple_empty(dentry)) {
299 DPRINTK("dentry=%p %.*s, emptydir", 433 DPRINTK("dentry=%p %.*s, emptydir",
300 dentry, dentry->d_name.len, dentry->d_name.name); 434 dentry, dentry->d_name.len, dentry->d_name.name);
301 spin_unlock(&dcache_lock);
302 435
303 /* The daemon never causes a mount to trigger */ 436 if (autofs4_need_mount(flags) || current->link_count) {
304 if (oz_mode) 437 int status;
305 return 1;
306 438
307 /* 439 /*
308 * A zero status is success otherwise we have a 440 * We can only unhash and send this to ->lookup() if
309 * negative error code. 441 * the directory mutex is held over d_revalidate() and
310 */ 442 * ->lookup(). This prevents the VFS from incorrectly
311 status = try_to_fill_dentry(dentry, flags); 443 * seeing the dentry as non-existent.
312 if (status == 0) 444 */
313 return 1; 445 ino->flags |= AUTOFS_INF_PENDING;
446 if (!mutex_aquired) {
447 autofs4_revalidate_drop(dentry, entry);
448 spin_unlock(&dcache_lock);
449 spin_unlock(&sbi->fs_lock);
450 return 0;
451 }
452 spin_unlock(&dcache_lock);
453 spin_unlock(&sbi->fs_lock);
454 mutex_unlock(&dir->i_mutex);
455 kfree(entry);
314 456
315 return status; 457 /*
458 * A zero status is success otherwise we have a
459 * negative error code.
460 */
461 status = try_to_fill_dentry(dentry);
462
463 spin_lock(&sbi->fs_lock);
464 ino->flags &= ~AUTOFS_INF_PENDING;
465 spin_unlock(&sbi->fs_lock);
466
467 if (status == 0)
468 return 1;
469
470 return status;
471 }
316 } 472 }
317 spin_unlock(&dcache_lock); 473 spin_unlock(&dcache_lock);
474 spin_unlock(&sbi->fs_lock);
475
476 if (mutex_aquired)
477 mutex_unlock(&dir->i_mutex);
478
479 kfree(entry);
318 480
319 return 1; 481 return 1;
320} 482}
321 483
484static void autofs4_free_rehash_entrys(struct autofs_info *inf)
485{
486 struct list_head *head = &inf->rehash_list;
487 struct rehash_entry *entry, *next;
488 list_for_each_entry_safe(entry, next, head, list) {
489 list_del(&entry->list);
490 kfree(entry);
491 }
492}
493
322void autofs4_dentry_release(struct dentry *de) 494void autofs4_dentry_release(struct dentry *de)
323{ 495{
324 struct autofs_info *inf; 496 struct autofs_info *inf;
@@ -337,6 +509,8 @@ void autofs4_dentry_release(struct dentry *de)
337 list_del(&inf->active); 509 list_del(&inf->active);
338 if (!list_empty(&inf->expiring)) 510 if (!list_empty(&inf->expiring))
339 list_del(&inf->expiring); 511 list_del(&inf->expiring);
512 if (!list_empty(&inf->rehash_list))
513 autofs4_free_rehash_entrys(inf);
340 spin_unlock(&sbi->lookup_lock); 514 spin_unlock(&sbi->lookup_lock);
341 } 515 }
342 516
@@ -359,35 +533,52 @@ static const struct dentry_operations autofs4_dentry_operations = {
359 .d_release = autofs4_dentry_release, 533 .d_release = autofs4_dentry_release,
360}; 534};
361 535
362static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name) 536static struct dentry *autofs4_lookup_active(struct dentry *dentry)
363{ 537{
538 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
539 struct dentry *parent = dentry->d_parent;
540 struct qstr *name = &dentry->d_name;
364 unsigned int len = name->len; 541 unsigned int len = name->len;
365 unsigned int hash = name->hash; 542 unsigned int hash = name->hash;
366 const unsigned char *str = name->name; 543 const unsigned char *str = name->name;
367 struct list_head *p, *head; 544 struct list_head *p, *head;
368 545
546restart:
369 spin_lock(&dcache_lock); 547 spin_lock(&dcache_lock);
370 spin_lock(&sbi->lookup_lock); 548 spin_lock(&sbi->lookup_lock);
371 head = &sbi->active_list; 549 head = &sbi->active_list;
372 list_for_each(p, head) { 550 list_for_each(p, head) {
373 struct autofs_info *ino; 551 struct autofs_info *ino;
374 struct dentry *dentry; 552 struct dentry *active;
375 struct qstr *qstr; 553 struct qstr *qstr;
376 554
377 ino = list_entry(p, struct autofs_info, active); 555 ino = list_entry(p, struct autofs_info, active);
378 dentry = ino->dentry; 556 active = ino->dentry;
379 557
380 spin_lock(&dentry->d_lock); 558 spin_lock(&active->d_lock);
381 559
382 /* Already gone? */ 560 /* Already gone? */
383 if (atomic_read(&dentry->d_count) == 0) 561 if (atomic_read(&active->d_count) == 0)
384 goto next; 562 goto next;
385 563
386 qstr = &dentry->d_name; 564 if (active->d_inode && IS_DEADDIR(active->d_inode)) {
565 if (!list_empty(&ino->rehash_list)) {
566 dget(active);
567 spin_unlock(&active->d_lock);
568 spin_unlock(&sbi->lookup_lock);
569 spin_unlock(&dcache_lock);
570 autofs4_remove_rehash_entrys(ino);
571 dput(active);
572 goto restart;
573 }
574 goto next;
575 }
576
577 qstr = &active->d_name;
387 578
388 if (dentry->d_name.hash != hash) 579 if (active->d_name.hash != hash)
389 goto next; 580 goto next;
390 if (dentry->d_parent != parent) 581 if (active->d_parent != parent)
391 goto next; 582 goto next;
392 583
393 if (qstr->len != len) 584 if (qstr->len != len)
@@ -395,15 +586,13 @@ static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct d
395 if (memcmp(qstr->name, str, len)) 586 if (memcmp(qstr->name, str, len))
396 goto next; 587 goto next;
397 588
398 if (d_unhashed(dentry)) { 589 dget(active);
399 dget(dentry); 590 spin_unlock(&active->d_lock);
400 spin_unlock(&dentry->d_lock); 591 spin_unlock(&sbi->lookup_lock);
401 spin_unlock(&sbi->lookup_lock); 592 spin_unlock(&dcache_lock);
402 spin_unlock(&dcache_lock); 593 return active;
403 return dentry;
404 }
405next: 594next:
406 spin_unlock(&dentry->d_lock); 595 spin_unlock(&active->d_lock);
407 } 596 }
408 spin_unlock(&sbi->lookup_lock); 597 spin_unlock(&sbi->lookup_lock);
409 spin_unlock(&dcache_lock); 598 spin_unlock(&dcache_lock);
@@ -411,8 +600,11 @@ next:
411 return NULL; 600 return NULL;
412} 601}
413 602
414static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name) 603static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
415{ 604{
605 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
606 struct dentry *parent = dentry->d_parent;
607 struct qstr *name = &dentry->d_name;
416 unsigned int len = name->len; 608 unsigned int len = name->len;
417 unsigned int hash = name->hash; 609 unsigned int hash = name->hash;
418 const unsigned char *str = name->name; 610 const unsigned char *str = name->name;
@@ -423,23 +615,23 @@ static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct
423 head = &sbi->expiring_list; 615 head = &sbi->expiring_list;
424 list_for_each(p, head) { 616 list_for_each(p, head) {
425 struct autofs_info *ino; 617 struct autofs_info *ino;
426 struct dentry *dentry; 618 struct dentry *expiring;
427 struct qstr *qstr; 619 struct qstr *qstr;
428 620
429 ino = list_entry(p, struct autofs_info, expiring); 621 ino = list_entry(p, struct autofs_info, expiring);
430 dentry = ino->dentry; 622 expiring = ino->dentry;
431 623
432 spin_lock(&dentry->d_lock); 624 spin_lock(&expiring->d_lock);
433 625
434 /* Bad luck, we've already been dentry_iput */ 626 /* Bad luck, we've already been dentry_iput */
435 if (!dentry->d_inode) 627 if (!expiring->d_inode)
436 goto next; 628 goto next;
437 629
438 qstr = &dentry->d_name; 630 qstr = &expiring->d_name;
439 631
440 if (dentry->d_name.hash != hash) 632 if (expiring->d_name.hash != hash)
441 goto next; 633 goto next;
442 if (dentry->d_parent != parent) 634 if (expiring->d_parent != parent)
443 goto next; 635 goto next;
444 636
445 if (qstr->len != len) 637 if (qstr->len != len)
@@ -447,15 +639,13 @@ static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct
447 if (memcmp(qstr->name, str, len)) 639 if (memcmp(qstr->name, str, len))
448 goto next; 640 goto next;
449 641
450 if (d_unhashed(dentry)) { 642 dget(expiring);
451 dget(dentry); 643 spin_unlock(&expiring->d_lock);
452 spin_unlock(&dentry->d_lock); 644 spin_unlock(&sbi->lookup_lock);
453 spin_unlock(&sbi->lookup_lock); 645 spin_unlock(&dcache_lock);
454 spin_unlock(&dcache_lock); 646 return expiring;
455 return dentry;
456 }
457next: 647next:
458 spin_unlock(&dentry->d_lock); 648 spin_unlock(&expiring->d_lock);
459 } 649 }
460 spin_unlock(&sbi->lookup_lock); 650 spin_unlock(&sbi->lookup_lock);
461 spin_unlock(&dcache_lock); 651 spin_unlock(&dcache_lock);
@@ -463,13 +653,56 @@ next:
463 return NULL; 653 return NULL;
464} 654}
465 655
656static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
657 struct dentry *dentry, int oz_mode)
658{
659 struct autofs_info *ino;
660
661 /*
662 * Mark the dentry incomplete but don't hash it. We do this
663 * to serialize our inode creation operations (symlink and
664 * mkdir) which prevents deadlock during the callback to
665 * the daemon. Subsequent user space lookups for the same
666 * dentry are placed on the wait queue while the daemon
667 * itself is allowed passage unrestricted so the create
668 * operation itself can then hash the dentry. Finally,
669 * we check for the hashed dentry and return the newly
670 * hashed dentry.
671 */
672 dentry->d_op = &autofs4_root_dentry_operations;
673
674 /*
675 * And we need to ensure that the same dentry is used for
676 * all following lookup calls until it is hashed so that
677 * the dentry flags are persistent throughout the request.
678 */
679 ino = autofs4_init_ino(NULL, sbi, 0555);
680 if (!ino)
681 return ERR_PTR(-ENOMEM);
682
683 dentry->d_fsdata = ino;
684 ino->dentry = dentry;
685
686 /*
687 * Only set the mount pending flag for new dentries not created
688 * by the daemon.
689 */
690 if (!oz_mode)
691 ino->flags |= AUTOFS_INF_PENDING;
692
693 d_instantiate(dentry, NULL);
694
695 return ino;
696}
697
466/* Lookups in the root directory */ 698/* Lookups in the root directory */
467static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 699static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
468{ 700{
469 struct autofs_sb_info *sbi; 701 struct autofs_sb_info *sbi;
470 struct autofs_info *ino; 702 struct autofs_info *ino;
471 struct dentry *expiring, *unhashed; 703 struct dentry *expiring, *active;
472 int oz_mode; 704 int oz_mode;
705 int status = 0;
473 706
474 DPRINTK("name = %.*s", 707 DPRINTK("name = %.*s",
475 dentry->d_name.len, dentry->d_name.name); 708 dentry->d_name.len, dentry->d_name.name);
@@ -484,123 +717,100 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
484 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", 717 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
485 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); 718 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
486 719
487 unhashed = autofs4_lookup_active(sbi, dentry->d_parent, &dentry->d_name); 720 spin_lock(&sbi->fs_lock);
488 if (unhashed) 721 active = autofs4_lookup_active(dentry);
489 dentry = unhashed; 722 if (active) {
490 else { 723 dentry = active;
491 /* 724 ino = autofs4_dentry_ino(dentry);
492 * Mark the dentry incomplete but don't hash it. We do this 725 /* If this came from revalidate, rehash it */
493 * to serialize our inode creation operations (symlink and 726 autofs4_revalidate_rehash(dentry);
494 * mkdir) which prevents deadlock during the callback to 727 spin_unlock(&sbi->fs_lock);
495 * the daemon. Subsequent user space lookups for the same 728 } else {
496 * dentry are placed on the wait queue while the daemon 729 spin_unlock(&sbi->fs_lock);
497 * itself is allowed passage unresticted so the create 730 ino = init_new_dentry(sbi, dentry, oz_mode);
498 * operation itself can then hash the dentry. Finally, 731 if (IS_ERR(ino))
499 * we check for the hashed dentry and return the newly 732 return (struct dentry *) ino;
500 * hashed dentry.
501 */
502 dentry->d_op = &autofs4_root_dentry_operations;
503
504 /*
505 * And we need to ensure that the same dentry is used for
506 * all following lookup calls until it is hashed so that
507 * the dentry flags are persistent throughout the request.
508 */
509 ino = autofs4_init_ino(NULL, sbi, 0555);
510 if (!ino)
511 return ERR_PTR(-ENOMEM);
512
513 dentry->d_fsdata = ino;
514 ino->dentry = dentry;
515
516 spin_lock(&sbi->lookup_lock);
517 list_add(&ino->active, &sbi->active_list);
518 spin_unlock(&sbi->lookup_lock);
519
520 d_instantiate(dentry, NULL);
521 } 733 }
522 734
735 autofs4_add_active(dentry);
736
523 if (!oz_mode) { 737 if (!oz_mode) {
738 expiring = autofs4_lookup_expiring(dentry);
524 mutex_unlock(&dir->i_mutex); 739 mutex_unlock(&dir->i_mutex);
525 expiring = autofs4_lookup_expiring(sbi,
526 dentry->d_parent,
527 &dentry->d_name);
528 if (expiring) { 740 if (expiring) {
529 /* 741 /*
530 * If we are racing with expire the request might not 742 * If we are racing with expire the request might not
531 * be quite complete but the directory has been removed 743 * be quite complete but the directory has been removed
532 * so it must have been successful, so just wait for it. 744 * so it must have been successful, so just wait for it.
533 */ 745 */
534 ino = autofs4_dentry_ino(expiring);
535 autofs4_expire_wait(expiring); 746 autofs4_expire_wait(expiring);
536 spin_lock(&sbi->lookup_lock);
537 if (!list_empty(&ino->expiring))
538 list_del_init(&ino->expiring);
539 spin_unlock(&sbi->lookup_lock);
540 dput(expiring); 747 dput(expiring);
541 } 748 }
542 749 status = try_to_fill_dentry(dentry);
543 spin_lock(&dentry->d_lock);
544 dentry->d_flags |= DCACHE_AUTOFS_PENDING;
545 spin_unlock(&dentry->d_lock);
546 if (dentry->d_op && dentry->d_op->d_revalidate)
547 (dentry->d_op->d_revalidate)(dentry, nd);
548 mutex_lock(&dir->i_mutex); 750 mutex_lock(&dir->i_mutex);
751 spin_lock(&sbi->fs_lock);
752 ino->flags &= ~AUTOFS_INF_PENDING;
753 spin_unlock(&sbi->fs_lock);
549 } 754 }
550 755
756 autofs4_del_active(dentry);
757
551 /* 758 /*
552 * If we are still pending, check if we had to handle 759 * If we had a mount fail, check if we had to handle
553 * a signal. If so we can force a restart.. 760 * a signal. If so we can force a restart..
554 */ 761 */
555 if (dentry->d_flags & DCACHE_AUTOFS_PENDING) { 762 if (status) {
556 /* See if we were interrupted */ 763 /* See if we were interrupted */
557 if (signal_pending(current)) { 764 if (signal_pending(current)) {
558 sigset_t *sigset = &current->pending.signal; 765 sigset_t *sigset = &current->pending.signal;
559 if (sigismember (sigset, SIGKILL) || 766 if (sigismember (sigset, SIGKILL) ||
560 sigismember (sigset, SIGQUIT) || 767 sigismember (sigset, SIGQUIT) ||
561 sigismember (sigset, SIGINT)) { 768 sigismember (sigset, SIGINT)) {
562 if (unhashed) 769 if (active)
563 dput(unhashed); 770 dput(active);
564 return ERR_PTR(-ERESTARTNOINTR); 771 return ERR_PTR(-ERESTARTNOINTR);
565 } 772 }
566 } 773 }
567 if (!oz_mode) { 774 }
568 spin_lock(&dentry->d_lock); 775
569 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 776 /*
570 spin_unlock(&dentry->d_lock); 777 * User space can (and has done in the past) remove and re-create
778 * this directory during the callback. This can leave us with an
779 * unhashed dentry, but a successful mount! So we need to
780 * perform another cached lookup in case the dentry now exists.
781 */
782 if (!oz_mode && !have_submounts(dentry)) {
783 struct dentry *new;
784 new = d_lookup(dentry->d_parent, &dentry->d_name);
785 if (new) {
786 if (active)
787 dput(active);
788 return new;
789 } else {
790 if (!status)
791 status = -ENOENT;
571 } 792 }
572 } 793 }
573 794
574 /* 795 /*
575 * If this dentry is unhashed, then we shouldn't honour this 796 * If we had a mount failure, return status to user space.
576 * lookup. Returning ENOENT here doesn't do the right thing 797 * If the mount succeeded and we used a dentry from the active queue
577 * for all system calls, but it should be OK for the operations 798 * return it.
578 * we permit from an autofs.
579 */ 799 */
580 if (!oz_mode && d_unhashed(dentry)) { 800 if (status) {
801 dentry = ERR_PTR(status);
802 if (active)
803 dput(active);
804 return dentry;
805 } else {
581 /* 806 /*
582 * A user space application can (and has done in the past) 807 * Valid successful mount, return active dentry or NULL
583 * remove and re-create this directory during the callback. 808 * for a new dentry.
584 * This can leave us with an unhashed dentry, but a
585 * successful mount! So we need to perform another
586 * cached lookup in case the dentry now exists.
587 */ 809 */
588 struct dentry *parent = dentry->d_parent; 810 if (active)
589 struct dentry *new = d_lookup(parent, &dentry->d_name); 811 return active;
590 if (new != NULL)
591 dentry = new;
592 else
593 dentry = ERR_PTR(-ENOENT);
594
595 if (unhashed)
596 dput(unhashed);
597
598 return dentry;
599 } 812 }
600 813
601 if (unhashed)
602 return unhashed;
603
604 return NULL; 814 return NULL;
605} 815}
606 816
@@ -624,11 +834,6 @@ static int autofs4_dir_symlink(struct inode *dir,
624 if (!ino) 834 if (!ino)
625 return -ENOMEM; 835 return -ENOMEM;
626 836
627 spin_lock(&sbi->lookup_lock);
628 if (!list_empty(&ino->active))
629 list_del_init(&ino->active);
630 spin_unlock(&sbi->lookup_lock);
631
632 ino->size = strlen(symname); 837 ino->size = strlen(symname);
633 cp = kmalloc(ino->size + 1, GFP_KERNEL); 838 cp = kmalloc(ino->size + 1, GFP_KERNEL);
634 if (!cp) { 839 if (!cp) {
@@ -705,10 +910,6 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
705 dir->i_mtime = CURRENT_TIME; 910 dir->i_mtime = CURRENT_TIME;
706 911
707 spin_lock(&dcache_lock); 912 spin_lock(&dcache_lock);
708 spin_lock(&sbi->lookup_lock);
709 if (list_empty(&ino->expiring))
710 list_add(&ino->expiring, &sbi->expiring_list);
711 spin_unlock(&sbi->lookup_lock);
712 spin_lock(&dentry->d_lock); 913 spin_lock(&dentry->d_lock);
713 __d_drop(dentry); 914 __d_drop(dentry);
714 spin_unlock(&dentry->d_lock); 915 spin_unlock(&dentry->d_lock);
@@ -734,10 +935,6 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
734 spin_unlock(&dcache_lock); 935 spin_unlock(&dcache_lock);
735 return -ENOTEMPTY; 936 return -ENOTEMPTY;
736 } 937 }
737 spin_lock(&sbi->lookup_lock);
738 if (list_empty(&ino->expiring))
739 list_add(&ino->expiring, &sbi->expiring_list);
740 spin_unlock(&sbi->lookup_lock);
741 spin_lock(&dentry->d_lock); 938 spin_lock(&dentry->d_lock);
742 __d_drop(dentry); 939 __d_drop(dentry);
743 spin_unlock(&dentry->d_lock); 940 spin_unlock(&dentry->d_lock);
@@ -775,11 +972,6 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
775 if (!ino) 972 if (!ino)
776 return -ENOMEM; 973 return -ENOMEM;
777 974
778 spin_lock(&sbi->lookup_lock);
779 if (!list_empty(&ino->active))
780 list_del_init(&ino->active);
781 spin_unlock(&sbi->lookup_lock);
782
783 inode = autofs4_get_inode(dir->i_sb, ino); 975 inode = autofs4_get_inode(dir->i_sb, ino);
784 if (!inode) { 976 if (!inode) {
785 if (!dentry->d_fsdata) 977 if (!dentry->d_fsdata)
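Throughout the reworked autofs4 paths above, a mount trigger is bracketed by the AUTOFS_INF_PENDING flag taken under sbi->fs_lock instead of DCACHE_AUTOFS_PENDING on the dentry. A minimal sketch of that bracket, reusing the field and function names from the patch; the wrapper function itself is hypothetical:

/*
 * Sketch only: the AUTOFS_INF_PENDING bracket used around
 * try_to_fill_dentry() in the patch above. The wrapper function is
 * hypothetical; sbi->fs_lock, ino->flags and try_to_fill_dentry()
 * are the names used by the patch.
 */
static int autofs4_trigger_mount_sketch(struct dentry *dentry)
{
	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
	struct autofs_info *ino = autofs4_dentry_ino(dentry);
	int status;

	spin_lock(&sbi->fs_lock);
	ino->flags |= AUTOFS_INF_PENDING;	/* mount in progress */
	spin_unlock(&sbi->fs_lock);

	status = try_to_fill_dentry(dentry);	/* may sleep on the daemon */

	spin_lock(&sbi->fs_lock);
	ino->flags &= ~AUTOFS_INF_PENDING;	/* trigger finished */
	spin_unlock(&sbi->fs_lock);

	return status;
}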
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d15ea1790bfb..97b6e9efeb7f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -44,7 +44,7 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
44 * If we don't support core dumping, then supply a NULL so we 44 * If we don't support core dumping, then supply a NULL so we
45 * don't even try. 45 * don't even try.
46 */ 46 */
47#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 47#ifdef CONFIG_ELF_CORE
48static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); 48static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
49#else 49#else
50#define elf_core_dump NULL 50#define elf_core_dump NULL
@@ -1101,12 +1101,7 @@ out:
1101 return error; 1101 return error;
1102} 1102}
1103 1103
1104/* 1104#ifdef CONFIG_ELF_CORE
1105 * Note that some platforms still use traditional core dumps and not
1106 * the ELF core dump. Each platform can select it as appropriate.
1107 */
1108#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
1109
1110/* 1105/*
1111 * ELF core dumper 1106 * ELF core dumper
1112 * 1107 *
@@ -2063,7 +2058,7 @@ out:
2063 return has_dumped; 2058 return has_dumped;
2064} 2059}
2065 2060
2066#endif /* USE_ELF_CORE_DUMP */ 2061#endif /* CONFIG_ELF_CORE */
2067 2062
2068static int __init init_elf_binfmt(void) 2063static int __init init_elf_binfmt(void)
2069{ 2064{
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 38502c67987c..7b055385db8e 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -75,14 +75,14 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *,
75static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *, 75static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *,
76 struct file *, struct mm_struct *); 76 struct file *, struct mm_struct *);
77 77
78#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 78#ifdef CONFIG_ELF_CORE
79static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit); 79static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit);
80#endif 80#endif
81 81
82static struct linux_binfmt elf_fdpic_format = { 82static struct linux_binfmt elf_fdpic_format = {
83 .module = THIS_MODULE, 83 .module = THIS_MODULE,
84 .load_binary = load_elf_fdpic_binary, 84 .load_binary = load_elf_fdpic_binary,
85#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 85#ifdef CONFIG_ELF_CORE
86 .core_dump = elf_fdpic_core_dump, 86 .core_dump = elf_fdpic_core_dump,
87#endif 87#endif
88 .min_coredump = ELF_EXEC_PAGESIZE, 88 .min_coredump = ELF_EXEC_PAGESIZE,
@@ -380,7 +380,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
380 down_write(&current->mm->mmap_sem); 380 down_write(&current->mm->mmap_sem);
381 current->mm->start_brk = do_mmap(NULL, 0, stack_size, 381 current->mm->start_brk = do_mmap(NULL, 0, stack_size,
382 PROT_READ | PROT_WRITE | PROT_EXEC, 382 PROT_READ | PROT_WRITE | PROT_EXEC,
383 MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, 383 MAP_PRIVATE | MAP_ANONYMOUS |
384 MAP_UNINITIALIZED | MAP_GROWSDOWN,
384 0); 385 0);
385 386
386 if (IS_ERR_VALUE(current->mm->start_brk)) { 387 if (IS_ERR_VALUE(current->mm->start_brk)) {
@@ -1200,7 +1201,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
1200 * 1201 *
1201 * Modelled on fs/binfmt_elf.c core dumper 1202 * Modelled on fs/binfmt_elf.c core dumper
1202 */ 1203 */
1203#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 1204#ifdef CONFIG_ELF_CORE
1204 1205
1205/* 1206/*
1206 * These are the only things you should do on a core-file: use only these 1207 * These are the only things you should do on a core-file: use only these
@@ -1825,4 +1826,4 @@ cleanup:
1825#undef NUM_NOTES 1826#undef NUM_NOTES
1826} 1827}
1827 1828
1828#endif /* USE_ELF_CORE_DUMP */ 1829#endif /* CONFIG_ELF_CORE */
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 7bb3c020e570..402afe0a0bfb 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -4,6 +4,7 @@ config BTRFS_FS
4 select LIBCRC32C 4 select LIBCRC32C
5 select ZLIB_INFLATE 5 select ZLIB_INFLATE
6 select ZLIB_DEFLATE 6 select ZLIB_DEFLATE
7 select FS_JOURNAL_INFO
7 help 8 help
8 Btrfs is a new filesystem with extents, writable snapshotting, 9 Btrfs is a new filesystem with extents, writable snapshotting,
9 support for multiple devices and many more features. 10 support for multiple devices and many more features.
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 4618516dd994..c2413561ea75 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -21,6 +21,7 @@
21#include <linux/mount.h> 21#include <linux/mount.h>
22#include <linux/statfs.h> 22#include <linux/statfs.h>
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/string.h>
24#include <linux/fs_struct.h> 25#include <linux/fs_struct.h>
25#include "internal.h" 26#include "internal.h"
26 27
@@ -257,8 +258,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
257 if (args == data) 258 if (args == data)
258 goto error; 259 goto error;
259 *args = '\0'; 260 *args = '\0';
260 for (args++; isspace(*args); args++) 261 args = skip_spaces(++args);
261 continue;
262 } 262 }
263 263
264 /* run the appropriate command handler */ 264 /* run the appropriate command handler */
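skip_spaces(), declared in <linux/string.h>, returns a pointer to the first non-whitespace character and replaces the open-coded isspace() loop removed above. A minimal before/after sketch; the two helper functions are illustrative only:

#include <linux/ctype.h>
#include <linux/string.h>

/* Old style: step over leading whitespace by hand. */
static char *args_old_style(char *args)
{
	for (args++; isspace(*args); args++)
		continue;
	return args;
}

/* New style: same result via skip_spaces(). */
static char *args_new_style(char *args)
{
	return skip_spaces(++args);
}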
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 75949d6a5f1b..6177f7cca16a 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26 /* 26 /*
27 * See Documentation/filesystems/Exporting 27 * See Documentation/filesystems/nfs/Exporting
28 * and examples in fs/exportfs 28 * and examples in fs/exportfs
29 * 29 *
30 * Since cifs is a network file system, an "fsid" must be included for 30 * Since cifs is a network file system, an "fsid" must be included for
diff --git a/fs/compat.c b/fs/compat.c
index 6c19040ffeef..00d90c2e66f0 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -38,8 +38,6 @@
38#include <linux/dirent.h> 38#include <linux/dirent.h>
39#include <linux/fsnotify.h> 39#include <linux/fsnotify.h>
40#include <linux/highuid.h> 40#include <linux/highuid.h>
41#include <linux/sunrpc/svc.h>
42#include <linux/nfsd/nfsd.h>
43#include <linux/nfsd/syscall.h> 41#include <linux/nfsd/syscall.h>
44#include <linux/personality.h> 42#include <linux/personality.h>
45#include <linux/rwsem.h> 43#include <linux/rwsem.h>
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b912270942fa..4012885d027f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -53,13 +53,6 @@
53 * 53 *
54 * If blkfactor is zero then the user's request was aligned to the filesystem's 54 * If blkfactor is zero then the user's request was aligned to the filesystem's
55 * blocksize. 55 * blocksize.
56 *
57 * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
58 * This determines whether we need to do the fancy locking which prevents
59 * direct-IO from being able to read uninitialised disk blocks. If its zero
60 * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
61 * not held for the entire direct write (taken briefly, initially, during a
62 * direct read though, but its never held for the duration of a direct-IO).
63 */ 56 */
64 57
65struct dio { 58struct dio {
@@ -68,7 +61,7 @@ struct dio {
68 struct inode *inode; 61 struct inode *inode;
69 int rw; 62 int rw;
70 loff_t i_size; /* i_size when submitted */ 63 loff_t i_size; /* i_size when submitted */
71 int lock_type; /* doesn't change */ 64 int flags; /* doesn't change */
72 unsigned blkbits; /* doesn't change */ 65 unsigned blkbits; /* doesn't change */
73 unsigned blkfactor; /* When we're using an alignment which 66 unsigned blkfactor; /* When we're using an alignment which
74 is finer than the filesystem's soft 67 is finer than the filesystem's soft
@@ -104,6 +97,18 @@ struct dio {
104 unsigned cur_page_len; /* Nr of bytes at cur_page_offset */ 97 unsigned cur_page_len; /* Nr of bytes at cur_page_offset */
105 sector_t cur_page_block; /* Where it starts */ 98 sector_t cur_page_block; /* Where it starts */
106 99
100 /* BIO completion state */
101 spinlock_t bio_lock; /* protects BIO fields below */
102 unsigned long refcount; /* direct_io_worker() and bios */
103 struct bio *bio_list; /* singly linked via bi_private */
104 struct task_struct *waiter; /* waiting task (NULL if none) */
105
106 /* AIO related stuff */
107 struct kiocb *iocb; /* kiocb */
108 int is_async; /* is IO async ? */
109 int io_error; /* IO error in completion path */
110 ssize_t result; /* IO result */
111
107 /* 112 /*
108 * Page fetching state. These variables belong to dio_refill_pages(). 113 * Page fetching state. These variables belong to dio_refill_pages().
109 */ 114 */
@@ -115,22 +120,16 @@ struct dio {
115 * Page queue. These variables belong to dio_refill_pages() and 120 * Page queue. These variables belong to dio_refill_pages() and
116 * dio_get_page(). 121 * dio_get_page().
117 */ 122 */
118 struct page *pages[DIO_PAGES]; /* page buffer */
119 unsigned head; /* next page to process */ 123 unsigned head; /* next page to process */
120 unsigned tail; /* last valid page + 1 */ 124 unsigned tail; /* last valid page + 1 */
121 int page_errors; /* errno from get_user_pages() */ 125 int page_errors; /* errno from get_user_pages() */
122 126
123 /* BIO completion state */ 127 /*
124 spinlock_t bio_lock; /* protects BIO fields below */ 128 * pages[] (and any fields placed after it) are not zeroed out at
125 unsigned long refcount; /* direct_io_worker() and bios */ 129 * allocation time. Don't add new fields after pages[] unless you
126 struct bio *bio_list; /* singly linked via bi_private */ 130 * wish that they not be zeroed.
127 struct task_struct *waiter; /* waiting task (NULL if none) */ 131 */
128 132 struct page *pages[DIO_PAGES]; /* page buffer */
129 /* AIO related stuff */
130 struct kiocb *iocb; /* kiocb */
131 int is_async; /* is IO async ? */
132 int io_error; /* IO error in completion path */
133 ssize_t result; /* IO result */
134}; 133};
135 134
136/* 135/*
@@ -240,7 +239,8 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
240 if (dio->end_io && dio->result) 239 if (dio->end_io && dio->result)
241 dio->end_io(dio->iocb, offset, transferred, 240 dio->end_io(dio->iocb, offset, transferred,
242 dio->map_bh.b_private); 241 dio->map_bh.b_private);
243 if (dio->lock_type == DIO_LOCKING) 242
243 if (dio->flags & DIO_LOCKING)
244 /* lockdep: non-owner release */ 244 /* lockdep: non-owner release */
245 up_read_non_owner(&dio->inode->i_alloc_sem); 245 up_read_non_owner(&dio->inode->i_alloc_sem);
246 246
@@ -515,21 +515,24 @@ static int get_more_blocks(struct dio *dio)
515 map_bh->b_state = 0; 515 map_bh->b_state = 0;
516 map_bh->b_size = fs_count << dio->inode->i_blkbits; 516 map_bh->b_size = fs_count << dio->inode->i_blkbits;
517 517
518 /*
519 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
520 * forbid block creations: only overwrites are permitted.
521 * We will return early to the caller once we see an
522 * unmapped buffer head returned, and the caller will fall
523 * back to buffered I/O.
524 *
525 * Otherwise the decision is left to the get_blocks method,
526 * which may decide to handle it or also return an unmapped
527 * buffer head.
528 */
518 create = dio->rw & WRITE; 529 create = dio->rw & WRITE;
519 if (dio->lock_type == DIO_LOCKING) { 530 if (dio->flags & DIO_SKIP_HOLES) {
520 if (dio->block_in_file < (i_size_read(dio->inode) >> 531 if (dio->block_in_file < (i_size_read(dio->inode) >>
521 dio->blkbits)) 532 dio->blkbits))
522 create = 0; 533 create = 0;
523 } else if (dio->lock_type == DIO_NO_LOCKING) {
524 create = 0;
525 } 534 }
526 535
527 /*
528 * For writes inside i_size we forbid block creations: only
529 * overwrites are permitted. We fall back to buffered writes
530 * at a higher level for inside-i_size block-instantiating
531 * writes.
532 */
533 ret = (*dio->get_block)(dio->inode, fs_startblk, 536 ret = (*dio->get_block)(dio->inode, fs_startblk,
534 map_bh, create); 537 map_bh, create);
535 } 538 }
@@ -1039,7 +1042,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1039 * we can let i_mutex go now that its achieved its purpose 1042 * we can let i_mutex go now that its achieved its purpose
1040 * of protecting us from looking up uninitialized blocks. 1043 * of protecting us from looking up uninitialized blocks.
1041 */ 1044 */
1042 if ((rw == READ) && (dio->lock_type == DIO_LOCKING)) 1045 if (rw == READ && (dio->flags & DIO_LOCKING))
1043 mutex_unlock(&dio->inode->i_mutex); 1046 mutex_unlock(&dio->inode->i_mutex);
1044 1047
1045 /* 1048 /*
@@ -1086,30 +1089,28 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1086 1089
1087/* 1090/*
1088 * This is a library function for use by filesystem drivers. 1091 * This is a library function for use by filesystem drivers.
1089 * The locking rules are governed by the dio_lock_type parameter.
1090 * 1092 *
1091 * DIO_NO_LOCKING (no locking, for raw block device access) 1093 * The locking rules are governed by the flags parameter:
1092 * For writes, i_mutex is not held on entry; it is never taken. 1094 * - if the flags value contains DIO_LOCKING we use a fancy locking
1095 * scheme for dumb filesystems.
1096 * For writes this function is called under i_mutex and returns with
1097 * i_mutex held, for reads, i_mutex is not held on entry, but it is
1098 * taken and dropped again before returning.
1099 * For reads and writes i_alloc_sem is taken in shared mode and released
1100 * on I/O completion (which may happen asynchronously after returning to
1101 * the caller).
1093 * 1102 *
1094 * DIO_LOCKING (simple locking for regular files) 1103 * - if the flags value does NOT contain DIO_LOCKING we don't use any
1095 * For writes we are called under i_mutex and return with i_mutex held, even 1104 * internal locking but rather rely on the filesystem to synchronize
1096 * though it is internally dropped. 1105 * direct I/O reads/writes versus each other and truncate.
1097 * For reads, i_mutex is not held on entry, but it is taken and dropped before 1106 * For reads and writes both i_mutex and i_alloc_sem are not held on
1098 * returning. 1107 * entry and are never taken.
1099 *
1100 * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
1101 * uninitialised data, allowing parallel direct readers and writers)
1102 * For writes we are called without i_mutex, return without it, never touch it.
1103 * For reads we are called under i_mutex and return with i_mutex held, even
1104 * though it may be internally dropped.
1105 *
1106 * Additional i_alloc_sem locking requirements described inline below.
1107 */ 1108 */
1108ssize_t 1109ssize_t
1109__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 1110__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1110 struct block_device *bdev, const struct iovec *iov, loff_t offset, 1111 struct block_device *bdev, const struct iovec *iov, loff_t offset,
1111 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, 1112 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
1112 int dio_lock_type) 1113 int flags)
1113{ 1114{
1114 int seg; 1115 int seg;
1115 size_t size; 1116 size_t size;
@@ -1120,8 +1121,6 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1120 ssize_t retval = -EINVAL; 1121 ssize_t retval = -EINVAL;
1121 loff_t end = offset; 1122 loff_t end = offset;
1122 struct dio *dio; 1123 struct dio *dio;
1123 int release_i_mutex = 0;
1124 int acquire_i_mutex = 0;
1125 1124
1126 if (rw & WRITE) 1125 if (rw & WRITE)
1127 rw = WRITE_ODIRECT_PLUG; 1126 rw = WRITE_ODIRECT_PLUG;
@@ -1151,48 +1150,41 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1151 } 1150 }
1152 } 1151 }
1153 1152
1154 dio = kzalloc(sizeof(*dio), GFP_KERNEL); 1153 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1155 retval = -ENOMEM; 1154 retval = -ENOMEM;
1156 if (!dio) 1155 if (!dio)
1157 goto out; 1156 goto out;
1158
1159 /* 1157 /*
1160 * For block device access DIO_NO_LOCKING is used, 1158 * Believe it or not, zeroing out the page array caused a .5%
1161 * neither readers nor writers do any locking at all 1159 * performance regression in a database benchmark. So, we take
1162 * For regular files using DIO_LOCKING, 1160 * care to only zero out what's needed.
1163 * readers need to grab i_mutex and i_alloc_sem
1164 * writers need to grab i_alloc_sem only (i_mutex is already held)
1165 * For regular files using DIO_OWN_LOCKING,
1166 * neither readers nor writers take any locks here
1167 */ 1161 */
1168 dio->lock_type = dio_lock_type; 1162 memset(dio, 0, offsetof(struct dio, pages));
1169 if (dio_lock_type != DIO_NO_LOCKING) { 1163
1164 dio->flags = flags;
1165 if (dio->flags & DIO_LOCKING) {
1170 /* watch out for a 0 len io from a tricksy fs */ 1166 /* watch out for a 0 len io from a tricksy fs */
1171 if (rw == READ && end > offset) { 1167 if (rw == READ && end > offset) {
1172 struct address_space *mapping; 1168 struct address_space *mapping =
1169 iocb->ki_filp->f_mapping;
1173 1170
1174 mapping = iocb->ki_filp->f_mapping; 1171 /* will be released by direct_io_worker */
1175 if (dio_lock_type != DIO_OWN_LOCKING) { 1172 mutex_lock(&inode->i_mutex);
1176 mutex_lock(&inode->i_mutex);
1177 release_i_mutex = 1;
1178 }
1179 1173
1180 retval = filemap_write_and_wait_range(mapping, offset, 1174 retval = filemap_write_and_wait_range(mapping, offset,
1181 end - 1); 1175 end - 1);
1182 if (retval) { 1176 if (retval) {
1177 mutex_unlock(&inode->i_mutex);
1183 kfree(dio); 1178 kfree(dio);
1184 goto out; 1179 goto out;
1185 } 1180 }
1186
1187 if (dio_lock_type == DIO_OWN_LOCKING) {
1188 mutex_unlock(&inode->i_mutex);
1189 acquire_i_mutex = 1;
1190 }
1191 } 1181 }
1192 1182
1193 if (dio_lock_type == DIO_LOCKING) 1183 /*
1194 /* lockdep: not the owner will release it */ 1184 * Will be released at I/O completion, possibly in a
1195 down_read_non_owner(&inode->i_alloc_sem); 1185 * different thread.
1186 */
1187 down_read_non_owner(&inode->i_alloc_sem);
1196 } 1188 }
1197 1189
1198 /* 1190 /*
@@ -1210,24 +1202,19 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1210 /* 1202 /*
1211 * In case of error extending write may have instantiated a few 1203 * In case of error extending write may have instantiated a few
1212 * blocks outside i_size. Trim these off again for DIO_LOCKING. 1204 * blocks outside i_size. Trim these off again for DIO_LOCKING.
1213 * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this by 1205 *
1214 * it's own meaner. 1206 * NOTE: filesystems with their own locking have to handle this
1207 * on their own.
1215 */ 1208 */
1216 if (unlikely(retval < 0 && (rw & WRITE))) { 1209 if (dio->flags & DIO_LOCKING) {
1217 loff_t isize = i_size_read(inode); 1210 if (unlikely((rw & WRITE) && retval < 0)) {
1218 1211 loff_t isize = i_size_read(inode);
1219 if (end > isize && dio_lock_type == DIO_LOCKING) 1212 if (end > isize)
1220 vmtruncate(inode, isize); 1213 vmtruncate(inode, isize);
1214 }
1221 } 1215 }
1222 1216
1223 if (rw == READ && dio_lock_type == DIO_LOCKING)
1224 release_i_mutex = 0;
1225
1226out: 1217out:
1227 if (release_i_mutex)
1228 mutex_unlock(&inode->i_mutex);
1229 else if (acquire_i_mutex)
1230 mutex_lock(&inode->i_mutex);
1231 return retval; 1218 return retval;
1232} 1219}
1233EXPORT_SYMBOL(__blockdev_direct_IO); 1220EXPORT_SYMBOL(__blockdev_direct_IO);
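With lock_type replaced by a flags bitmask, a filesystem now chooses its locking model by OR-ing DIO_* flags into the last argument of __blockdev_direct_IO(). A minimal sketch of a ->direct_IO method for a filesystem that wants the classic behaviour; myfs_direct_IO and my_get_block are placeholders, only the __blockdev_direct_IO() call reflects the interface shown in the patch:

/* Placeholder get_block callback; any filesystem mapping routine with
 * the standard get_block_t signature would go here. */
static int my_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* DIO_LOCKING: take i_mutex/i_alloc_sem for a "dumb" filesystem;
	 * DIO_SKIP_HOLES: never instantiate blocks inside i_size on write. */
	return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				    iov, offset, nr_segs, my_get_block,
				    NULL /* no end_io callback */,
				    DIO_LOCKING | DIO_SKIP_HOLES);
}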
diff --git a/fs/exec.c b/fs/exec.c
index c0c636e34f60..623a5cc3076a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -923,6 +923,15 @@ char *get_task_comm(char *buf, struct task_struct *tsk)
923void set_task_comm(struct task_struct *tsk, char *buf) 923void set_task_comm(struct task_struct *tsk, char *buf)
924{ 924{
925 task_lock(tsk); 925 task_lock(tsk);
926
927 /*
928 * Threads may access current->comm without holding
929 * the task lock, so write the string carefully.
930 * Readers without a lock may see incomplete new
931 * names but are safe from non-terminating string reads.
932 */
933 memset(tsk->comm, 0, TASK_COMM_LEN);
934 wmb();
926 strlcpy(tsk->comm, buf, sizeof(tsk->comm)); 935 strlcpy(tsk->comm, buf, sizeof(tsk->comm));
927 task_unlock(tsk); 936 task_unlock(tsk);
928 perf_event_comm(tsk); 937 perf_event_comm(tsk);
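The memset() plus wmb() before strlcpy() means a reader that copies current->comm without task_lock() can observe a mixed or truncated name but never an unterminated one. A small illustrative reader under that assumption; the function itself is not part of the patch:

/*
 * Illustrative only: lock-free read of another task's comm. With the
 * set_task_comm() above, the source is always NUL-terminated within
 * TASK_COMM_LEN bytes, even while a rename is in flight.
 */
static void read_comm_unlocked(struct task_struct *tsk,
			       char buf[TASK_COMM_LEN])
{
	strncpy(buf, tsk->comm, TASK_COMM_LEN);
	buf[TASK_COMM_LEN - 1] = '\0';
}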
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 197c7db583c7..e9e175949a63 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -6,7 +6,7 @@
6 * and for mapping back from file handles to dentries. 6 * and for mapping back from file handles to dentries.
7 * 7 *
8 * For details on why we do all the strange and hairy things in here 8 * For details on why we do all the strange and hairy things in here
9 * take a look at Documentation/filesystems/Exporting. 9 * take a look at Documentation/filesystems/nfs/Exporting.
10 */ 10 */
11#include <linux/exportfs.h> 11#include <linux/exportfs.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index fc2bd05d3559..7516957273ed 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -721,5 +721,5 @@ const struct file_operations ext2_dir_operations = {
721#ifdef CONFIG_COMPAT 721#ifdef CONFIG_COMPAT
722 .compat_ioctl = ext2_compat_ioctl, 722 .compat_ioctl = ext2_compat_ioctl,
723#endif 723#endif
724 .fsync = simple_fsync, 724 .fsync = ext2_fsync,
725}; 725};
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index da318b0fa637..061914add3cf 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -155,6 +155,7 @@ extern void ext2_write_super (struct super_block *);
155extern const struct file_operations ext2_dir_operations; 155extern const struct file_operations ext2_dir_operations;
156 156
157/* file.c */ 157/* file.c */
158extern int ext2_fsync(struct file *file, struct dentry *dentry, int datasync);
158extern const struct inode_operations ext2_file_inode_operations; 159extern const struct inode_operations ext2_file_inode_operations;
159extern const struct file_operations ext2_file_operations; 160extern const struct file_operations ext2_file_operations;
160extern const struct file_operations ext2_xip_file_operations; 161extern const struct file_operations ext2_xip_file_operations;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index a2f3afd1a1c1..586e3589d4c2 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/pagemap.h>
22#include "ext2.h" 23#include "ext2.h"
23#include "xattr.h" 24#include "xattr.h"
24#include "acl.h" 25#include "acl.h"
@@ -38,6 +39,22 @@ static int ext2_release_file (struct inode * inode, struct file * filp)
38 return 0; 39 return 0;
39} 40}
40 41
42int ext2_fsync(struct file *file, struct dentry *dentry, int datasync)
43{
44 int ret;
45 struct super_block *sb = dentry->d_inode->i_sb;
46 struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
47
48 ret = simple_fsync(file, dentry, datasync);
49 if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
50 /* We don't really know where the IO error happened... */
51 ext2_error(sb, __func__,
52 "detected IO error when writing metadata buffers");
53 ret = -EIO;
54 }
55 return ret;
56}
57
41/* 58/*
42 * We have mostly NULL's here: the current defaults are ok for 59 * We have mostly NULL's here: the current defaults are ok for
43 * the ext2 filesystem. 60 * the ext2 filesystem.
@@ -55,7 +72,7 @@ const struct file_operations ext2_file_operations = {
55 .mmap = generic_file_mmap, 72 .mmap = generic_file_mmap,
56 .open = generic_file_open, 73 .open = generic_file_open,
57 .release = ext2_release_file, 74 .release = ext2_release_file,
58 .fsync = simple_fsync, 75 .fsync = ext2_fsync,
59 .splice_read = generic_file_splice_read, 76 .splice_read = generic_file_splice_read,
60 .splice_write = generic_file_splice_write, 77 .splice_write = generic_file_splice_write,
61}; 78};
@@ -72,7 +89,7 @@ const struct file_operations ext2_xip_file_operations = {
72 .mmap = xip_file_mmap, 89 .mmap = xip_file_mmap,
73 .open = generic_file_open, 90 .open = generic_file_open,
74 .release = ext2_release_file, 91 .release = ext2_release_file,
75 .fsync = simple_fsync, 92 .fsync = ext2_fsync,
76}; 93};
77#endif 94#endif
78 95
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 1388802b7803..f9cb54a585ce 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1105,9 +1105,30 @@ failed_sbi:
1105 return ret; 1105 return ret;
1106} 1106}
1107 1107
1108static void ext2_clear_super_error(struct super_block *sb)
1109{
1110 struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
1111
1112 if (buffer_write_io_error(sbh)) {
1113 /*
1114 * Oh, dear. A previous attempt to write the
1115 * superblock failed. This could happen because the
1116 * USB device was yanked out. Or it could happen to
1117 * be a transient write error and maybe the block will
1118 * be remapped. Nothing we can do but to retry the
1119 * write and hope for the best.
1120 */
1121 printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
1122 "superblock detected", sb->s_id);
1123 clear_buffer_write_io_error(sbh);
1124 set_buffer_uptodate(sbh);
1125 }
1126}
1127
1108static void ext2_commit_super (struct super_block * sb, 1128static void ext2_commit_super (struct super_block * sb,
1109 struct ext2_super_block * es) 1129 struct ext2_super_block * es)
1110{ 1130{
1131 ext2_clear_super_error(sb);
1111 es->s_wtime = cpu_to_le32(get_seconds()); 1132 es->s_wtime = cpu_to_le32(get_seconds());
1112 mark_buffer_dirty(EXT2_SB(sb)->s_sbh); 1133 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1113 sb->s_dirt = 0; 1134 sb->s_dirt = 0;
@@ -1115,6 +1136,7 @@ static void ext2_commit_super (struct super_block * sb,
1115 1136
1116static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es) 1137static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
1117{ 1138{
1139 ext2_clear_super_error(sb);
1118 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb)); 1140 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
1119 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb)); 1141 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
1120 es->s_wtime = cpu_to_le32(get_seconds()); 1142 es->s_wtime = cpu_to_le32(get_seconds());
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 9acf7e808139..e5f6774846e4 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -2,6 +2,7 @@ config EXT4_FS
2 tristate "The Extended 4 (ext4) filesystem" 2 tristate "The Extended 4 (ext4) filesystem"
3 select JBD2 3 select JBD2
4 select CRC16 4 select CRC16
5 select FS_JOURNAL_INFO
5 help 6 help
6 This is the next generation of the ext3 filesystem. 7 This is the next generation of the ext3 filesystem.
7 8
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 768c111a77ec..827bde1f2594 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2137,11 +2137,8 @@ static int parse_strtoul(const char *buf,
2137{ 2137{
2138 char *endp; 2138 char *endp;
2139 2139
2140 while (*buf && isspace(*buf)) 2140 *value = simple_strtoul(skip_spaces(buf), &endp, 0);
2141 buf++; 2141 endp = skip_spaces(endp);
2142 *value = simple_strtoul(buf, &endp, 0);
2143 while (*endp && isspace(*endp))
2144 endp++;
2145 if (*endp || *value > max) 2142 if (*endp || *value > max)
2146 return -EINVAL; 2143 return -EINVAL;
2147 2144
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 7db0979c6b72..e6efdfa0f6db 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -44,7 +44,8 @@ struct fat_mount_options {
44 nocase:1, /* Does this need case conversion? 0=need case conversion*/ 44 nocase:1, /* Does this need case conversion? 0=need case conversion*/
45 usefree:1, /* Use free_clusters for FAT32 */ 45 usefree:1, /* Use free_clusters for FAT32 */
46 tz_utc:1, /* Filesystem timestamps are in UTC */ 46 tz_utc:1, /* Filesystem timestamps are in UTC */
47 rodir:1; /* allow ATTR_RO for directory */ 47 rodir:1, /* allow ATTR_RO for directory */
48 discard:1; /* Issue discard requests on deletions */
48}; 49};
49 50
50#define FAT_HASH_BITS 8 51#define FAT_HASH_BITS 8
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index a81037721a6f..81184d3b75a3 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -566,16 +566,21 @@ int fat_free_clusters(struct inode *inode, int cluster)
566 goto error; 566 goto error;
567 } 567 }
568 568
569 /* 569 if (sbi->options.discard) {
570 * Issue discard for the sectors we no longer care about, 570 /*
571 * batching contiguous clusters into one request 571 * Issue discard for the sectors we no longer
572 */ 572 * care about, batching contiguous clusters
573 if (cluster != fatent.entry + 1) { 573 * into one request
574 int nr_clus = fatent.entry - first_cl + 1; 574 */
575 575 if (cluster != fatent.entry + 1) {
576 sb_issue_discard(sb, fat_clus_to_blknr(sbi, first_cl), 576 int nr_clus = fatent.entry - first_cl + 1;
577 nr_clus * sbi->sec_per_clus); 577
578 first_cl = cluster; 578 sb_issue_discard(sb,
579 fat_clus_to_blknr(sbi, first_cl),
580 nr_clus * sbi->sec_per_clus);
581
582 first_cl = cluster;
583 }
579 } 584 }
580 585
581 ops->ent_put(&fatent, FAT_ENT_FREE); 586 ops->ent_put(&fatent, FAT_ENT_FREE);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 76b7961ab663..14da530b05ca 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -858,6 +858,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
858 seq_puts(m, ",errors=panic"); 858 seq_puts(m, ",errors=panic");
859 else 859 else
860 seq_puts(m, ",errors=remount-ro"); 860 seq_puts(m, ",errors=remount-ro");
861 if (opts->discard)
862 seq_puts(m, ",discard");
861 863
862 return 0; 864 return 0;
863} 865}
@@ -871,7 +873,7 @@ enum {
871 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes, 873 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
872 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes, 874 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
873 Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont, 875 Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
874 Opt_err_panic, Opt_err_ro, Opt_err, 876 Opt_err_panic, Opt_err_ro, Opt_discard, Opt_err,
875}; 877};
876 878
877static const match_table_t fat_tokens = { 879static const match_table_t fat_tokens = {
@@ -899,6 +901,7 @@ static const match_table_t fat_tokens = {
899 {Opt_err_cont, "errors=continue"}, 901 {Opt_err_cont, "errors=continue"},
900 {Opt_err_panic, "errors=panic"}, 902 {Opt_err_panic, "errors=panic"},
901 {Opt_err_ro, "errors=remount-ro"}, 903 {Opt_err_ro, "errors=remount-ro"},
904 {Opt_discard, "discard"},
902 {Opt_obsolate, "conv=binary"}, 905 {Opt_obsolate, "conv=binary"},
903 {Opt_obsolate, "conv=text"}, 906 {Opt_obsolate, "conv=text"},
904 {Opt_obsolate, "conv=auto"}, 907 {Opt_obsolate, "conv=auto"},
@@ -1136,6 +1139,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
1136 case Opt_rodir: 1139 case Opt_rodir:
1137 opts->rodir = 1; 1140 opts->rodir = 1;
1138 break; 1141 break;
1142 case Opt_discard:
1143 opts->discard = 1;
1144 break;
1139 1145
1140 /* obsolete mount options */ 1146 /* obsolete mount options */
1141 case Opt_obsolate: 1147 case Opt_obsolate:
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 0f55f5cb732f..d3da05f26465 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/buffer_head.h> 11#include <linux/buffer_head.h>
12#include <linux/time.h>
12#include "fat.h" 13#include "fat.h"
13 14
14/* 15/*
@@ -157,10 +158,6 @@ extern struct timezone sys_tz;
157#define SECS_PER_MIN 60 158#define SECS_PER_MIN 60
158#define SECS_PER_HOUR (60 * 60) 159#define SECS_PER_HOUR (60 * 60)
159#define SECS_PER_DAY (SECS_PER_HOUR * 24) 160#define SECS_PER_DAY (SECS_PER_HOUR * 24)
160#define UNIX_SECS_1980 315532800L
161#if BITS_PER_LONG == 64
162#define UNIX_SECS_2108 4354819200L
163#endif
164/* days between 1.1.70 and 1.1.80 (2 leap days) */ 161/* days between 1.1.70 and 1.1.80 (2 leap days) */
165#define DAYS_DELTA (365 * 10 + 2) 162#define DAYS_DELTA (365 * 10 + 2)
166/* 120 (2100 - 1980) isn't leap year */ 163/* 120 (2100 - 1980) isn't leap year */
@@ -213,58 +210,35 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
213void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts, 210void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
214 __le16 *time, __le16 *date, u8 *time_cs) 211 __le16 *time, __le16 *date, u8 *time_cs)
215{ 212{
216 time_t second = ts->tv_sec; 213 struct tm tm;
217 time_t day, leap_day, month, year; 214 time_to_tm(ts->tv_sec, sbi->options.tz_utc ? 0 :
215 -sys_tz.tz_minuteswest * 60, &tm);
218 216
219 if (!sbi->options.tz_utc) 217 /* FAT can only support year between 1980 to 2107 */
220 second -= sys_tz.tz_minuteswest * SECS_PER_MIN; 218 if (tm.tm_year < 1980 - 1900) {
221
222 /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
223 if (second < UNIX_SECS_1980) {
224 *time = 0; 219 *time = 0;
225 *date = cpu_to_le16((0 << 9) | (1 << 5) | 1); 220 *date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
226 if (time_cs) 221 if (time_cs)
227 *time_cs = 0; 222 *time_cs = 0;
228 return; 223 return;
229 } 224 }
230#if BITS_PER_LONG == 64 225 if (tm.tm_year > 2107 - 1900) {
231 if (second >= UNIX_SECS_2108) {
232 *time = cpu_to_le16((23 << 11) | (59 << 5) | 29); 226 *time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
233 *date = cpu_to_le16((127 << 9) | (12 << 5) | 31); 227 *date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
234 if (time_cs) 228 if (time_cs)
235 *time_cs = 199; 229 *time_cs = 199;
236 return; 230 return;
237 } 231 }
238#endif
239 232
240 day = second / SECS_PER_DAY - DAYS_DELTA; 233 /* from 1900 -> from 1980 */
241 year = day / 365; 234 tm.tm_year -= 80;
242 leap_day = (year + 3) / 4; 235 /* 0~11 -> 1~12 */
243 if (year > YEAR_2100) /* 2100 isn't leap year */ 236 tm.tm_mon++;
244 leap_day--; 237 /* 0~59 -> 0~29(2sec counts) */
245 if (year * 365 + leap_day > day) 238 tm.tm_sec >>= 1;
246 year--;
247 leap_day = (year + 3) / 4;
248 if (year > YEAR_2100) /* 2100 isn't leap year */
249 leap_day--;
250 day -= year * 365 + leap_day;
251
252 if (IS_LEAP_YEAR(year) && day == days_in_year[3]) {
253 month = 2;
254 } else {
255 if (IS_LEAP_YEAR(year) && day > days_in_year[3])
256 day--;
257 for (month = 1; month < 12; month++) {
258 if (days_in_year[month + 1] > day)
259 break;
260 }
261 }
262 day -= days_in_year[month];
263 239
264 *time = cpu_to_le16(((second / SECS_PER_HOUR) % 24) << 11 240 *time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec);
265 | ((second / SECS_PER_MIN) % 60) << 5 241 *date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday);
266 | (second % SECS_PER_MIN) >> 1);
267 *date = cpu_to_le16((year << 9) | (month << 5) | (day + 1));
268 if (time_cs) 242 if (time_cs)
269 *time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000; 243 *time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
270} 244}
@@ -285,4 +259,3 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
285 } 259 }
286 return err; 260 return err;
287} 261}
288
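Note: the misc.c rewrite above replaces hand-rolled calendar math with time_to_tm() and then packs the result into FAT's 16-bit on-disk date/time words. The bit layout implied by the shifts in the new code is: date = years-since-1980 << 9 | month << 5 | day, and time = hour << 11 | minute << 5 | seconds/2. A small userspace-style sketch of that packing and unpacking (hypothetical helper names):

#include <stdint.h>

/* pack: year is years since 1980, month 1..12, day 1..31 */
static uint16_t fat_pack_date(unsigned year, unsigned month, unsigned day)
{
        return (year << 9) | (month << 5) | day;
}

static uint16_t fat_pack_time(unsigned hour, unsigned min, unsigned sec)
{
        return (hour << 11) | (min << 5) | (sec >> 1);  /* 2-second units */
}

/* unpack the same fields back out of a date word */
static void fat_unpack_date(uint16_t d, unsigned *year, unsigned *month, unsigned *day)
{
        *year  = d >> 9;           /* add 1980 for the calendar year */
        *month = (d >> 5) & 0x0f;
        *day   = d & 0x1f;
}
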
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e590242fa41a..3221a0c7944e 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(fscache_object_destroy);
91 */ 91 */
92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos) 92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
93{ 93{
94 struct fscache_object *pobj, *obj, *minobj = NULL; 94 struct fscache_object *pobj, *obj = NULL, *minobj = NULL;
95 struct rb_node *p; 95 struct rb_node *p;
96 unsigned long pos; 96 unsigned long pos;
97 97
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 4dcddf83326f..b192c661caa6 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -10,6 +10,7 @@ config GFS2_FS
10 select SLOW_WORK 10 select SLOW_WORK
11 select QUOTA 11 select QUOTA
12 select QUOTACTL 12 select QUOTACTL
13 select FS_JOURNAL_INFO
13 help 14 help
14 A cluster filesystem. 15 A cluster filesystem.
15 16
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c5dad1eb7b91..0dc34621f6a6 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -85,11 +85,7 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
85 buf[0] = '\0'; 85 buf[0] = '\0';
86 if (!gfs2_uuid_valid(uuid)) 86 if (!gfs2_uuid_valid(uuid))
87 return 0; 87 return 0;
88 return snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X-%02X%02X-" 88 return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
89 "%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n",
90 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
91 uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
92 uuid[12], uuid[13], uuid[14], uuid[15]);
93} 89}
94 90
95static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf) 91static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -575,14 +571,8 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
575 add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); 571 add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
576 if (!sdp->sd_args.ar_spectator) 572 if (!sdp->sd_args.ar_spectator)
577 add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); 573 add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
578 if (gfs2_uuid_valid(uuid)) { 574 if (gfs2_uuid_valid(uuid))
579 add_uevent_var(env, "UUID=%02X%02X%02X%02X-%02X%02X-%02X%02X-" 575 add_uevent_var(env, "UUID=%pUB", uuid);
580 "%02X%02X-%02X%02X%02X%02X%02X%02X",
581 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4],
582 uuid[5], uuid[6], uuid[7], uuid[8], uuid[9],
583 uuid[10], uuid[11], uuid[12], uuid[13],
584 uuid[14], uuid[15]);
585 }
586 return 0; 576 return 0;
587} 577}
588 578
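Note: both gfs2 hunks above swap a 16-argument snprintf() for the kernel's "%pUB" printk extension, which prints a 16-byte buffer as an upper-case UUID with dashes in 8-4-4-4-12 grouping and big-endian byte order. A userspace approximation of what the removed code did, kept only for reference (hypothetical helper; in the kernel the whole thing is the one-line "%pUB" format):

#include <stdio.h>

/* format 16 raw bytes as XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX */
static void format_uuid_be(const unsigned char u[16], char out[37])
{
        snprintf(out, 37,
                 "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
                 u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
                 u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
}
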
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 6d98f116ca03..424b0337f524 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -289,6 +289,10 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
289 err = hfs_brec_find(&src_fd); 289 err = hfs_brec_find(&src_fd);
290 if (err) 290 if (err)
291 goto out; 291 goto out;
292 if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
293 err = -EIO;
294 goto out;
295 }
292 296
293 hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, 297 hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
294 src_fd.entrylength); 298 src_fd.entrylength);
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 7c69b98a2e45..2b3b8611b41b 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -79,6 +79,11 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
79 filp->f_pos++; 79 filp->f_pos++;
80 /* fall through */ 80 /* fall through */
81 case 1: 81 case 1:
82 if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
83 err = -EIO;
84 goto out;
85 }
86
82 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); 87 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
83 if (entry.type != HFS_CDR_THD) { 88 if (entry.type != HFS_CDR_THD) {
84 printk(KERN_ERR "hfs: bad catalog folder thread\n"); 89 printk(KERN_ERR "hfs: bad catalog folder thread\n");
@@ -109,6 +114,12 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
109 err = -EIO; 114 err = -EIO;
110 goto out; 115 goto out;
111 } 116 }
117
118 if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
119 err = -EIO;
120 goto out;
121 }
122
112 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); 123 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
113 type = entry.type; 124 type = entry.type;
114 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); 125 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index f7fcbe49da72..5ed7252b7b23 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -409,8 +409,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
409 /* try to get the root inode */ 409 /* try to get the root inode */
410 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 410 hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
411 res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); 411 res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
412 if (!res) 412 if (!res) {
413 if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
414 res = -EIO;
415 goto bail;
416 }
413 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); 417 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
418 }
414 if (res) { 419 if (res) {
415 hfs_find_exit(&fd); 420 hfs_find_exit(&fd);
416 goto bail_no_root; 421 goto bail_no_root;
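Note: the three hfs hunks above all add the same guard: validate fd.entrylength against the destination buffer before hfs_bnode_read() copies into it, returning -EIO on an implausible on-disk value. The general defensive pattern, as a minimal sketch with illustrative names:

#include <errno.h>
#include <string.h>

/* copy an on-disk record only if its claimed length fits the in-memory buffer */
static int read_record_checked(void *dst, size_t dst_size,
                               const void *src, size_t src_len)
{
        if (src_len > dst_size)
                return -EIO;    /* implausible length: treat as metadata corruption */
        memcpy(dst, src, src_len);
        return 0;
}
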
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index f2feaa06bf26..cadc4ce48656 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -14,6 +14,7 @@
14#include <linux/magic.h> 14#include <linux/magic.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
17#include <linux/bitmap.h>
17 18
18/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ 19/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
19 20
@@ -115,15 +116,13 @@ static void hpfs_put_super(struct super_block *s)
115unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno) 116unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
116{ 117{
117 struct quad_buffer_head qbh; 118 struct quad_buffer_head qbh;
118 unsigned *bits; 119 unsigned long *bits;
119 unsigned i, count; 120 unsigned count;
120 if (!(bits = hpfs_map_4sectors(s, secno, &qbh, 4))) return 0; 121
121 count = 0; 122 bits = hpfs_map_4sectors(s, secno, &qbh, 4);
122 for (i = 0; i < 2048 / sizeof(unsigned); i++) { 123 if (!bits)
123 unsigned b; 124 return 0;
124 if (!bits[i]) continue; 125 count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
125 for (b = bits[i]; b; b>>=1) count += b & 1;
126 }
127 hpfs_brelse4(&qbh); 126 hpfs_brelse4(&qbh);
128 return count; 127 return count;
129} 128}
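Note: the hpfs hunk above replaces an open-coded shift-and-mask loop with bitmap_weight(), which returns the number of set bits in a bitmap of the given bit length (here 2048 bytes, i.e. 2048 * BITS_PER_BYTE bits). Outside the kernel the same counting can be sketched with the compiler's population-count builtin (assuming GCC/Clang):

#include <stddef.h>

/* count set bits across an array of unsigned longs */
static unsigned long count_bits(const unsigned long *bits, size_t nwords)
{
        unsigned long count = 0;
        size_t i;

        for (i = 0; i < nwords; i++)
                count += __builtin_popcountl(bits[i]);
        return count;
}
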
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index e81a30593ba9..ed752cb38474 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -9,7 +9,7 @@
9 * 9 *
10 * The following files are helpful: 10 * The following files are helpful:
11 * 11 *
12 * Documentation/filesystems/Exporting 12 * Documentation/filesystems/nfs/Exporting
13 * fs/exportfs/expfs.c. 13 * fs/exportfs/expfs.c.
14 */ 14 */
15 15
diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig
index 4e28beeed157..a8408983abd4 100644
--- a/fs/jbd/Kconfig
+++ b/fs/jbd/Kconfig
@@ -1,5 +1,6 @@
1config JBD 1config JBD
2 tristate 2 tristate
3 select FS_JOURNAL_INFO
3 help 4 help
4 This is a generic journalling layer for block devices. It is 5 This is a generic journalling layer for block devices. It is
5 currently used by the ext3 file system, but it could also be 6 currently used by the ext3 file system, but it could also be
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index f32f346f4b0a..0f7d1ceafdfd 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -1,6 +1,7 @@
1config JBD2 1config JBD2
2 tristate 2 tristate
3 select CRC32 3 select CRC32
4 select FS_JOURNAL_INFO
4 help 5 help
5 This is a generic journaling layer for block devices that support 6 This is a generic journaling layer for block devices that support
6 both 32-bit and 64-bit block numbers. It is currently used by 7 both 32-bit and 64-bit block numbers. It is currently used by
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 090c556ffed2..3b6f2fa12cff 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -700,7 +700,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
700 struct jffs2_raw_inode ri; 700 struct jffs2_raw_inode ri;
701 struct jffs2_node_frag *last_frag; 701 struct jffs2_node_frag *last_frag;
702 union jffs2_device_node dev; 702 union jffs2_device_node dev;
703 char *mdata = NULL, mdatalen = 0; 703 char *mdata = NULL;
704 int mdatalen = 0;
704 uint32_t alloclen, ilen; 705 uint32_t alloclen, ilen;
705 int ret; 706 int ret;
706 707
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 378991cfe40f..e22de8397b74 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1284,7 +1284,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
1284 f->target = NULL; 1284 f->target = NULL;
1285 mutex_unlock(&f->sem); 1285 mutex_unlock(&f->sem);
1286 jffs2_do_clear_inode(c, f); 1286 jffs2_do_clear_inode(c, f);
1287 return -ret; 1287 return ret;
1288 } 1288 }
1289 1289
1290 f->target[je32_to_cpu(latest_node->csize)] = '\0'; 1290 f->target[je32_to_cpu(latest_node->csize)] = '\0';
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 6caf1e1ee26d..800171dca53b 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -23,7 +23,7 @@
23 23
24int jffs2_sum_init(struct jffs2_sb_info *c) 24int jffs2_sum_init(struct jffs2_sb_info *c)
25{ 25{
26 uint32_t sum_size = max_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); 26 uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
27 27
28 c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); 28 c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
29 29
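Note: the one-line jffs2 fix above changes max_t to min_t: the summary buffer should be capped at MAX_SUMMARY_SIZE, not inflated to it, so the allocation becomes the smaller of the erase-sector size and the cap. A sketch of the intended clamp (the cap value below is illustrative; see fs/jffs2/summary.h for the real one):

#include <stdint.h>

#define MAX_SUMMARY_SIZE 32768   /* illustrative cap */

static uint32_t summary_buffer_size(uint32_t sector_size)
{
        /* min(): never larger than the cap, never larger than one sector */
        return sector_size < MAX_SUMMARY_SIZE ? sector_size : MAX_SUMMARY_SIZE;
}
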
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index bd173a6ca3b1..a7966eed3c17 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -11,10 +11,6 @@
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/smp_lock.h> 13#include <linux/smp_lock.h>
14#include <linux/in.h>
15#include <linux/sunrpc/svc.h>
16#include <linux/sunrpc/clnt.h>
17#include <linux/nfsd/nfsd.h>
18#include <linux/lockd/lockd.h> 14#include <linux/lockd/lockd.h>
19#include <linux/lockd/share.h> 15#include <linux/lockd/share.h>
20 16
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index e1d28ddd2169..56c9519d900a 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -11,10 +11,6 @@
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/smp_lock.h> 13#include <linux/smp_lock.h>
14#include <linux/in.h>
15#include <linux/sunrpc/svc.h>
16#include <linux/sunrpc/clnt.h>
17#include <linux/nfsd/nfsd.h>
18#include <linux/lockd/lockd.h> 14#include <linux/lockd/lockd.h>
19#include <linux/lockd/share.h> 15#include <linux/lockd/share.h>
20 16
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 2a77bc25d5af..59e5673b4597 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -90,7 +90,7 @@ config ROOT_NFS
90 If you want your system to mount its root file system via NFS, 90 If you want your system to mount its root file system via NFS,
91 choose Y here. This is common practice for managing systems 91 choose Y here. This is common practice for managing systems
92 without local permanent storage. For details, read 92 without local permanent storage. For details, read
93 <file:Documentation/filesystems/nfsroot.txt>. 93 <file:Documentation/filesystems/nfs/nfsroot.txt>.
94 94
95 Most people say N here. 95 Most people say N here.
96 96
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index 8f9a20556f79..d3854d94b7cf 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -7,8 +7,6 @@
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/file.h> 8#include <linux/file.h>
9#include <linux/fs.h> 9#include <linux/fs.h>
10#include <linux/sunrpc/svc.h>
11#include <linux/nfsd/nfsd.h>
12#include <linux/nfsd/syscall.h> 10#include <linux/nfsd/syscall.h>
13#include <linux/cred.h> 11#include <linux/cred.h>
14#include <linux/sched.h> 12#include <linux/sched.h>
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 36fcabbf5186..79717a40daba 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -1,15 +1,7 @@
1/* 1/* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */
2 * linux/fs/nfsd/auth.c
3 *
4 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
5 */
6 2
7#include <linux/types.h>
8#include <linux/sched.h> 3#include <linux/sched.h>
9#include <linux/sunrpc/svc.h> 4#include "nfsd.h"
10#include <linux/sunrpc/svcauth.h>
11#include <linux/nfsd/nfsd.h>
12#include <linux/nfsd/export.h>
13#include "auth.h" 5#include "auth.h"
14 6
15int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp) 7int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
diff --git a/include/linux/nfsd/cache.h b/fs/nfsd/cache.h
index 3a3f58934f5e..d892be61016c 100644
--- a/include/linux/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * include/linux/nfsd/cache.h
3 *
4 * Request reply cache. This was heavily inspired by the 2 * Request reply cache. This was heavily inspired by the
5 * implementation in 4.3BSD/4.4BSD. 3 * implementation in 4.3BSD/4.4BSD.
6 * 4 *
@@ -10,8 +8,7 @@
10#ifndef NFSCACHE_H 8#ifndef NFSCACHE_H
11#define NFSCACHE_H 9#define NFSCACHE_H
12 10
13#include <linux/in.h> 11#include <linux/sunrpc/svc.h>
14#include <linux/uio.h>
15 12
16/* 13/*
17 * Representation of a reply cache entry. 14 * Representation of a reply cache entry.
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c1c9e035d4a4..c487810a2366 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1,7 +1,5 @@
1#define MSNFS /* HACK HACK */ 1#define MSNFS /* HACK HACK */
2/* 2/*
3 * linux/fs/nfsd/export.c
4 *
5 * NFS exporting and validation. 3 * NFS exporting and validation.
6 * 4 *
7 * We maintain a list of clients, each of which has a list of 5 * We maintain a list of clients, each of which has a list of
@@ -14,29 +12,16 @@
14 * Copyright (C) 1995, 1996 Olaf Kirch, <okir@monad.swb.de> 12 * Copyright (C) 1995, 1996 Olaf Kirch, <okir@monad.swb.de>
15 */ 13 */
16 14
17#include <linux/unistd.h>
18#include <linux/slab.h>
19#include <linux/stat.h>
20#include <linux/in.h>
21#include <linux/seq_file.h>
22#include <linux/syscalls.h>
23#include <linux/rwsem.h>
24#include <linux/dcache.h>
25#include <linux/namei.h> 15#include <linux/namei.h>
26#include <linux/mount.h>
27#include <linux/hash.h>
28#include <linux/module.h> 16#include <linux/module.h>
29#include <linux/exportfs.h> 17#include <linux/exportfs.h>
30 18
31#include <linux/sunrpc/svc.h>
32#include <linux/nfsd/nfsd.h>
33#include <linux/nfsd/nfsfh.h>
34#include <linux/nfsd/syscall.h> 19#include <linux/nfsd/syscall.h>
35#include <linux/lockd/bind.h>
36#include <linux/sunrpc/msg_prot.h>
37#include <linux/sunrpc/gss_api.h>
38#include <net/ipv6.h> 20#include <net/ipv6.h>
39 21
22#include "nfsd.h"
23#include "nfsfh.h"
24
40#define NFSDDBG_FACILITY NFSDDBG_EXPORT 25#define NFSDDBG_FACILITY NFSDDBG_EXPORT
41 26
42typedef struct auth_domain svc_client; 27typedef struct auth_domain svc_client;
@@ -369,16 +354,25 @@ static struct svc_export *svc_export_update(struct svc_export *new,
369 struct svc_export *old); 354 struct svc_export *old);
370static struct svc_export *svc_export_lookup(struct svc_export *); 355static struct svc_export *svc_export_lookup(struct svc_export *);
371 356
372static int check_export(struct inode *inode, int flags, unsigned char *uuid) 357static int check_export(struct inode *inode, int *flags, unsigned char *uuid)
373{ 358{
374 359
375 /* We currently export only dirs and regular files. 360 /*
376 * This is what umountd does. 361 * We currently export only dirs, regular files, and (for v4
362 * pseudoroot) symlinks.
377 */ 363 */
378 if (!S_ISDIR(inode->i_mode) && 364 if (!S_ISDIR(inode->i_mode) &&
365 !S_ISLNK(inode->i_mode) &&
379 !S_ISREG(inode->i_mode)) 366 !S_ISREG(inode->i_mode))
380 return -ENOTDIR; 367 return -ENOTDIR;
381 368
369 /*
370 * Mountd should never pass down a writeable V4ROOT export, but,
371 * just to make sure:
372 */
373 if (*flags & NFSEXP_V4ROOT)
374 *flags |= NFSEXP_READONLY;
375
382 /* There are two requirements on a filesystem to be exportable. 376 /* There are two requirements on a filesystem to be exportable.
383 * 1: We must be able to identify the filesystem from a number. 377 * 1: We must be able to identify the filesystem from a number.
384 * either a device number (so FS_REQUIRES_DEV needed) 378 * either a device number (so FS_REQUIRES_DEV needed)
@@ -387,7 +381,7 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
387 * This means that s_export_op must be set. 381 * This means that s_export_op must be set.
388 */ 382 */
389 if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) && 383 if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) &&
390 !(flags & NFSEXP_FSID) && 384 !(*flags & NFSEXP_FSID) &&
391 uuid == NULL) { 385 uuid == NULL) {
392 dprintk("exp_export: export of non-dev fs without fsid\n"); 386 dprintk("exp_export: export of non-dev fs without fsid\n");
393 return -EINVAL; 387 return -EINVAL;
@@ -602,7 +596,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
602 goto out4; 596 goto out4;
603 } 597 }
604 598
605 err = check_export(exp.ex_path.dentry->d_inode, exp.ex_flags, 599 err = check_export(exp.ex_path.dentry->d_inode, &exp.ex_flags,
606 exp.ex_uuid); 600 exp.ex_uuid);
607 if (err) 601 if (err)
608 goto out4; 602 goto out4;
@@ -1041,7 +1035,7 @@ exp_export(struct nfsctl_export *nxp)
1041 goto finish; 1035 goto finish;
1042 } 1036 }
1043 1037
1044 err = check_export(path.dentry->d_inode, nxp->ex_flags, NULL); 1038 err = check_export(path.dentry->d_inode, &nxp->ex_flags, NULL);
1045 if (err) goto finish; 1039 if (err) goto finish;
1046 1040
1047 err = -ENOMEM; 1041 err = -ENOMEM;
@@ -1320,6 +1314,23 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
1320 return exp; 1314 return exp;
1321} 1315}
1322 1316
1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
1318{
1319 struct svc_export *exp;
1320 u32 fsidv[2];
1321
1322 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
1323
1324 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
1325 /*
1326 * We shouldn't have accepted an nfsv4 request at all if we
1326 * We shouldn't have accepted an nfsv4 request at all if we
1327 * don't have a pseudoexport!:
1328 */
1329 if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
1330 exp = ERR_PTR(-ESERVERFAULT);
1331 return exp;
1332}
1333
1323/* 1334/*
1324 * Called when we need the filehandle for the root of the pseudofs, 1335 * Called when we need the filehandle for the root of the pseudofs,
1325 * for a given NFSv4 client. The root is defined to be the 1336 * for a given NFSv4 client. The root is defined to be the
@@ -1330,11 +1341,8 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
1330{ 1341{
1331 struct svc_export *exp; 1342 struct svc_export *exp;
1332 __be32 rv; 1343 __be32 rv;
1333 u32 fsidv[2];
1334 1344
1335 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); 1345 exp = find_fsidzero_export(rqstp);
1336
1337 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
1338 if (IS_ERR(exp)) 1346 if (IS_ERR(exp))
1339 return nfserrno(PTR_ERR(exp)); 1347 return nfserrno(PTR_ERR(exp));
1340 rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL); 1348 rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
@@ -1425,6 +1433,7 @@ static struct flags {
1425 { NFSEXP_CROSSMOUNT, {"crossmnt", ""}}, 1433 { NFSEXP_CROSSMOUNT, {"crossmnt", ""}},
1426 { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}}, 1434 { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
1427 { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, 1435 { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
1436 { NFSEXP_V4ROOT, {"v4root", ""}},
1428#ifdef MSNFS 1437#ifdef MSNFS
1429 { NFSEXP_MSNFS, {"msnfs", ""}}, 1438 { NFSEXP_MSNFS, {"msnfs", ""}},
1430#endif 1439#endif
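Note: check_export() above now takes int *flags instead of int flags so that, besides validating the export, it can force NFSEXP_READONLY onto a V4ROOT export. Passing a flags word by pointer is the usual C idiom when a validator may also need to adjust the caller's value; a minimal sketch with hypothetical flag names:

#define FLAG_V4ROOT   0x1
#define FLAG_READONLY 0x2

/* validate *and* possibly tighten the caller's flags */
static int check_flags(int *flags)
{
        /* a pseudo-root export must never be writable */
        if (*flags & FLAG_V4ROOT)
                *flags |= FLAG_READONLY;
        return 0;
}
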
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index b2786a5f9afe..0c6d81670137 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/fs/nfsd/lockd.c
3 *
4 * This file contains all the stubs needed when communicating with lockd. 2 * This file contains all the stubs needed when communicating with lockd.
5 * This level of indirection is necessary so we can run nfsd+lockd without 3 * This level of indirection is necessary so we can run nfsd+lockd without
6 * requiring the nfs client to be compiled in/loaded, and vice versa. 4 * requiring the nfs client to be compiled in/loaded, and vice versa.
@@ -8,14 +6,10 @@
8 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
9 */ 7 */
10 8
11#include <linux/types.h>
12#include <linux/fs.h>
13#include <linux/file.h> 9#include <linux/file.h>
14#include <linux/mount.h>
15#include <linux/sunrpc/clnt.h>
16#include <linux/sunrpc/svc.h>
17#include <linux/nfsd/nfsd.h>
18#include <linux/lockd/bind.h> 10#include <linux/lockd/bind.h>
11#include "nfsd.h"
12#include "vfs.h"
19 13
20#define NFSDDBG_FACILITY NFSDDBG_LOCKD 14#define NFSDDBG_FACILITY NFSDDBG_LOCKD
21 15
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 4e3219e84116..f20589d2ae27 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -1,19 +1,15 @@
1/* 1/*
2 * linux/fs/nfsd/nfs2acl.c
3 *
4 * Process version 2 NFSACL requests. 2 * Process version 2 NFSACL requests.
5 * 3 *
6 * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de> 4 * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
7 */ 5 */
8 6
9#include <linux/sunrpc/svc.h> 7#include "nfsd.h"
10#include <linux/nfs.h> 8/* FIXME: nfsacl.h is a broken header */
11#include <linux/nfsd/nfsd.h>
12#include <linux/nfsd/cache.h>
13#include <linux/nfsd/xdr.h>
14#include <linux/nfsd/xdr3.h>
15#include <linux/posix_acl.h>
16#include <linux/nfsacl.h> 9#include <linux/nfsacl.h>
10#include "cache.h"
11#include "xdr3.h"
12#include "vfs.h"
17 13
18#define NFSDDBG_FACILITY NFSDDBG_PROC 14#define NFSDDBG_FACILITY NFSDDBG_PROC
19#define RETURN_STATUS(st) { resp->status = (st); return (st); } 15#define RETURN_STATUS(st) { resp->status = (st); return (st); }
@@ -217,6 +213,16 @@ static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
217 * XDR encode functions 213 * XDR encode functions
218 */ 214 */
219 215
216/*
217 * There must be an encoding function for void results so svc_process
218 * will work properly.
219 */
220int
221nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
222{
223 return xdr_ressize_check(rqstp, p);
224}
225
220/* GETACL */ 226/* GETACL */
221static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p, 227static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
222 struct nfsd3_getaclres *resp) 228 struct nfsd3_getaclres *resp)
@@ -308,7 +314,6 @@ static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p,
308} 314}
309 315
310#define nfsaclsvc_decode_voidargs NULL 316#define nfsaclsvc_decode_voidargs NULL
311#define nfsaclsvc_encode_voidres NULL
312#define nfsaclsvc_release_void NULL 317#define nfsaclsvc_release_void NULL
313#define nfsd3_fhandleargs nfsd_fhandle 318#define nfsd3_fhandleargs nfsd_fhandle
314#define nfsd3_attrstatres nfsd_attrstat 319#define nfsd3_attrstatres nfsd_attrstat
@@ -346,5 +351,5 @@ struct svc_version nfsd_acl_version2 = {
346 .vs_proc = nfsd_acl_procedures2, 351 .vs_proc = nfsd_acl_procedures2,
347 .vs_dispatch = nfsd_dispatch, 352 .vs_dispatch = nfsd_dispatch,
348 .vs_xdrsize = NFS3_SVC_XDRSIZE, 353 .vs_xdrsize = NFS3_SVC_XDRSIZE,
349 .vs_hidden = 1, 354 .vs_hidden = 0,
350}; 355};
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 9981dbb377a3..e0c4846bad92 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -1,18 +1,15 @@
1/* 1/*
2 * linux/fs/nfsd/nfs3acl.c
3 *
4 * Process version 3 NFSACL requests. 2 * Process version 3 NFSACL requests.
5 * 3 *
6 * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de> 4 * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
7 */ 5 */
8 6
9#include <linux/sunrpc/svc.h> 7#include "nfsd.h"
10#include <linux/nfs3.h> 8/* FIXME: nfsacl.h is a broken header */
11#include <linux/nfsd/nfsd.h>
12#include <linux/nfsd/cache.h>
13#include <linux/nfsd/xdr3.h>
14#include <linux/posix_acl.h>
15#include <linux/nfsacl.h> 9#include <linux/nfsacl.h>
10#include "cache.h"
11#include "xdr3.h"
12#include "vfs.h"
16 13
17#define RETURN_STATUS(st) { resp->status = (st); return (st); } 14#define RETURN_STATUS(st) { resp->status = (st); return (st); }
18 15
@@ -264,6 +261,6 @@ struct svc_version nfsd_acl_version3 = {
264 .vs_proc = nfsd_acl_procedures3, 261 .vs_proc = nfsd_acl_procedures3,
265 .vs_dispatch = nfsd_dispatch, 262 .vs_dispatch = nfsd_dispatch,
266 .vs_xdrsize = NFS3_SVC_XDRSIZE, 263 .vs_xdrsize = NFS3_SVC_XDRSIZE,
267 .vs_hidden = 1, 264 .vs_hidden = 0,
268}; 265};
269 266
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index a713c418a922..3d68f45a37b9 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -1,30 +1,16 @@
1/* 1/*
2 * linux/fs/nfsd/nfs3proc.c
3 *
4 * Process version 3 NFS requests. 2 * Process version 3 NFS requests.
5 * 3 *
6 * Copyright (C) 1996, 1997, 1998 Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1996, 1997, 1998 Olaf Kirch <okir@monad.swb.de>
7 */ 5 */
8 6
9#include <linux/linkage.h>
10#include <linux/time.h>
11#include <linux/errno.h>
12#include <linux/fs.h> 7#include <linux/fs.h>
13#include <linux/ext2_fs.h> 8#include <linux/ext2_fs.h>
14#include <linux/stat.h>
15#include <linux/fcntl.h>
16#include <linux/net.h>
17#include <linux/in.h>
18#include <linux/unistd.h>
19#include <linux/slab.h>
20#include <linux/major.h>
21#include <linux/magic.h> 9#include <linux/magic.h>
22 10
23#include <linux/sunrpc/svc.h> 11#include "cache.h"
24#include <linux/nfsd/nfsd.h> 12#include "xdr3.h"
25#include <linux/nfsd/cache.h> 13#include "vfs.h"
26#include <linux/nfsd/xdr3.h>
27#include <linux/nfs3.h>
28 14
29#define NFSDDBG_FACILITY NFSDDBG_PROC 15#define NFSDDBG_FACILITY NFSDDBG_PROC
30 16
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index d0a2ce1b4324..2a533a0af2a9 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/fs/nfsd/nfs3xdr.c
3 *
4 * XDR support for nfsd/protocol version 3. 2 * XDR support for nfsd/protocol version 3.
5 * 3 *
6 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
@@ -8,19 +6,8 @@
8 * 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()! 6 * 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()!
9 */ 7 */
10 8
11#include <linux/types.h>
12#include <linux/time.h>
13#include <linux/nfs3.h>
14#include <linux/list.h>
15#include <linux/spinlock.h>
16#include <linux/dcache.h>
17#include <linux/namei.h> 9#include <linux/namei.h>
18#include <linux/mm.h> 10#include "xdr3.h"
19#include <linux/vfs.h>
20#include <linux/sunrpc/xdr.h>
21#include <linux/sunrpc/svc.h>
22#include <linux/nfsd/nfsd.h>
23#include <linux/nfsd/xdr3.h>
24#include "auth.h" 11#include "auth.h"
25 12
26#define NFSDDBG_FACILITY NFSDDBG_XDR 13#define NFSDDBG_FACILITY NFSDDBG_XDR
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 725d02f210e2..88150685df34 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * fs/nfs4acl/acl.c
3 *
4 * Common NFSv4 ACL handling code. 2 * Common NFSv4 ACL handling code.
5 * 3 *
6 * Copyright (c) 2002, 2003 The Regents of the University of Michigan. 4 * Copyright (c) 2002, 2003 The Regents of the University of Michigan.
@@ -36,15 +34,7 @@
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */ 35 */
38 36
39#include <linux/string.h>
40#include <linux/slab.h>
41#include <linux/list.h>
42#include <linux/types.h>
43#include <linux/fs.h>
44#include <linux/module.h>
45#include <linux/nfs_fs.h> 37#include <linux/nfs_fs.h>
46#include <linux/posix_acl.h>
47#include <linux/nfs4.h>
48#include <linux/nfs4_acl.h> 38#include <linux/nfs4_acl.h>
49 39
50 40
@@ -389,7 +379,7 @@ sort_pacl(struct posix_acl *pacl)
389 sort_pacl_range(pacl, 1, i-1); 379 sort_pacl_range(pacl, 1, i-1);
390 380
391 BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ); 381 BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
392 j = i++; 382 j = ++i;
393 while (pacl->a_entries[j].e_tag == ACL_GROUP) 383 while (pacl->a_entries[j].e_tag == ACL_GROUP)
394 j++; 384 j++;
395 sort_pacl_range(pacl, i, j-1); 385 sort_pacl_range(pacl, i, j-1);
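Note: the one-character sort_pacl() fix above is a classic pre/post-increment bug: j = i++ stores the old value of i in j and only then advances i, whereas the intended j = ++i advances i first so that both i and j point past the ACL_GROUP_OBJ entry. In isolation:

#include <assert.h>

static void increment_demo(void)
{
        int i = 3, j;

        j = i++;                /* post-increment: j == 3, i == 4 */
        assert(j == 3 && i == 4);

        i = 3;
        j = ++i;                /* pre-increment:  j == 4, i == 4 */
        assert(j == 4 && i == 4);
}
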
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 24e8d78f8dde..c6eed2a3b093 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/fs/nfsd/nfs4callback.c
3 *
4 * Copyright (c) 2001 The Regents of the University of Michigan. 2 * Copyright (c) 2001 The Regents of the University of Michigan.
5 * All rights reserved. 3 * All rights reserved.
6 * 4 *
@@ -33,22 +31,9 @@
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */ 32 */
35 33
36#include <linux/module.h>
37#include <linux/list.h>
38#include <linux/inet.h>
39#include <linux/errno.h>
40#include <linux/delay.h>
41#include <linux/sched.h>
42#include <linux/kthread.h>
43#include <linux/sunrpc/xdr.h>
44#include <linux/sunrpc/svc.h>
45#include <linux/sunrpc/clnt.h> 34#include <linux/sunrpc/clnt.h>
46#include <linux/sunrpc/svcsock.h> 35#include "nfsd.h"
47#include <linux/nfsd/nfsd.h> 36#include "state.h"
48#include <linux/nfsd/state.h>
49#include <linux/sunrpc/sched.h>
50#include <linux/nfs4.h>
51#include <linux/sunrpc/xprtsock.h>
52 37
53#define NFSDDBG_FACILITY NFSDDBG_PROC 38#define NFSDDBG_FACILITY NFSDDBG_PROC
54 39
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index ba2c199592fd..6e2983b27f3c 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * fs/nfsd/nfs4idmap.c
3 *
4 * Mapping of UID/GIDs to name and vice versa. 2 * Mapping of UID/GIDs to name and vice versa.
5 * 3 *
6 * Copyright (c) 2002, 2003 The Regents of the University of 4 * Copyright (c) 2002, 2003 The Regents of the University of
@@ -35,22 +33,9 @@
35 */ 33 */
36 34
37#include <linux/module.h> 35#include <linux/module.h>
38#include <linux/init.h>
39
40#include <linux/mm.h>
41#include <linux/errno.h>
42#include <linux/string.h>
43#include <linux/sunrpc/clnt.h>
44#include <linux/nfs.h>
45#include <linux/nfs4.h>
46#include <linux/nfs_fs.h>
47#include <linux/nfs_page.h>
48#include <linux/sunrpc/cache.h>
49#include <linux/nfsd_idmap.h> 36#include <linux/nfsd_idmap.h>
50#include <linux/list.h>
51#include <linux/time.h>
52#include <linux/seq_file.h> 37#include <linux/seq_file.h>
53#include <linux/sunrpc/svcauth.h> 38#include <linux/sched.h>
54 39
55/* 40/*
56 * Cache entry 41 * Cache entry
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index bebc0c2e1b0a..37514c469846 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * fs/nfsd/nfs4proc.c
3 *
4 * Server-side procedures for NFSv4. 2 * Server-side procedures for NFSv4.
5 * 3 *
6 * Copyright (c) 2002 The Regents of the University of Michigan. 4 * Copyright (c) 2002 The Regents of the University of Michigan.
@@ -34,20 +32,11 @@
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */ 34 */
37
38#include <linux/param.h>
39#include <linux/major.h>
40#include <linux/slab.h>
41#include <linux/file.h> 35#include <linux/file.h>
42 36
43#include <linux/sunrpc/svc.h> 37#include "cache.h"
44#include <linux/nfsd/nfsd.h> 38#include "xdr4.h"
45#include <linux/nfsd/cache.h> 39#include "vfs.h"
46#include <linux/nfs4.h>
47#include <linux/nfsd/state.h>
48#include <linux/nfsd/xdr4.h>
49#include <linux/nfs4_acl.h>
50#include <linux/sunrpc/gss_api.h>
51 40
52#define NFSDDBG_FACILITY NFSDDBG_PROC 41#define NFSDDBG_FACILITY NFSDDBG_PROC
53 42
@@ -170,7 +159,7 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
170 accmode |= NFSD_MAY_READ; 159 accmode |= NFSD_MAY_READ;
171 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 160 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
172 accmode |= (NFSD_MAY_WRITE | NFSD_MAY_TRUNC); 161 accmode |= (NFSD_MAY_WRITE | NFSD_MAY_TRUNC);
173 if (open->op_share_deny & NFS4_SHARE_DENY_WRITE) 162 if (open->op_share_deny & NFS4_SHARE_DENY_READ)
174 accmode |= NFSD_MAY_WRITE; 163 accmode |= NFSD_MAY_WRITE;
175 164
176 status = fh_verify(rqstp, current_fh, S_IFREG, accmode); 165 status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index b5348405046b..5a754f7b71ed 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -1,6 +1,4 @@
1/* 1/*
2* linux/fs/nfsd/nfs4recover.c
3*
4* Copyright (c) 2004 The Regents of the University of Michigan. 2* Copyright (c) 2004 The Regents of the University of Michigan.
5* All rights reserved. 3* All rights reserved.
6* 4*
@@ -33,20 +31,14 @@
33* 31*
34*/ 32*/
35 33
36#include <linux/err.h>
37#include <linux/sunrpc/svc.h>
38#include <linux/nfsd/nfsd.h>
39#include <linux/nfs4.h>
40#include <linux/nfsd/state.h>
41#include <linux/nfsd/xdr4.h>
42#include <linux/param.h>
43#include <linux/file.h> 34#include <linux/file.h>
44#include <linux/namei.h> 35#include <linux/namei.h>
45#include <asm/uaccess.h>
46#include <linux/scatterlist.h>
47#include <linux/crypto.h> 36#include <linux/crypto.h>
48#include <linux/sched.h> 37#include <linux/sched.h>
49#include <linux/mount.h> 38
39#include "nfsd.h"
40#include "state.h"
41#include "vfs.h"
50 42
51#define NFSDDBG_FACILITY NFSDDBG_PROC 43#define NFSDDBG_FACILITY NFSDDBG_PROC
52 44
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2153f9bdbebd..f19ed866c95f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1,6 +1,4 @@
1/* 1/*
2* linux/fs/nfsd/nfs4state.c
3*
4* Copyright (c) 2001 The Regents of the University of Michigan. 2* Copyright (c) 2001 The Regents of the University of Michigan.
5* All rights reserved. 3* All rights reserved.
6* 4*
@@ -34,28 +32,14 @@
34* 32*
35*/ 33*/
36 34
37#include <linux/param.h>
38#include <linux/major.h>
39#include <linux/slab.h>
40
41#include <linux/sunrpc/svc.h>
42#include <linux/nfsd/nfsd.h>
43#include <linux/nfsd/cache.h>
44#include <linux/file.h> 35#include <linux/file.h>
45#include <linux/mount.h>
46#include <linux/workqueue.h>
47#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
48#include <linux/kthread.h>
49#include <linux/nfs4.h>
50#include <linux/nfsd/state.h>
51#include <linux/nfsd/xdr4.h>
52#include <linux/namei.h> 37#include <linux/namei.h>
53#include <linux/swap.h> 38#include <linux/swap.h>
54#include <linux/mutex.h>
55#include <linux/lockd/bind.h>
56#include <linux/module.h>
57#include <linux/sunrpc/svcauth_gss.h> 39#include <linux/sunrpc/svcauth_gss.h>
58#include <linux/sunrpc/clnt.h> 40#include <linux/sunrpc/clnt.h>
41#include "xdr4.h"
42#include "vfs.h"
59 43
60#define NFSDDBG_FACILITY NFSDDBG_PROC 44#define NFSDDBG_FACILITY NFSDDBG_PROC
61 45
@@ -477,13 +461,14 @@ static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan)
477 461
478/* 462/*
479 * fchan holds the client values on input, and the server values on output 463 * fchan holds the client values on input, and the server values on output
464 * sv_max_mesg is the maximum payload plus one page for overhead.
480 */ 465 */
481static int init_forechannel_attrs(struct svc_rqst *rqstp, 466static int init_forechannel_attrs(struct svc_rqst *rqstp,
482 struct nfsd4_channel_attrs *session_fchan, 467 struct nfsd4_channel_attrs *session_fchan,
483 struct nfsd4_channel_attrs *fchan) 468 struct nfsd4_channel_attrs *fchan)
484{ 469{
485 int status = 0; 470 int status = 0;
486 __u32 maxcount = svc_max_payload(rqstp); 471 __u32 maxcount = nfsd_serv->sv_max_mesg;
487 472
488 /* headerpadsz set to zero in encode routine */ 473 /* headerpadsz set to zero in encode routine */
489 474
@@ -523,6 +508,15 @@ free_session_slots(struct nfsd4_session *ses)
523 kfree(ses->se_slots[i]); 508 kfree(ses->se_slots[i]);
524} 509}
525 510
511/*
512 * We don't actually need to cache the rpc and session headers, so we
513 * can allocate a little less for each slot:
514 */
515static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
516{
517 return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
518}
519
526static int 520static int
527alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, 521alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
528 struct nfsd4_create_session *cses) 522 struct nfsd4_create_session *cses)
@@ -554,7 +548,7 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
554 memcpy(new, &tmp, sizeof(*new)); 548 memcpy(new, &tmp, sizeof(*new));
555 549
556 /* allocate each struct nfsd4_slot and data cache in one piece */ 550 /* allocate each struct nfsd4_slot and data cache in one piece */
557 cachesize = new->se_fchannel.maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; 551 cachesize = slot_bytes(&new->se_fchannel);
558 for (i = 0; i < new->se_fchannel.maxreqs; i++) { 552 for (i = 0; i < new->se_fchannel.maxreqs; i++) {
559 sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL); 553 sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL);
560 if (!sp) 554 if (!sp)
@@ -628,10 +622,12 @@ void
628free_session(struct kref *kref) 622free_session(struct kref *kref)
629{ 623{
630 struct nfsd4_session *ses; 624 struct nfsd4_session *ses;
625 int mem;
631 626
632 ses = container_of(kref, struct nfsd4_session, se_ref); 627 ses = container_of(kref, struct nfsd4_session, se_ref);
633 spin_lock(&nfsd_drc_lock); 628 spin_lock(&nfsd_drc_lock);
634 nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE; 629 mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
630 nfsd_drc_mem_used -= mem;
635 spin_unlock(&nfsd_drc_lock); 631 spin_unlock(&nfsd_drc_lock);
636 free_session_slots(ses); 632 free_session_slots(ses);
637 kfree(ses); 633 kfree(ses);
@@ -2404,11 +2400,8 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
2404 2400
2405 memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); 2401 memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
2406 2402
2407 dprintk("NFSD: delegation stateid=(%08x/%08x/%08x/%08x)\n\n", 2403 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2408 dp->dl_stateid.si_boot, 2404 STATEID_VAL(&dp->dl_stateid));
2409 dp->dl_stateid.si_stateownerid,
2410 dp->dl_stateid.si_fileid,
2411 dp->dl_stateid.si_generation);
2412out: 2405out:
2413 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS 2406 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
2414 && flag == NFS4_OPEN_DELEGATE_NONE 2407 && flag == NFS4_OPEN_DELEGATE_NONE
@@ -2498,9 +2491,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2498 2491
2499 status = nfs_ok; 2492 status = nfs_ok;
2500 2493
2501 dprintk("nfs4_process_open2: stateid=(%08x/%08x/%08x/%08x)\n", 2494 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
2502 stp->st_stateid.si_boot, stp->st_stateid.si_stateownerid, 2495 STATEID_VAL(&stp->st_stateid));
2503 stp->st_stateid.si_fileid, stp->st_stateid.si_generation);
2504out: 2496out:
2505 if (fp) 2497 if (fp)
2506 put_nfs4_file(fp); 2498 put_nfs4_file(fp);
@@ -2666,9 +2658,8 @@ STALE_STATEID(stateid_t *stateid)
2666{ 2658{
2667 if (time_after((unsigned long)boot_time, 2659 if (time_after((unsigned long)boot_time,
2668 (unsigned long)stateid->si_boot)) { 2660 (unsigned long)stateid->si_boot)) {
2669 dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n", 2661 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
2670 stateid->si_boot, stateid->si_stateownerid, 2662 STATEID_VAL(stateid));
2671 stateid->si_fileid, stateid->si_generation);
2672 return 1; 2663 return 1;
2673 } 2664 }
2674 return 0; 2665 return 0;
@@ -2680,9 +2671,8 @@ EXPIRED_STATEID(stateid_t *stateid)
2680 if (time_before((unsigned long)boot_time, 2671 if (time_before((unsigned long)boot_time,
2681 ((unsigned long)stateid->si_boot)) && 2672 ((unsigned long)stateid->si_boot)) &&
2682 time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) { 2673 time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
2683 dprintk("NFSD: expired stateid (%08x/%08x/%08x/%08x)!\n", 2674 dprintk("NFSD: expired stateid " STATEID_FMT "!\n",
2684 stateid->si_boot, stateid->si_stateownerid, 2675 STATEID_VAL(stateid));
2685 stateid->si_fileid, stateid->si_generation);
2686 return 1; 2676 return 1;
2687 } 2677 }
2688 return 0; 2678 return 0;
@@ -2696,9 +2686,8 @@ stateid_error_map(stateid_t *stateid)
2696 if (EXPIRED_STATEID(stateid)) 2686 if (EXPIRED_STATEID(stateid))
2697 return nfserr_expired; 2687 return nfserr_expired;
2698 2688
2699 dprintk("NFSD: bad stateid (%08x/%08x/%08x/%08x)!\n", 2689 dprintk("NFSD: bad stateid " STATEID_FMT "!\n",
2700 stateid->si_boot, stateid->si_stateownerid, 2690 STATEID_VAL(stateid));
2701 stateid->si_fileid, stateid->si_generation);
2702 return nfserr_bad_stateid; 2691 return nfserr_bad_stateid;
2703} 2692}
2704 2693
@@ -2884,10 +2873,8 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
2884 struct svc_fh *current_fh = &cstate->current_fh; 2873 struct svc_fh *current_fh = &cstate->current_fh;
2885 __be32 status; 2874 __be32 status;
2886 2875
2887 dprintk("NFSD: preprocess_seqid_op: seqid=%d " 2876 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
2888 "stateid = (%08x/%08x/%08x/%08x)\n", seqid, 2877 seqid, STATEID_VAL(stateid));
2889 stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
2890 stateid->si_generation);
2891 2878
2892 *stpp = NULL; 2879 *stpp = NULL;
2893 *sopp = NULL; 2880 *sopp = NULL;
@@ -3019,12 +3006,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3019 sop->so_confirmed = 1; 3006 sop->so_confirmed = 1;
3020 update_stateid(&stp->st_stateid); 3007 update_stateid(&stp->st_stateid);
3021 memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t)); 3008 memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t));
3022 dprintk("NFSD: nfsd4_open_confirm: success, seqid=%d " 3009 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3023 "stateid=(%08x/%08x/%08x/%08x)\n", oc->oc_seqid, 3010 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stateid));
3024 stp->st_stateid.si_boot,
3025 stp->st_stateid.si_stateownerid,
3026 stp->st_stateid.si_fileid,
3027 stp->st_stateid.si_generation);
3028 3011
3029 nfsd4_create_clid_dir(sop->so_client); 3012 nfsd4_create_clid_dir(sop->so_client);
3030out: 3013out:
@@ -3283,9 +3266,8 @@ find_delegation_stateid(struct inode *ino, stateid_t *stid)
3283 struct nfs4_file *fp; 3266 struct nfs4_file *fp;
3284 struct nfs4_delegation *dl; 3267 struct nfs4_delegation *dl;
3285 3268
3286 dprintk("NFSD:find_delegation_stateid stateid=(%08x/%08x/%08x/%08x)\n", 3269 dprintk("NFSD: %s: stateid=" STATEID_FMT "\n", __func__,
3287 stid->si_boot, stid->si_stateownerid, 3270 STATEID_VAL(stid));
3288 stid->si_fileid, stid->si_generation);
3289 3271
3290 fp = find_file(ino); 3272 fp = find_file(ino);
3291 if (!fp) 3273 if (!fp)
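Note: the nfs4state.c hunks above collapse repeated four-field stateid dprintk()s into a STATEID_FMT format macro plus a STATEID_VAL argument macro. The macro definitions themselves are not part of this diff (they live in nfsd's state header); a hypothetical reconstruction matching the old format string would be:

/* hypothetical reconstruction -- the real macros are defined in nfsd's
 * state header, which is not shown in this hunk */
#define STATEID_FMT     "(%08x/%08x/%08x/%08x)"
#define STATEID_VAL(s)  (s)->si_boot, (s)->si_stateownerid, \
                        (s)->si_fileid, (s)->si_generation

static void log_stale_stateid(stateid_t *stateid)
{
        dprintk("NFSD: stale stateid " STATEID_FMT "!\n", STATEID_VAL(stateid));
}
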
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0fbd50cee1f6..a8587e90fd5a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -40,24 +40,16 @@
40 * at the end of nfs4svc_decode_compoundargs. 40 * at the end of nfs4svc_decode_compoundargs.
41 */ 41 */
42 42
43#include <linux/param.h>
44#include <linux/smp.h>
45#include <linux/fs.h>
46#include <linux/namei.h> 43#include <linux/namei.h>
47#include <linux/vfs.h> 44#include <linux/statfs.h>
48#include <linux/utsname.h> 45#include <linux/utsname.h>
49#include <linux/sunrpc/xdr.h>
50#include <linux/sunrpc/svc.h>
51#include <linux/sunrpc/clnt.h>
52#include <linux/nfsd/nfsd.h>
53#include <linux/nfsd/state.h>
54#include <linux/nfsd/xdr4.h>
55#include <linux/nfsd_idmap.h> 46#include <linux/nfsd_idmap.h>
56#include <linux/nfs4.h>
57#include <linux/nfs4_acl.h> 47#include <linux/nfs4_acl.h>
58#include <linux/sunrpc/gss_api.h>
59#include <linux/sunrpc/svcauth_gss.h> 48#include <linux/sunrpc/svcauth_gss.h>
60 49
50#include "xdr4.h"
51#include "vfs.h"
52
61#define NFSDDBG_FACILITY NFSDDBG_XDR 53#define NFSDDBG_FACILITY NFSDDBG_XDR
62 54
63/* 55/*
@@ -2204,11 +2196,14 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
2204 * we will not follow the cross mount and will fill the attributes 2196 * we will not follow the cross mount and will fill the attributes
2205 * directly from the mountpoint dentry. 2197 * directly from the mountpoint dentry.
2206 */ 2198 */
2207 if (d_mountpoint(dentry) && !attributes_need_mount(cd->rd_bmval)) 2199 if (nfsd_mountpoint(dentry, exp)) {
2208 ignore_crossmnt = 1;
2209 else if (d_mountpoint(dentry)) {
2210 int err; 2200 int err;
2211 2201
2202 if (!(exp->ex_flags & NFSEXP_V4ROOT)
2203 && !attributes_need_mount(cd->rd_bmval)) {
2204 ignore_crossmnt = 1;
2205 goto out_encode;
2206 }
2212 /* 2207 /*
2213 * Why the heck aren't we just using nfsd_lookup?? 2208 * Why the heck aren't we just using nfsd_lookup??
2214 * Different "."/".." handling? Something else? 2209 * Different "."/".." handling? Something else?
@@ -2224,6 +2219,7 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
2224 goto out_put; 2219 goto out_put;
2225 2220
2226 } 2221 }
2222out_encode:
2227 nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval, 2223 nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
2228 cd->rd_rqstp, ignore_crossmnt); 2224 cd->rd_rqstp, ignore_crossmnt);
2229out_put: 2225out_put:
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 4638635c5d87..da08560c4818 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/fs/nfsd/nfscache.c
3 *
4 * Request reply cache. This is currently a global cache, but this may 2 * Request reply cache. This is currently a global cache, but this may
5 * change in the future and be a per-client cache. 3 * change in the future and be a per-client cache.
6 * 4 *
@@ -10,16 +8,8 @@
10 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 8 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
11 */ 9 */
12 10
13#include <linux/kernel.h> 11#include "nfsd.h"
14#include <linux/time.h> 12#include "cache.h"
15#include <linux/slab.h>
16#include <linux/string.h>
17#include <linux/spinlock.h>
18#include <linux/list.h>
19
20#include <linux/sunrpc/svc.h>
21#include <linux/nfsd/nfsd.h>
22#include <linux/nfsd/cache.h>
23 13
24/* Size of reply cache. Common values are: 14/* Size of reply cache. Common values are:
25 * 4.3BSD: 128 15 * 4.3BSD: 128
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 5c01fc148ce8..2604c3e70ea5 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1,46 +1,20 @@
1/* 1/*
2 * linux/fs/nfsd/nfsctl.c
3 *
4 * Syscall interface to knfsd. 2 * Syscall interface to knfsd.
5 * 3 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 */ 5 */
8 6
9#include <linux/module.h>
10
11#include <linux/linkage.h>
12#include <linux/time.h>
13#include <linux/errno.h>
14#include <linux/fs.h>
15#include <linux/namei.h> 7#include <linux/namei.h>
16#include <linux/fcntl.h>
17#include <linux/net.h>
18#include <linux/in.h>
19#include <linux/syscalls.h>
20#include <linux/unistd.h>
21#include <linux/slab.h>
22#include <linux/proc_fs.h>
23#include <linux/seq_file.h>
24#include <linux/pagemap.h>
25#include <linux/init.h>
26#include <linux/inet.h>
27#include <linux/string.h>
28#include <linux/ctype.h> 8#include <linux/ctype.h>
29 9
30#include <linux/nfs.h>
31#include <linux/nfsd_idmap.h> 10#include <linux/nfsd_idmap.h>
32#include <linux/lockd/bind.h>
33#include <linux/sunrpc/svc.h>
34#include <linux/sunrpc/svcsock.h> 11#include <linux/sunrpc/svcsock.h>
35#include <linux/nfsd/nfsd.h>
36#include <linux/nfsd/cache.h>
37#include <linux/nfsd/xdr.h>
38#include <linux/nfsd/syscall.h> 12#include <linux/nfsd/syscall.h>
39#include <linux/lockd/lockd.h> 13#include <linux/lockd/lockd.h>
40#include <linux/sunrpc/clnt.h> 14#include <linux/sunrpc/clnt.h>
41 15
42#include <asm/uaccess.h> 16#include "nfsd.h"
43#include <net/ipv6.h> 17#include "cache.h"
44 18
45/* 19/*
46 * We have a single directory with 9 nodes in it. 20 * We have a single directory with 9 nodes in it.
@@ -55,6 +29,7 @@ enum {
55 NFSD_Getfd, 29 NFSD_Getfd,
56 NFSD_Getfs, 30 NFSD_Getfs,
57 NFSD_List, 31 NFSD_List,
32 NFSD_Export_features,
58 NFSD_Fh, 33 NFSD_Fh,
59 NFSD_FO_UnlockIP, 34 NFSD_FO_UnlockIP,
60 NFSD_FO_UnlockFS, 35 NFSD_FO_UnlockFS,
@@ -173,6 +148,24 @@ static const struct file_operations exports_operations = {
173 .owner = THIS_MODULE, 148 .owner = THIS_MODULE,
174}; 149};
175 150
151static int export_features_show(struct seq_file *m, void *v)
152{
153 seq_printf(m, "0x%x 0x%x\n", NFSEXP_ALLFLAGS, NFSEXP_SECINFO_FLAGS);
154 return 0;
155}
156
157static int export_features_open(struct inode *inode, struct file *file)
158{
159 return single_open(file, export_features_show, NULL);
160}
161
162static struct file_operations export_features_operations = {
163 .open = export_features_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
167};
168
176extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); 169extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
177extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); 170extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
178 171
@@ -1330,6 +1323,8 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
1330 [NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR}, 1323 [NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
1331 [NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR}, 1324 [NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
1332 [NFSD_List] = {"exports", &exports_operations, S_IRUGO}, 1325 [NFSD_List] = {"exports", &exports_operations, S_IRUGO},
1326 [NFSD_Export_features] = {"export_features",
1327 &export_features_operations, S_IRUGO},
1333 [NFSD_FO_UnlockIP] = {"unlock_ip", 1328 [NFSD_FO_UnlockIP] = {"unlock_ip",
1334 &transaction_ops, S_IWUSR|S_IRUSR}, 1329 &transaction_ops, S_IWUSR|S_IRUSR},
1335 [NFSD_FO_UnlockFS] = {"unlock_filesystem", 1330 [NFSD_FO_UnlockFS] = {"unlock_filesystem",
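Note: the export_features addition above is the standard single_open()/seq_file recipe for a small read-only virtual file: a show() callback prints everything in one pass, open() wires it up via single_open(), and seq_read/seq_lseek/single_release do the rest. A stripped-down sketch of the same pattern (the printed values are placeholders):

#include <linux/seq_file.h>
#include <linux/fs.h>

static int features_show(struct seq_file *m, void *v)
{
        seq_printf(m, "0x%x 0x%x\n", 0x1, 0x2);   /* placeholder values */
        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, NULL);
}

static const struct file_operations features_fops = {
        .open    = features_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
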
diff --git a/include/linux/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 510ffdd5020e..e942a1aaac92 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/include/linux/nfsd/nfsd.h
3 *
4 * Hodge-podge collection of knfsd-related stuff. 2 * Hodge-podge collection of knfsd-related stuff.
5 * I will sort this out later. 3 * I will sort this out later.
6 * 4 *
@@ -11,13 +9,9 @@
11#define LINUX_NFSD_NFSD_H 9#define LINUX_NFSD_NFSD_H
12 10
13#include <linux/types.h> 11#include <linux/types.h>
14#include <linux/unistd.h>
15#include <linux/fs.h>
16#include <linux/posix_acl.h>
17#include <linux/mount.h> 12#include <linux/mount.h>
18 13
19#include <linux/nfsd/debug.h> 14#include <linux/nfsd/debug.h>
20#include <linux/nfsd/nfsfh.h>
21#include <linux/nfsd/export.h> 15#include <linux/nfsd/export.h>
22#include <linux/nfsd/stats.h> 16#include <linux/nfsd/stats.h>
23/* 17/*
@@ -25,30 +19,10 @@
25 */ 19 */
26#define NFSD_SUPPORTED_MINOR_VERSION 1 20#define NFSD_SUPPORTED_MINOR_VERSION 1
27 21
28/*
29 * Flags for nfsd_permission
30 */
31#define NFSD_MAY_NOP 0
32#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
33#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
34#define NFSD_MAY_READ 4 /* == MAY_READ */
35#define NFSD_MAY_SATTR 8
36#define NFSD_MAY_TRUNC 16
37#define NFSD_MAY_LOCK 32
38#define NFSD_MAY_OWNER_OVERRIDE 64
39#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
40#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
41
42#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
43#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
44
45/*
46 * Callback function for readdir
47 */
48struct readdir_cd { 22struct readdir_cd {
49 __be32 err; /* 0, nfserr, or nfserr_eof */ 23 __be32 err; /* 0, nfserr, or nfserr_eof */
50}; 24};
51typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); 25
52 26
53extern struct svc_program nfsd_program; 27extern struct svc_program nfsd_program;
54extern struct svc_version nfsd_version2, nfsd_version3, 28extern struct svc_version nfsd_version2, nfsd_version3,
@@ -73,69 +47,6 @@ int nfsd_nrpools(void);
73int nfsd_get_nrthreads(int n, int *); 47int nfsd_get_nrthreads(int n, int *);
74int nfsd_set_nrthreads(int n, int *); 48int nfsd_set_nrthreads(int n, int *);
75 49
76/* nfsd/vfs.c */
77int fh_lock_parent(struct svc_fh *, struct dentry *);
78int nfsd_racache_init(int);
79void nfsd_racache_shutdown(void);
80int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
81 struct svc_export **expp);
82__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
83 const char *, unsigned int, struct svc_fh *);
84__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
85 const char *, unsigned int,
86 struct svc_export **, struct dentry **);
87__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
88 struct iattr *, int, time_t);
89#ifdef CONFIG_NFSD_V4
90__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
91 struct nfs4_acl *);
92int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
93#endif /* CONFIG_NFSD_V4 */
94__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
95 char *name, int len, struct iattr *attrs,
96 int type, dev_t rdev, struct svc_fh *res);
97#ifdef CONFIG_NFSD_V3
98__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
99__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *,
100 char *name, int len, struct iattr *attrs,
101 struct svc_fh *res, int createmode,
102 u32 *verifier, int *truncp, int *created);
103__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
104 loff_t, unsigned long);
105#endif /* CONFIG_NFSD_V3 */
106__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int,
107 int, struct file **);
108void nfsd_close(struct file *);
109__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
110 loff_t, struct kvec *, int, unsigned long *);
111__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
112 loff_t, struct kvec *,int, unsigned long *, int *);
113__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
114 char *, int *);
115__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
116 char *name, int len, char *path, int plen,
117 struct svc_fh *res, struct iattr *);
118__be32 nfsd_link(struct svc_rqst *, struct svc_fh *,
119 char *, int, struct svc_fh *);
120__be32 nfsd_rename(struct svc_rqst *,
121 struct svc_fh *, char *, int,
122 struct svc_fh *, char *, int);
123__be32 nfsd_remove(struct svc_rqst *,
124 struct svc_fh *, char *, int);
125__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
126 char *name, int len);
127int nfsd_truncate(struct svc_rqst *, struct svc_fh *,
128 unsigned long size);
129__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
130 loff_t *, struct readdir_cd *, filldir_t);
131__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
132 struct kstatfs *, int access);
133
134int nfsd_notify_change(struct inode *, struct iattr *);
135__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
136 struct dentry *, int);
137int nfsd_sync_dir(struct dentry *dp);
138
139#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 50#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
140#ifdef CONFIG_NFSD_V2_ACL 51#ifdef CONFIG_NFSD_V2_ACL
141extern struct svc_version nfsd_acl_version2; 52extern struct svc_version nfsd_acl_version2;
@@ -147,8 +58,6 @@ extern struct svc_version nfsd_acl_version3;
147#else 58#else
148#define nfsd_acl_version3 NULL 59#define nfsd_acl_version3 NULL
149#endif 60#endif
150struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
151int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
152#endif 61#endif
153 62
154enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; 63enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
@@ -159,6 +68,11 @@ int nfsd_create_serv(void);
159 68
160extern int nfsd_max_blksize; 69extern int nfsd_max_blksize;
161 70
71static inline int nfsd_v4client(struct svc_rqst *rq)
72{
73 return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
74}
75
162/* 76/*
163 * NFSv4 State 77 * NFSv4 State
164 */ 78 */
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 01965b2f3a76..1c12177b908c 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/fs/nfsd/nfsfh.c
3 *
4 * NFS server file handle treatment. 2 * NFS server file handle treatment.
5 * 3 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
@@ -9,19 +7,11 @@
9 * ... and again Southern-Winter 2001 to support export_operations 7 * ... and again Southern-Winter 2001 to support export_operations
10 */ 8 */
11 9
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/unistd.h>
15#include <linux/string.h>
16#include <linux/stat.h>
17#include <linux/dcache.h>
18#include <linux/exportfs.h> 10#include <linux/exportfs.h>
19#include <linux/mount.h>
20 11
21#include <linux/sunrpc/clnt.h>
22#include <linux/sunrpc/svc.h>
23#include <linux/sunrpc/svcauth_gss.h> 12#include <linux/sunrpc/svcauth_gss.h>
24#include <linux/nfsd/nfsd.h> 13#include "nfsd.h"
14#include "vfs.h"
25#include "auth.h" 15#include "auth.h"
26 16
27#define NFSDDBG_FACILITY NFSDDBG_FH 17#define NFSDDBG_FACILITY NFSDDBG_FH
@@ -96,8 +86,10 @@ nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int type)
96static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp, 86static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
97 struct svc_export *exp) 87 struct svc_export *exp)
98{ 88{
89 int flags = nfsexp_flags(rqstp, exp);
90
99 /* Check if the request originated from a secure port. */ 91 /* Check if the request originated from a secure port. */
100 if (!rqstp->rq_secure && EX_SECURE(exp)) { 92 if (!rqstp->rq_secure && (flags & NFSEXP_INSECURE_PORT)) {
101 RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); 93 RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
102 dprintk(KERN_WARNING 94 dprintk(KERN_WARNING
103 "nfsd: request from insecure port %s!\n", 95 "nfsd: request from insecure port %s!\n",
@@ -109,6 +101,36 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
109 return nfserrno(nfsd_setuser(rqstp, exp)); 101 return nfserrno(nfsd_setuser(rqstp, exp));
110} 102}
111 103
104static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
105 struct dentry *dentry, struct svc_export *exp)
106{
107 if (!(exp->ex_flags & NFSEXP_V4ROOT))
108 return nfs_ok;
109 /*
110 * v2/v3 clients have no need for the V4ROOT export--they use
 111 * the mount protocol instead; also, further V4ROOT checks may be
112 * in v4-specific code, in which case v2/v3 clients could bypass
113 * them.
114 */
115 if (!nfsd_v4client(rqstp))
116 return nfserr_stale;
117 /*
118 * We're exposing only the directories and symlinks that have to be
119 * traversed on the way to real exports:
120 */
121 if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
122 !S_ISLNK(dentry->d_inode->i_mode)))
123 return nfserr_stale;
124 /*
125 * A pseudoroot export gives permission to access only one
126 * single directory; the kernel has to make another upcall
127 * before granting access to anything else under it:
128 */
129 if (unlikely(dentry != exp->ex_path.dentry))
130 return nfserr_stale;
131 return nfs_ok;
132}
133
112/* 134/*
113 * Use the given filehandle to look up the corresponding export and 135 * Use the given filehandle to look up the corresponding export and
114 * dentry. On success, the results are used to set fh_export and 136 * dentry. On success, the results are used to set fh_export and
@@ -232,14 +254,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
232 goto out; 254 goto out;
233 } 255 }
234 256
235 if (exp->ex_flags & NFSEXP_NOSUBTREECHECK) {
236 error = nfsd_setuser_and_check_port(rqstp, exp);
237 if (error) {
238 dput(dentry);
239 goto out;
240 }
241 }
242
243 if (S_ISDIR(dentry->d_inode->i_mode) && 257 if (S_ISDIR(dentry->d_inode->i_mode) &&
244 (dentry->d_flags & DCACHE_DISCONNECTED)) { 258 (dentry->d_flags & DCACHE_DISCONNECTED)) {
245 printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n", 259 printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
@@ -294,28 +308,32 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
294 error = nfsd_set_fh_dentry(rqstp, fhp); 308 error = nfsd_set_fh_dentry(rqstp, fhp);
295 if (error) 309 if (error)
296 goto out; 310 goto out;
297 dentry = fhp->fh_dentry;
298 exp = fhp->fh_export;
299 } else {
300 /*
301 * just rechecking permissions
302 * (e.g. nfsproc_create calls fh_verify, then nfsd_create
303 * does as well)
304 */
305 dprintk("nfsd: fh_verify - just checking\n");
306 dentry = fhp->fh_dentry;
307 exp = fhp->fh_export;
308 /*
309 * Set user creds for this exportpoint; necessary even
310 * in the "just checking" case because this may be a
311 * filehandle that was created by fh_compose, and that
312 * is about to be used in another nfsv4 compound
313 * operation.
314 */
315 error = nfsd_setuser_and_check_port(rqstp, exp);
316 if (error)
317 goto out;
318 } 311 }
312 dentry = fhp->fh_dentry;
313 exp = fhp->fh_export;
314 /*
315 * We still have to do all these permission checks, even when
316 * fh_dentry is already set:
317 * - fh_verify may be called multiple times with different
318 * "access" arguments (e.g. nfsd_proc_create calls
319 * fh_verify(...,NFSD_MAY_EXEC) first, then later (in
320 * nfsd_create) calls fh_verify(...,NFSD_MAY_CREATE).
321 * - in the NFSv4 case, the filehandle may have been filled
322 * in by fh_compose, and given a dentry, but further
323 * compound operations performed with that filehandle
324 * still need permissions checks. In the worst case, a
325 * mountpoint crossing may have changed the export
326 * options, and we may now need to use a different uid
327 * (for example, if different id-squashing options are in
328 * effect on the new filesystem).
329 */
330 error = check_pseudo_root(rqstp, dentry, exp);
331 if (error)
332 goto out;
333
334 error = nfsd_setuser_and_check_port(rqstp, exp);
335 if (error)
336 goto out;
319 337
320 error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); 338 error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
321 if (error) 339 if (error)
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
new file mode 100644
index 000000000000..cdfb8c6a4206
--- /dev/null
+++ b/fs/nfsd/nfsfh.h
@@ -0,0 +1,208 @@
1/* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */
2
3#ifndef _LINUX_NFSD_FH_INT_H
4#define _LINUX_NFSD_FH_INT_H
5
6#include <linux/nfsd/nfsfh.h>
7
8enum nfsd_fsid {
9 FSID_DEV = 0,
10 FSID_NUM,
11 FSID_MAJOR_MINOR,
12 FSID_ENCODE_DEV,
13 FSID_UUID4_INUM,
14 FSID_UUID8,
15 FSID_UUID16,
16 FSID_UUID16_INUM,
17};
18
19enum fsid_source {
20 FSIDSOURCE_DEV,
21 FSIDSOURCE_FSID,
22 FSIDSOURCE_UUID,
23};
24extern enum fsid_source fsid_source(struct svc_fh *fhp);
25
26
27/* This might look a little large to "inline" but in all calls except
 28 * one, 'vers' is constant so most of the function disappears.
29 */
30static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino,
31 u32 fsid, unsigned char *uuid)
32{
33 u32 *up;
34 switch(vers) {
35 case FSID_DEV:
36 fsidv[0] = htonl((MAJOR(dev)<<16) |
37 MINOR(dev));
38 fsidv[1] = ino_t_to_u32(ino);
39 break;
40 case FSID_NUM:
41 fsidv[0] = fsid;
42 break;
43 case FSID_MAJOR_MINOR:
44 fsidv[0] = htonl(MAJOR(dev));
45 fsidv[1] = htonl(MINOR(dev));
46 fsidv[2] = ino_t_to_u32(ino);
47 break;
48
49 case FSID_ENCODE_DEV:
50 fsidv[0] = new_encode_dev(dev);
51 fsidv[1] = ino_t_to_u32(ino);
52 break;
53
54 case FSID_UUID4_INUM:
55 /* 4 byte fsid and inode number */
56 up = (u32*)uuid;
57 fsidv[0] = ino_t_to_u32(ino);
58 fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3];
59 break;
60
61 case FSID_UUID8:
62 /* 8 byte fsid */
63 up = (u32*)uuid;
64 fsidv[0] = up[0] ^ up[2];
65 fsidv[1] = up[1] ^ up[3];
66 break;
67
68 case FSID_UUID16:
69 /* 16 byte fsid - NFSv3+ only */
70 memcpy(fsidv, uuid, 16);
71 break;
72
73 case FSID_UUID16_INUM:
74 /* 8 byte inode and 16 byte fsid */
75 *(u64*)fsidv = (u64)ino;
76 memcpy(fsidv+2, uuid, 16);
77 break;
78 default: BUG();
79 }
80}
81
82static inline int key_len(int type)
83{
84 switch(type) {
85 case FSID_DEV: return 8;
86 case FSID_NUM: return 4;
87 case FSID_MAJOR_MINOR: return 12;
88 case FSID_ENCODE_DEV: return 8;
89 case FSID_UUID4_INUM: return 8;
90 case FSID_UUID8: return 8;
91 case FSID_UUID16: return 16;
92 case FSID_UUID16_INUM: return 24;
93 default: return 0;
94 }
95}
96
97/*
98 * Shorthand for dprintk()'s
99 */
100extern char * SVCFH_fmt(struct svc_fh *fhp);
101
102/*
103 * Function prototypes
104 */
105__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
106__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
107__be32 fh_update(struct svc_fh *);
108void fh_put(struct svc_fh *);
109
110static __inline__ struct svc_fh *
111fh_copy(struct svc_fh *dst, struct svc_fh *src)
112{
113 WARN_ON(src->fh_dentry || src->fh_locked);
114
115 *dst = *src;
116 return dst;
117}
118
119static inline void
120fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
121{
122 dst->fh_size = src->fh_size;
123 memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
124}
125
126static __inline__ struct svc_fh *
127fh_init(struct svc_fh *fhp, int maxsize)
128{
129 memset(fhp, 0, sizeof(*fhp));
130 fhp->fh_maxsize = maxsize;
131 return fhp;
132}
133
134#ifdef CONFIG_NFSD_V3
135/*
136 * Fill in the pre_op attr for the wcc data
137 */
138static inline void
139fill_pre_wcc(struct svc_fh *fhp)
140{
141 struct inode *inode;
142
143 inode = fhp->fh_dentry->d_inode;
144 if (!fhp->fh_pre_saved) {
145 fhp->fh_pre_mtime = inode->i_mtime;
146 fhp->fh_pre_ctime = inode->i_ctime;
147 fhp->fh_pre_size = inode->i_size;
148 fhp->fh_pre_change = inode->i_version;
149 fhp->fh_pre_saved = 1;
150 }
151}
152
153extern void fill_post_wcc(struct svc_fh *);
154#else
155#define fill_pre_wcc(ignored)
156#define fill_post_wcc(notused)
157#endif /* CONFIG_NFSD_V3 */
158
159
160/*
161 * Lock a file handle/inode
162 * NOTE: both fh_lock and fh_unlock are done "by hand" in
163 * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
 164 * so any changes here should be reflected there.
165 */
166
167static inline void
168fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
169{
170 struct dentry *dentry = fhp->fh_dentry;
171 struct inode *inode;
172
173 BUG_ON(!dentry);
174
175 if (fhp->fh_locked) {
176 printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
177 dentry->d_parent->d_name.name, dentry->d_name.name);
178 return;
179 }
180
181 inode = dentry->d_inode;
182 mutex_lock_nested(&inode->i_mutex, subclass);
183 fill_pre_wcc(fhp);
184 fhp->fh_locked = 1;
185}
186
187static inline void
188fh_lock(struct svc_fh *fhp)
189{
190 fh_lock_nested(fhp, I_MUTEX_NORMAL);
191}
192
193/*
194 * Unlock a file handle/inode
195 */
196static inline void
197fh_unlock(struct svc_fh *fhp)
198{
199 BUG_ON(!fhp->fh_dentry);
200
201 if (fhp->fh_locked) {
202 fill_post_wcc(fhp);
203 mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
204 fhp->fh_locked = 0;
205 }
206}
207
208#endif /* _LINUX_NFSD_FH_INT_H */
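
The fsid helpers above pack a filesystem identity into the filehandle key. As a reference, here is a minimal user-space sketch of the simplest variant, FSID_DEV; the helper name mk_fsid_dev and the device/inode numbers are made up for illustration and are not part of nfsd:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>                  /* htonl() */

/* Illustrative only: mirrors the FSID_DEV case of mk_fsid() above,
 * which key_len() reports as an 8-byte key. */
static void mk_fsid_dev(uint32_t *fsidv, unsigned major, unsigned minor,
                        uint32_t ino)
{
        fsidv[0] = htonl((major << 16) | minor);   /* packed device number */
        fsidv[1] = ino;                            /* inode number, host order */
}

int main(void)
{
        uint32_t fsidv[2];

        mk_fsid_dev(fsidv, 8, 1, 2);    /* hypothetical dev 8:1, inode 2 */
        printf("fsid key: %08x %08x\n", (unsigned)fsidv[0], (unsigned)fsidv[1]);
        return 0;
}
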
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 0eb9c820b7a6..a047ad6111ef 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -1,29 +1,14 @@
1/* 1/*
2 * nfsproc2.c Process version 2 NFS requests.
3 * linux/fs/nfsd/nfs2proc.c
4 *
5 * Process version 2 NFS requests. 2 * Process version 2 NFS requests.
6 * 3 *
7 * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
8 */ 5 */
9 6
10#include <linux/linkage.h>
11#include <linux/time.h>
12#include <linux/errno.h>
13#include <linux/fs.h>
14#include <linux/stat.h>
15#include <linux/fcntl.h>
16#include <linux/net.h>
17#include <linux/in.h>
18#include <linux/namei.h> 7#include <linux/namei.h>
19#include <linux/unistd.h>
20#include <linux/slab.h>
21 8
22#include <linux/sunrpc/clnt.h> 9#include "cache.h"
23#include <linux/sunrpc/svc.h> 10#include "xdr.h"
24#include <linux/nfsd/nfsd.h> 11#include "vfs.h"
25#include <linux/nfsd/cache.h>
26#include <linux/nfsd/xdr.h>
27 12
28typedef struct svc_rqst svc_rqst; 13typedef struct svc_rqst svc_rqst;
29typedef struct svc_buf svc_buf; 14typedef struct svc_buf svc_buf;
@@ -758,6 +743,7 @@ nfserrno (int errno)
758 { nfserr_io, -ETXTBSY }, 743 { nfserr_io, -ETXTBSY },
759 { nfserr_notsupp, -EOPNOTSUPP }, 744 { nfserr_notsupp, -EOPNOTSUPP },
760 { nfserr_toosmall, -ETOOSMALL }, 745 { nfserr_toosmall, -ETOOSMALL },
746 { nfserr_serverfault, -ESERVERFAULT },
761 }; 747 };
762 int i; 748 int i;
763 749
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 67ea83eedd43..171699eb07c8 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/fs/nfsd/nfssvc.c
3 *
4 * Central processing for nfsd. 2 * Central processing for nfsd.
5 * 3 *
6 * Authors: Olaf Kirch (okir@monad.swb.de) 4 * Authors: Olaf Kirch (okir@monad.swb.de)
@@ -8,33 +6,19 @@
8 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
9 */ 7 */
10 8
11#include <linux/module.h>
12#include <linux/sched.h> 9#include <linux/sched.h>
13#include <linux/time.h>
14#include <linux/errno.h>
15#include <linux/nfs.h>
16#include <linux/in.h>
17#include <linux/uio.h>
18#include <linux/unistd.h>
19#include <linux/slab.h>
20#include <linux/smp.h>
21#include <linux/freezer.h> 10#include <linux/freezer.h>
22#include <linux/fs_struct.h> 11#include <linux/fs_struct.h>
23#include <linux/kthread.h>
24#include <linux/swap.h> 12#include <linux/swap.h>
25 13
26#include <linux/sunrpc/types.h>
27#include <linux/sunrpc/stats.h> 14#include <linux/sunrpc/stats.h>
28#include <linux/sunrpc/svc.h>
29#include <linux/sunrpc/svcsock.h> 15#include <linux/sunrpc/svcsock.h>
30#include <linux/sunrpc/cache.h>
31#include <linux/nfsd/nfsd.h>
32#include <linux/nfsd/stats.h>
33#include <linux/nfsd/cache.h>
34#include <linux/nfsd/syscall.h>
35#include <linux/lockd/bind.h> 16#include <linux/lockd/bind.h>
36#include <linux/nfsacl.h> 17#include <linux/nfsacl.h>
37#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include "nfsd.h"
20#include "cache.h"
21#include "vfs.h"
38 22
39#define NFSDDBG_FACILITY NFSDDBG_SVC 23#define NFSDDBG_FACILITY NFSDDBG_SVC
40 24
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index afd08e2c90a5..4ce005dbf3e6 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -1,20 +1,10 @@
1/* 1/*
2 * linux/fs/nfsd/nfsxdr.c
3 *
4 * XDR support for nfsd 2 * XDR support for nfsd
5 * 3 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 */ 5 */
8 6
9#include <linux/types.h> 7#include "xdr.h"
10#include <linux/time.h>
11#include <linux/nfs.h>
12#include <linux/vfs.h>
13#include <linux/sunrpc/xdr.h>
14#include <linux/sunrpc/svc.h>
15#include <linux/nfsd/nfsd.h>
16#include <linux/nfsd/xdr.h>
17#include <linux/mm.h>
18#include "auth.h" 8#include "auth.h"
19 9
20#define NFSDDBG_FACILITY NFSDDBG_XDR 10#define NFSDDBG_FACILITY NFSDDBG_XDR
diff --git a/include/linux/nfsd/state.h b/fs/nfsd/state.h
index b38d11324189..fefeae27f25e 100644
--- a/include/linux/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/include/nfsd/state.h
3 *
4 * Copyright (c) 2001 The Regents of the University of Michigan. 2 * Copyright (c) 2001 The Regents of the University of Michigan.
5 * All rights reserved. 3 * All rights reserved.
6 * 4 *
@@ -37,9 +35,8 @@
37#ifndef _NFSD4_STATE_H 35#ifndef _NFSD4_STATE_H
38#define _NFSD4_STATE_H 36#define _NFSD4_STATE_H
39 37
40#include <linux/list.h> 38#include <linux/nfsd/nfsfh.h>
41#include <linux/kref.h> 39#include "nfsfh.h"
42#include <linux/sunrpc/clnt.h>
43 40
44typedef struct { 41typedef struct {
45 u32 cl_boot; 42 u32 cl_boot;
@@ -60,6 +57,13 @@ typedef struct {
60#define si_stateownerid si_opaque.so_stateownerid 57#define si_stateownerid si_opaque.so_stateownerid
61#define si_fileid si_opaque.so_fileid 58#define si_fileid si_opaque.so_fileid
62 59
60#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
61#define STATEID_VAL(s) \
62 (s)->si_boot, \
63 (s)->si_stateownerid, \
64 (s)->si_fileid, \
65 (s)->si_generation
66
63struct nfsd4_cb_sequence { 67struct nfsd4_cb_sequence {
64 /* args/res */ 68 /* args/res */
65 u32 cbs_minorversion; 69 u32 cbs_minorversion;
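
The STATEID_FMT/STATEID_VAL pair added above is the usual format-string-plus-argument-list idiom for dprintk calls. A small stand-alone illustration of the pattern; the struct and field names below are placeholders, not the nfsd stateid layout:

#include <stdio.h>

struct stateid { unsigned int boot, ownerid, fileid, generation; };

#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
#define STATEID_VAL(s) (s)->boot, (s)->ownerid, (s)->fileid, (s)->generation

int main(void)
{
        struct stateid sid = { 0x4b2d1c00, 1, 2, 3 };

        /* the macro expands to the four fields in format order */
        printf("stateid " STATEID_FMT "\n", STATEID_VAL(&sid));
        return 0;
}
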
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 71944cddf680..5232d3e8fb2f 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/fs/nfsd/stats.c
3 *
4 * procfs-based user access to knfsd statistics 2 * procfs-based user access to knfsd statistics
5 * 3 *
6 * /proc/net/rpc/nfsd 4 * /proc/net/rpc/nfsd
@@ -23,18 +21,13 @@
23 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> 21 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
24 */ 22 */
25 23
26#include <linux/kernel.h>
27#include <linux/time.h>
28#include <linux/proc_fs.h>
29#include <linux/seq_file.h> 24#include <linux/seq_file.h>
30#include <linux/stat.h>
31#include <linux/module.h> 25#include <linux/module.h>
32
33#include <linux/sunrpc/svc.h>
34#include <linux/sunrpc/stats.h> 26#include <linux/sunrpc/stats.h>
35#include <linux/nfsd/nfsd.h>
36#include <linux/nfsd/stats.h> 27#include <linux/nfsd/stats.h>
37 28
29#include "nfsd.h"
30
38struct nfsd_stats nfsdstats; 31struct nfsd_stats nfsdstats;
39struct svc_stat nfsd_svcstats = { 32struct svc_stat nfsd_svcstats = {
40 .program = &nfsd_program, 33 .program = &nfsd_program,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index a293f0273263..e3ef3ec0efd0 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1,7 +1,5 @@
1#define MSNFS /* HACK HACK */ 1#define MSNFS /* HACK HACK */
2/* 2/*
3 * linux/fs/nfsd/vfs.c
4 *
5 * File operations used by nfsd. Some of these have been ripped from 3 * File operations used by nfsd. Some of these have been ripped from
6 * other parts of the kernel because they weren't exported, others 4 * other parts of the kernel because they weren't exported, others
7 * are partial duplicates with added or changed functionality. 5 * are partial duplicates with added or changed functionality.
@@ -16,48 +14,31 @@
 16 * Zerocopy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp> 14 * Zerocopy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
17 */ 15 */
18 16
19#include <linux/string.h>
20#include <linux/time.h>
21#include <linux/errno.h>
22#include <linux/fs.h> 17#include <linux/fs.h>
23#include <linux/file.h> 18#include <linux/file.h>
24#include <linux/mount.h>
25#include <linux/major.h>
26#include <linux/splice.h> 19#include <linux/splice.h>
27#include <linux/proc_fs.h>
28#include <linux/stat.h>
29#include <linux/fcntl.h> 20#include <linux/fcntl.h>
30#include <linux/net.h>
31#include <linux/unistd.h>
32#include <linux/slab.h>
33#include <linux/pagemap.h>
34#include <linux/in.h>
35#include <linux/module.h>
36#include <linux/namei.h> 21#include <linux/namei.h>
37#include <linux/vfs.h>
38#include <linux/delay.h> 22#include <linux/delay.h>
39#include <linux/sunrpc/svc.h>
40#include <linux/nfsd/nfsd.h>
41#ifdef CONFIG_NFSD_V3
42#include <linux/nfs3.h>
43#include <linux/nfsd/xdr3.h>
44#endif /* CONFIG_NFSD_V3 */
45#include <linux/nfsd/nfsfh.h>
46#include <linux/quotaops.h> 23#include <linux/quotaops.h>
47#include <linux/fsnotify.h> 24#include <linux/fsnotify.h>
48#include <linux/posix_acl.h>
49#include <linux/posix_acl_xattr.h> 25#include <linux/posix_acl_xattr.h>
50#include <linux/xattr.h> 26#include <linux/xattr.h>
27#include <linux/jhash.h>
28#include <linux/ima.h>
29#include <asm/uaccess.h>
30
31#ifdef CONFIG_NFSD_V3
32#include "xdr3.h"
33#endif /* CONFIG_NFSD_V3 */
34
51#ifdef CONFIG_NFSD_V4 35#ifdef CONFIG_NFSD_V4
52#include <linux/nfs4.h>
53#include <linux/nfs4_acl.h> 36#include <linux/nfs4_acl.h>
54#include <linux/nfsd_idmap.h> 37#include <linux/nfsd_idmap.h>
55#include <linux/security.h>
56#endif /* CONFIG_NFSD_V4 */ 38#endif /* CONFIG_NFSD_V4 */
57#include <linux/jhash.h>
58#include <linux/ima.h>
59 39
60#include <asm/uaccess.h> 40#include "nfsd.h"
41#include "vfs.h"
61 42
62#define NFSDDBG_FACILITY NFSDDBG_FILEOP 43#define NFSDDBG_FACILITY NFSDDBG_FILEOP
63 44
@@ -89,12 +70,6 @@ struct raparm_hbucket {
89#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1) 70#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
90static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE]; 71static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
91 72
92static inline int
93nfsd_v4client(struct svc_rqst *rq)
94{
95 return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
96}
97
98/* 73/*
99 * Called from nfsd_lookup and encode_dirent. Check if we have crossed 74 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
100 * a mount point. 75 * a mount point.
@@ -116,8 +91,16 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
116 91
117 exp2 = rqst_exp_get_by_name(rqstp, &path); 92 exp2 = rqst_exp_get_by_name(rqstp, &path);
118 if (IS_ERR(exp2)) { 93 if (IS_ERR(exp2)) {
119 if (PTR_ERR(exp2) != -ENOENT) 94 err = PTR_ERR(exp2);
120 err = PTR_ERR(exp2); 95 /*
96 * We normally allow NFS clients to continue
97 * "underneath" a mountpoint that is not exported.
98 * The exception is V4ROOT, where no traversal is ever
99 * allowed without an explicit export of the new
100 * directory.
101 */
102 if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
103 err = 0;
121 path_put(&path); 104 path_put(&path);
122 goto out; 105 goto out;
123 } 106 }
@@ -141,6 +124,53 @@ out:
141 return err; 124 return err;
142} 125}
143 126
127static void follow_to_parent(struct path *path)
128{
129 struct dentry *dp;
130
131 while (path->dentry == path->mnt->mnt_root && follow_up(path))
132 ;
133 dp = dget_parent(path->dentry);
134 dput(path->dentry);
135 path->dentry = dp;
136}
137
138static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
139{
140 struct svc_export *exp2;
141 struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
142 .dentry = dget(dparent)};
143
144 follow_to_parent(&path);
145
146 exp2 = rqst_exp_parent(rqstp, &path);
147 if (PTR_ERR(exp2) == -ENOENT) {
148 *dentryp = dget(dparent);
149 } else if (IS_ERR(exp2)) {
150 path_put(&path);
151 return PTR_ERR(exp2);
152 } else {
153 *dentryp = dget(path.dentry);
154 exp_put(*exp);
155 *exp = exp2;
156 }
157 path_put(&path);
158 return 0;
159}
160
161/*
162 * For nfsd purposes, we treat V4ROOT exports as though there was an
163 * export at *every* directory.
164 */
165int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
166{
167 if (d_mountpoint(dentry))
168 return 1;
169 if (!(exp->ex_flags & NFSEXP_V4ROOT))
170 return 0;
171 return dentry->d_inode != NULL;
172}
173
144__be32 174__be32
145nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp, 175nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
146 const char *name, unsigned int len, 176 const char *name, unsigned int len,
@@ -169,35 +199,13 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
169 dentry = dget(dparent); 199 dentry = dget(dparent);
170 else if (dparent != exp->ex_path.dentry) 200 else if (dparent != exp->ex_path.dentry)
171 dentry = dget_parent(dparent); 201 dentry = dget_parent(dparent);
172 else if (!EX_NOHIDE(exp)) 202 else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
173 dentry = dget(dparent); /* .. == . just like at / */ 203 dentry = dget(dparent); /* .. == . just like at / */
174 else { 204 else {
175 /* checking mountpoint crossing is very different when stepping up */ 205 /* checking mountpoint crossing is very different when stepping up */
176 struct svc_export *exp2 = NULL; 206 host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
177 struct dentry *dp; 207 if (host_err)
178 struct path path = {.mnt = mntget(exp->ex_path.mnt),
179 .dentry = dget(dparent)};
180
181 while (path.dentry == path.mnt->mnt_root &&
182 follow_up(&path))
183 ;
184 dp = dget_parent(path.dentry);
185 dput(path.dentry);
186 path.dentry = dp;
187
188 exp2 = rqst_exp_parent(rqstp, &path);
189 if (PTR_ERR(exp2) == -ENOENT) {
190 dentry = dget(dparent);
191 } else if (IS_ERR(exp2)) {
192 host_err = PTR_ERR(exp2);
193 path_put(&path);
194 goto out_nfserr; 208 goto out_nfserr;
195 } else {
196 dentry = dget(path.dentry);
197 exp_put(exp);
198 exp = exp2;
199 }
200 path_put(&path);
201 } 209 }
202 } else { 210 } else {
203 fh_lock(fhp); 211 fh_lock(fhp);
@@ -208,7 +216,7 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
208 /* 216 /*
209 * check if we have crossed a mount point ... 217 * check if we have crossed a mount point ...
210 */ 218 */
211 if (d_mountpoint(dentry)) { 219 if (nfsd_mountpoint(dentry, exp)) {
212 if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) { 220 if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) {
213 dput(dentry); 221 dput(dentry);
214 goto out_nfserr; 222 goto out_nfserr;
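
nfsd_mountpoint() above is what lets a V4ROOT export behave as though every directory under it were separately exported: any positive dentry is treated like a mountpoint, which forces nfsd_cross_mnt() and a fresh export lookup. A stand-alone sketch of that predicate with stub types; the flag value and struct layout here are placeholders, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_V4ROOT 0x1                 /* placeholder for NFSEXP_V4ROOT */

struct demo_export { unsigned int flags; };
struct demo_entry  { bool is_mountpoint; bool has_inode; };

static int demo_nfsd_mountpoint(const struct demo_entry *e,
                                const struct demo_export *exp)
{
        if (e->is_mountpoint)
                return 1;               /* a real mountpoint always crosses */
        if (!(exp->flags & DEMO_V4ROOT))
                return 0;               /* ordinary exports: nothing special */
        return e->has_inode;            /* V4ROOT: every positive entry crosses */
}

int main(void)
{
        struct demo_export root = { .flags = DEMO_V4ROOT };
        struct demo_entry dir = { .is_mountpoint = false, .has_inode = true };

        printf("%d\n", demo_nfsd_mountpoint(&dir, &root));
        return 0;
}
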
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
new file mode 100644
index 000000000000..4b1de0a9ea75
--- /dev/null
+++ b/fs/nfsd/vfs.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
3 */
4
5#ifndef LINUX_NFSD_VFS_H
6#define LINUX_NFSD_VFS_H
7
8#include "nfsfh.h"
9
10/*
11 * Flags for nfsd_permission
12 */
13#define NFSD_MAY_NOP 0
14#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
15#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
16#define NFSD_MAY_READ 4 /* == MAY_READ */
17#define NFSD_MAY_SATTR 8
18#define NFSD_MAY_TRUNC 16
19#define NFSD_MAY_LOCK 32
20#define NFSD_MAY_OWNER_OVERRIDE 64
21#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
22#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
23
24#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
25#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
26
27/*
28 * Callback function for readdir
29 */
30typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
31
32/* nfsd/vfs.c */
33int fh_lock_parent(struct svc_fh *, struct dentry *);
34int nfsd_racache_init(int);
35void nfsd_racache_shutdown(void);
36int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
37 struct svc_export **expp);
38__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
39 const char *, unsigned int, struct svc_fh *);
40__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
41 const char *, unsigned int,
42 struct svc_export **, struct dentry **);
43__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
44 struct iattr *, int, time_t);
45int nfsd_mountpoint(struct dentry *, struct svc_export *);
46#ifdef CONFIG_NFSD_V4
47__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
48 struct nfs4_acl *);
49int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
50#endif /* CONFIG_NFSD_V4 */
51__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
52 char *name, int len, struct iattr *attrs,
53 int type, dev_t rdev, struct svc_fh *res);
54#ifdef CONFIG_NFSD_V3
55__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
56__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *,
57 char *name, int len, struct iattr *attrs,
58 struct svc_fh *res, int createmode,
59 u32 *verifier, int *truncp, int *created);
60__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
61 loff_t, unsigned long);
62#endif /* CONFIG_NFSD_V3 */
63__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int,
64 int, struct file **);
65void nfsd_close(struct file *);
66__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
67 loff_t, struct kvec *, int, unsigned long *);
68__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
69 loff_t, struct kvec *,int, unsigned long *, int *);
70__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
71 char *, int *);
72__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
73 char *name, int len, char *path, int plen,
74 struct svc_fh *res, struct iattr *);
75__be32 nfsd_link(struct svc_rqst *, struct svc_fh *,
76 char *, int, struct svc_fh *);
77__be32 nfsd_rename(struct svc_rqst *,
78 struct svc_fh *, char *, int,
79 struct svc_fh *, char *, int);
80__be32 nfsd_remove(struct svc_rqst *,
81 struct svc_fh *, char *, int);
82__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
83 char *name, int len);
84int nfsd_truncate(struct svc_rqst *, struct svc_fh *,
85 unsigned long size);
86__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
87 loff_t *, struct readdir_cd *, filldir_t);
88__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
89 struct kstatfs *, int access);
90
91int nfsd_notify_change(struct inode *, struct iattr *);
92__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
93 struct dentry *, int);
94int nfsd_sync_dir(struct dentry *dp);
95
96#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
97struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
98int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
99#endif
100
101#endif /* LINUX_NFSD_VFS_H */
diff --git a/include/linux/nfsd/xdr.h b/fs/nfsd/xdr.h
index a0132ef58f21..53b1863dd8f6 100644
--- a/include/linux/nfsd/xdr.h
+++ b/fs/nfsd/xdr.h
@@ -1,15 +1,11 @@
1/* 1/* XDR types for nfsd. This is mainly a typing exercise. */
2 * linux/include/linux/nfsd/xdr.h
3 *
4 * XDR types for nfsd. This is mainly a typing exercise.
5 */
6 2
7#ifndef LINUX_NFSD_H 3#ifndef LINUX_NFSD_H
8#define LINUX_NFSD_H 4#define LINUX_NFSD_H
9 5
10#include <linux/fs.h>
11#include <linux/vfs.h> 6#include <linux/vfs.h>
12#include <linux/nfs.h> 7#include "nfsd.h"
8#include "nfsfh.h"
13 9
14struct nfsd_fhandle { 10struct nfsd_fhandle {
15 struct svc_fh fh; 11 struct svc_fh fh;
diff --git a/include/linux/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 421eddd65a25..7df980eb0562 100644
--- a/include/linux/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/include/linux/nfsd/xdr3.h
3 *
4 * XDR types for NFSv3 in nfsd. 2 * XDR types for NFSv3 in nfsd.
5 * 3 *
6 * Copyright (C) 1996-1998, Olaf Kirch <okir@monad.swb.de> 4 * Copyright (C) 1996-1998, Olaf Kirch <okir@monad.swb.de>
@@ -9,7 +7,7 @@
9#ifndef _LINUX_NFSD_XDR3_H 7#ifndef _LINUX_NFSD_XDR3_H
10#define _LINUX_NFSD_XDR3_H 8#define _LINUX_NFSD_XDR3_H
11 9
12#include <linux/nfsd/xdr.h> 10#include "xdr.h"
13 11
14struct nfsd3_sattrargs { 12struct nfsd3_sattrargs {
15 struct svc_fh fh; 13 struct svc_fh fh;
diff --git a/include/linux/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 73164c2b3d29..efa337739534 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * include/linux/nfsd/xdr4.h
3 *
4 * Server-side types for NFSv4. 2 * Server-side types for NFSv4.
5 * 3 *
6 * Copyright (c) 2002 The Regents of the University of Michigan. 4 * Copyright (c) 2002 The Regents of the University of Michigan.
@@ -39,7 +37,8 @@
39#ifndef _LINUX_NFSD_XDR4_H 37#ifndef _LINUX_NFSD_XDR4_H
40#define _LINUX_NFSD_XDR4_H 38#define _LINUX_NFSD_XDR4_H
41 39
42#include <linux/nfs4.h> 40#include "state.h"
41#include "nfsd.h"
43 42
44#define NFSD4_MAX_TAGLEN 128 43#define NFSD4_MAX_TAGLEN 128
45#define XDR_LEN(n) (((n) + 3) & ~3) 44#define XDR_LEN(n) (((n) + 3) & ~3)
diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig
index 251da07b2a1d..1225af7b2166 100644
--- a/fs/nilfs2/Kconfig
+++ b/fs/nilfs2/Kconfig
@@ -2,6 +2,7 @@ config NILFS2_FS
2 tristate "NILFS2 file system support (EXPERIMENTAL)" 2 tristate "NILFS2 file system support (EXPERIMENTAL)"
3 depends on EXPERIMENTAL 3 depends on EXPERIMENTAL
4 select CRC32 4 select CRC32
5 select FS_JOURNAL_INFO
5 help 6 help
6 NILFS2 is a log-structured file system (LFS) supporting continuous 7 NILFS2 is a log-structured file system (LFS) supporting continuous
7 snapshotting. In addition to versioning capability of the entire 8 snapshotting. In addition to versioning capability of the entire
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index deb2b132ae5e..3dae4a13f6e4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -547,6 +547,9 @@ bail:
547 * 547 *
548 * called like this: dio->get_blocks(dio->inode, fs_startblk, 548 * called like this: dio->get_blocks(dio->inode, fs_startblk,
549 * fs_count, map_bh, dio->rw == WRITE); 549 * fs_count, map_bh, dio->rw == WRITE);
550 *
551 * Note that we never bother to allocate blocks here, and thus ignore the
552 * create argument.
550 */ 553 */
551static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, 554static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
552 struct buffer_head *bh_result, int create) 555 struct buffer_head *bh_result, int create)
@@ -563,14 +566,6 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
563 566
564 inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); 567 inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
565 568
566 /*
567 * Any write past EOF is not allowed because we'd be extending.
568 */
569 if (create && (iblock + max_blocks) > inode_blocks) {
570 ret = -EIO;
571 goto bail;
572 }
573
574 /* This figures out the size of the next contiguous block, and 569 /* This figures out the size of the next contiguous block, and
575 * our logical offset */ 570 * our logical offset */
576 ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, 571 ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
@@ -582,15 +577,6 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
582 goto bail; 577 goto bail;
583 } 578 }
584 579
585 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno && create) {
586 ocfs2_error(inode->i_sb,
587 "Inode %llu has a hole at block %llu\n",
588 (unsigned long long)OCFS2_I(inode)->ip_blkno,
589 (unsigned long long)iblock);
590 ret = -EROFS;
591 goto bail;
592 }
593
594 /* We should already CoW the refcounted extent. */ 580 /* We should already CoW the refcounted extent. */
595 BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); 581 BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
596 /* 582 /*
@@ -601,20 +587,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
601 */ 587 */
602 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) 588 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
603 map_bh(bh_result, inode->i_sb, p_blkno); 589 map_bh(bh_result, inode->i_sb, p_blkno);
604 else { 590 else
605 /*
606 * ocfs2_prepare_inode_for_write() should have caught
607 * the case where we'd be filling a hole and triggered
608 * a buffered write instead.
609 */
610 if (create) {
611 ret = -EIO;
612 mlog_errno(ret);
613 goto bail;
614 }
615
616 clear_buffer_mapped(bh_result); 591 clear_buffer_mapped(bh_result);
617 }
618 592
619 /* make sure we don't map more than max_blocks blocks here as 593 /* make sure we don't map more than max_blocks blocks here as
620 that's all the kernel will handle at this point. */ 594 that's all the kernel will handle at this point. */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index af643b5aefe8..18d5cc62d8ed 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1265,6 +1265,72 @@ static const struct file_operations proc_pid_sched_operations = {
1265 1265
1266#endif 1266#endif
1267 1267
1268static ssize_t comm_write(struct file *file, const char __user *buf,
1269 size_t count, loff_t *offset)
1270{
1271 struct inode *inode = file->f_path.dentry->d_inode;
1272 struct task_struct *p;
1273 char buffer[TASK_COMM_LEN];
1274
1275 memset(buffer, 0, sizeof(buffer));
1276 if (count > sizeof(buffer) - 1)
1277 count = sizeof(buffer) - 1;
1278 if (copy_from_user(buffer, buf, count))
1279 return -EFAULT;
1280
1281 p = get_proc_task(inode);
1282 if (!p)
1283 return -ESRCH;
1284
1285 if (same_thread_group(current, p))
1286 set_task_comm(p, buffer);
1287 else
1288 count = -EINVAL;
1289
1290 put_task_struct(p);
1291
1292 return count;
1293}
1294
1295static int comm_show(struct seq_file *m, void *v)
1296{
1297 struct inode *inode = m->private;
1298 struct task_struct *p;
1299
1300 p = get_proc_task(inode);
1301 if (!p)
1302 return -ESRCH;
1303
1304 task_lock(p);
1305 seq_printf(m, "%s\n", p->comm);
1306 task_unlock(p);
1307
1308 put_task_struct(p);
1309
1310 return 0;
1311}
1312
1313static int comm_open(struct inode *inode, struct file *filp)
1314{
1315 int ret;
1316
1317 ret = single_open(filp, comm_show, NULL);
1318 if (!ret) {
1319 struct seq_file *m = filp->private_data;
1320
1321 m->private = inode;
1322 }
1323 return ret;
1324}
1325
1326static const struct file_operations proc_pid_set_comm_operations = {
1327 .open = comm_open,
1328 .read = seq_read,
1329 .write = comm_write,
1330 .llseek = seq_lseek,
1331 .release = single_release,
1332};
1333
1268/* 1334/*
1269 * We added or removed a vma mapping the executable. The vmas are only mapped 1335 * We added or removed a vma mapping the executable. The vmas are only mapped
1270 * during exec and are not mapped with the mmap system call. 1336 * during exec and are not mapped with the mmap system call.
@@ -2200,7 +2266,7 @@ static const struct inode_operations proc_attr_dir_inode_operations = {
2200 2266
2201#endif 2267#endif
2202 2268
2203#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 2269#ifdef CONFIG_ELF_CORE
2204static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf, 2270static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
2205 size_t count, loff_t *ppos) 2271 size_t count, loff_t *ppos)
2206{ 2272{
@@ -2504,6 +2570,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2504#ifdef CONFIG_SCHED_DEBUG 2570#ifdef CONFIG_SCHED_DEBUG
2505 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2571 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
2506#endif 2572#endif
2573 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
2507#ifdef CONFIG_HAVE_ARCH_TRACEHOOK 2574#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
2508 INF("syscall", S_IRUSR, proc_pid_syscall), 2575 INF("syscall", S_IRUSR, proc_pid_syscall),
2509#endif 2576#endif
@@ -2556,7 +2623,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2556#ifdef CONFIG_FAULT_INJECTION 2623#ifdef CONFIG_FAULT_INJECTION
2557 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), 2624 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
2558#endif 2625#endif
2559#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 2626#ifdef CONFIG_ELF_CORE
2560 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), 2627 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
2561#endif 2628#endif
2562#ifdef CONFIG_TASK_IO_ACCOUNTING 2629#ifdef CONFIG_TASK_IO_ACCOUNTING
@@ -2838,6 +2905,7 @@ static const struct pid_entry tid_base_stuff[] = {
2838#ifdef CONFIG_SCHED_DEBUG 2905#ifdef CONFIG_SCHED_DEBUG
2839 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2906 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
2840#endif 2907#endif
2908 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
2841#ifdef CONFIG_HAVE_ARCH_TRACEHOOK 2909#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
2842 INF("syscall", S_IRUSR, proc_pid_syscall), 2910 INF("syscall", S_IRUSR, proc_pid_syscall),
2843#endif 2911#endif
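
The new comm file gives each task a readable and, for writers in the same thread group, writable name. A small user-space example, assuming a kernel that includes the comm_write/comm_show handlers above:

#include <stdio.h>

int main(void)
{
        char name[64];
        FILE *f;

        f = fopen("/proc/self/comm", "w");       /* rename the current task */
        if (f) {
                fputs("demo-task", f);
                fclose(f);
        }

        f = fopen("/proc/self/comm", "r");       /* read the name back */
        if (f && fgets(name, sizeof(name), f))
                printf("comm is now: %s", name); /* comm_show() appends '\n' */
        if (f)
                fclose(f);
        return 0;
}
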
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index fa678abc9db1..480cb1065eec 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -429,7 +429,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
429 unsigned int ino; 429 unsigned int ino;
430 430
431 ino = de->low_ino; 431 ino = de->low_ino;
432 de_get(de); 432 pde_get(de);
433 spin_unlock(&proc_subdir_lock); 433 spin_unlock(&proc_subdir_lock);
434 error = -EINVAL; 434 error = -EINVAL;
435 inode = proc_get_inode(dir->i_sb, ino, de); 435 inode = proc_get_inode(dir->i_sb, ino, de);
@@ -445,7 +445,7 @@ out_unlock:
445 return NULL; 445 return NULL;
446 } 446 }
447 if (de) 447 if (de)
448 de_put(de); 448 pde_put(de);
449 return ERR_PTR(error); 449 return ERR_PTR(error);
450} 450}
451 451
@@ -509,17 +509,17 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
509 struct proc_dir_entry *next; 509 struct proc_dir_entry *next;
510 510
511 /* filldir passes info to user space */ 511 /* filldir passes info to user space */
512 de_get(de); 512 pde_get(de);
513 spin_unlock(&proc_subdir_lock); 513 spin_unlock(&proc_subdir_lock);
514 if (filldir(dirent, de->name, de->namelen, filp->f_pos, 514 if (filldir(dirent, de->name, de->namelen, filp->f_pos,
515 de->low_ino, de->mode >> 12) < 0) { 515 de->low_ino, de->mode >> 12) < 0) {
516 de_put(de); 516 pde_put(de);
517 goto out; 517 goto out;
518 } 518 }
519 spin_lock(&proc_subdir_lock); 519 spin_lock(&proc_subdir_lock);
520 filp->f_pos++; 520 filp->f_pos++;
521 next = de->next; 521 next = de->next;
522 de_put(de); 522 pde_put(de);
523 de = next; 523 de = next;
524 } while (de); 524 } while (de);
525 spin_unlock(&proc_subdir_lock); 525 spin_unlock(&proc_subdir_lock);
@@ -763,7 +763,7 @@ out:
763 return NULL; 763 return NULL;
764} 764}
765 765
766void free_proc_entry(struct proc_dir_entry *de) 766static void free_proc_entry(struct proc_dir_entry *de)
767{ 767{
768 unsigned int ino = de->low_ino; 768 unsigned int ino = de->low_ino;
769 769
@@ -777,6 +777,12 @@ void free_proc_entry(struct proc_dir_entry *de)
777 kfree(de); 777 kfree(de);
778} 778}
779 779
780void pde_put(struct proc_dir_entry *pde)
781{
782 if (atomic_dec_and_test(&pde->count))
783 free_proc_entry(pde);
784}
785
780/* 786/*
781 * Remove a /proc entry and free it if it's not currently in use. 787 * Remove a /proc entry and free it if it's not currently in use.
782 */ 788 */
@@ -845,6 +851,5 @@ continue_removing:
845 WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory " 851 WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
846 "'%s/%s', leaking at least '%s'\n", __func__, 852 "'%s/%s', leaking at least '%s'\n", __func__,
847 de->parent->name, de->name, de->subdir->name); 853 de->parent->name, de->name, de->subdir->name);
848 if (atomic_dec_and_test(&de->count)) 854 pde_put(de);
849 free_proc_entry(de);
850} 855}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d78ade305541..445a02bcaab3 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -24,29 +24,6 @@
24 24
25#include "internal.h" 25#include "internal.h"
26 26
27struct proc_dir_entry *de_get(struct proc_dir_entry *de)
28{
29 atomic_inc(&de->count);
30 return de;
31}
32
33/*
34 * Decrements the use count and checks for deferred deletion.
35 */
36void de_put(struct proc_dir_entry *de)
37{
38 if (!atomic_read(&de->count)) {
39 printk("de_put: entry %s already free!\n", de->name);
40 return;
41 }
42
43 if (atomic_dec_and_test(&de->count))
44 free_proc_entry(de);
45}
46
47/*
48 * Decrement the use count of the proc_dir_entry.
49 */
50static void proc_delete_inode(struct inode *inode) 27static void proc_delete_inode(struct inode *inode)
51{ 28{
52 struct proc_dir_entry *de; 29 struct proc_dir_entry *de;
@@ -59,7 +36,7 @@ static void proc_delete_inode(struct inode *inode)
59 /* Let go of any associated proc directory entry */ 36 /* Let go of any associated proc directory entry */
60 de = PROC_I(inode)->pde; 37 de = PROC_I(inode)->pde;
61 if (de) 38 if (de)
62 de_put(de); 39 pde_put(de);
63 if (PROC_I(inode)->sysctl) 40 if (PROC_I(inode)->sysctl)
64 sysctl_head_put(PROC_I(inode)->sysctl); 41 sysctl_head_put(PROC_I(inode)->sysctl);
65 clear_inode(inode); 42 clear_inode(inode);
@@ -480,7 +457,7 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
480 } 457 }
481 unlock_new_inode(inode); 458 unlock_new_inode(inode);
482 } else 459 } else
483 de_put(de); 460 pde_put(de);
484 return inode; 461 return inode;
485} 462}
486 463
@@ -495,7 +472,7 @@ int proc_fill_super(struct super_block *s)
495 s->s_op = &proc_sops; 472 s->s_op = &proc_sops;
496 s->s_time_gran = 1; 473 s->s_time_gran = 1;
497 474
498 de_get(&proc_root); 475 pde_get(&proc_root);
499 root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); 476 root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
500 if (!root_inode) 477 if (!root_inode)
501 goto out_no_root; 478 goto out_no_root;
@@ -509,6 +486,6 @@ int proc_fill_super(struct super_block *s)
509out_no_root: 486out_no_root:
510 printk("proc_read_super: get root inode failed\n"); 487 printk("proc_read_super: get root inode failed\n");
511 iput(root_inode); 488 iput(root_inode);
512 de_put(&proc_root); 489 pde_put(&proc_root);
513 return -ENOMEM; 490 return -ENOMEM;
514} 491}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 753ca37002c8..1f24a3eddd12 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -61,8 +61,6 @@ extern const struct file_operations proc_pagemap_operations;
61extern const struct file_operations proc_net_operations; 61extern const struct file_operations proc_net_operations;
62extern const struct inode_operations proc_net_inode_operations; 62extern const struct inode_operations proc_net_inode_operations;
63 63
64void free_proc_entry(struct proc_dir_entry *de);
65
66void proc_init_inodecache(void); 64void proc_init_inodecache(void);
67 65
68static inline struct pid *proc_pid(struct inode *inode) 66static inline struct pid *proc_pid(struct inode *inode)
@@ -101,8 +99,12 @@ unsigned long task_vsize(struct mm_struct *);
101int task_statm(struct mm_struct *, int *, int *, int *, int *); 99int task_statm(struct mm_struct *, int *, int *, int *, int *);
102void task_mem(struct seq_file *, struct mm_struct *); 100void task_mem(struct seq_file *, struct mm_struct *);
103 101
104struct proc_dir_entry *de_get(struct proc_dir_entry *de); 102static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
105void de_put(struct proc_dir_entry *de); 103{
104 atomic_inc(&pde->count);
105 return pde;
106}
107void pde_put(struct proc_dir_entry *pde);
106 108
107extern struct vfsmount *proc_mnt; 109extern struct vfsmount *proc_mnt;
108int proc_fill_super(struct super_block *); 110int proc_fill_super(struct super_block *);
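
pde_get()/pde_put() replace de_get()/de_put() with a single reference-counting pair whose final put frees the entry. An illustrative user-space analogue of that get/put pattern, simplified and not the procfs code itself:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        atomic_int count;
        const char *name;
};

static struct entry *entry_get(struct entry *e)
{
        atomic_fetch_add(&e->count, 1);
        return e;
}

static void entry_put(struct entry *e)
{
        /* free only when the last reference is dropped */
        if (atomic_fetch_sub(&e->count, 1) == 1) {
                printf("freeing %s\n", e->name);
                free(e);
        }
}

int main(void)
{
        struct entry *e = malloc(sizeof(*e));

        atomic_init(&e->count, 1);      /* creation reference */
        e->name = "demo";

        entry_get(e);                   /* a lookup takes a reference */
        entry_put(e);                   /* ... and drops it when done */
        entry_put(e);                   /* final put frees the entry */
        return 0;
}
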
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2a1bef9203c6..47c03f4336b8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -650,6 +650,50 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
650 return err; 650 return err;
651} 651}
652 652
653static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
654{
655 u64 pme = 0;
656 if (pte_present(pte))
657 pme = PM_PFRAME(pte_pfn(pte) + offset)
658 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
659 return pme;
660}
661
662static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
663 unsigned long end, struct mm_walk *walk)
664{
665 struct vm_area_struct *vma;
666 struct pagemapread *pm = walk->private;
667 struct hstate *hs = NULL;
668 int err = 0;
669
670 vma = find_vma(walk->mm, addr);
671 if (vma)
672 hs = hstate_vma(vma);
673 for (; addr != end; addr += PAGE_SIZE) {
674 u64 pfn = PM_NOT_PRESENT;
675
676 if (vma && (addr >= vma->vm_end)) {
677 vma = find_vma(walk->mm, addr);
678 if (vma)
679 hs = hstate_vma(vma);
680 }
681
682 if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
683 /* calculate pfn of the "raw" page in the hugepage. */
684 int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
685 pfn = huge_pte_to_pagemap_entry(*pte, offset);
686 }
687 err = add_to_pagemap(addr, pfn, pm);
688 if (err)
689 return err;
690 }
691
692 cond_resched();
693
694 return err;
695}
696
653/* 697/*
654 * /proc/pid/pagemap - an array mapping virtual pages to pfns 698 * /proc/pid/pagemap - an array mapping virtual pages to pfns
655 * 699 *
@@ -742,6 +786,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
742 786
743 pagemap_walk.pmd_entry = pagemap_pte_range; 787 pagemap_walk.pmd_entry = pagemap_pte_range;
744 pagemap_walk.pte_hole = pagemap_pte_hole; 788 pagemap_walk.pte_hole = pagemap_pte_hole;
789 pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
745 pagemap_walk.mm = mm; 790 pagemap_walk.mm = mm;
746 pagemap_walk.private = &pm; 791 pagemap_walk.private = &pm;
747 792
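
The hugetlb walker above emits the same 64-bit pagemap entries as the normal PTE walker: PFN in the low bits, page-shift and present flags in the high bits. A user-space sketch that reads one such entry back for an ordinary mapped page, assuming the usual layout of bits 0-54 for the PFN and bit 63 for the present flag:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
        long psize = sysconf(_SC_PAGESIZE);
        char *page = malloc(psize);
        uint64_t entry = 0;
        int fd;

        if (!page)
                return 1;
        page[0] = 1;                    /* fault the page in so it is present */

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
                return 1;
        /* one 64-bit entry per virtual page, indexed by vaddr / page size */
        pread(fd, &entry, sizeof(entry),
              ((uintptr_t)page / psize) * sizeof(entry));
        printf("present=%d pfn=%llu\n",
               (int)(entry >> 63),
               (unsigned long long)(entry & ((1ULL << 55) - 1)));
        close(fd);
        free(page);
        return 0;
}
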
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8f5c05d3dbd3..5d9fd64ef81a 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -110,9 +110,13 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
110 } 110 }
111 } 111 }
112 112
113 size += (*text = mm->end_code - mm->start_code); 113 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
114 size += (*data = mm->start_stack - mm->start_data); 114 >> PAGE_SHIFT;
115 *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
116 >> PAGE_SHIFT;
115 up_read(&mm->mmap_sem); 117 up_read(&mm->mmap_sem);
118 size >>= PAGE_SHIFT;
119 size += *text + *data;
116 *resident = size; 120 *resident = size;
117 return size; 121 return size;
118} 122}
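
The task_statm() change reports text and data sizes as page counts, rounding each byte range out to whole pages before shifting. The arithmetic in isolation, with a PAGE_SHIFT of 12 assumed for the example:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* round the end up and the start down, then count whole pages */
static unsigned long pages_spanned(unsigned long start, unsigned long end)
{
        return (PAGE_ALIGN(end) - (start & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
        /* a range from 0x1001 up to 0x4000 touches pages 1, 2 and 3 */
        printf("%lu\n", pages_spanned(0x1001, 0x4000));
        return 0;
}
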
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 32f5d131a644..22e0d60e53ef 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -17,13 +17,6 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include "qnx4.h" 18#include "qnx4.h"
19 19
20#if 0
21int qnx4_new_block(struct super_block *sb)
22{
23 return 0;
24}
25#endif /* 0 */
26
27static void count_bits(register const char *bmPart, register int size, 20static void count_bits(register const char *bmPart, register int size,
28 int *const tf) 21 int *const tf)
29{ 22{
@@ -35,22 +28,7 @@ static void count_bits(register const char *bmPart, register int size,
35 } 28 }
36 do { 29 do {
37 b = *bmPart++; 30 b = *bmPart++;
38 if ((b & 1) == 0) 31 tot += 8 - hweight8(b);
39 tot++;
40 if ((b & 2) == 0)
41 tot++;
42 if ((b & 4) == 0)
43 tot++;
44 if ((b & 8) == 0)
45 tot++;
46 if ((b & 16) == 0)
47 tot++;
48 if ((b & 32) == 0)
49 tot++;
50 if ((b & 64) == 0)
51 tot++;
52 if ((b & 128) == 0)
53 tot++;
54 size--; 32 size--;
55 } while (size != 0); 33 } while (size != 0);
56 *tf = tot; 34 *tf = tot;
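
The count_bits() rewrite relies on 8 - hweight8(b) being the number of zero (free) bits in a byte. A quick user-space check of that identity against the unrolled per-bit tests it replaces, using the compiler's popcount builtin in place of hweight8():

#include <stdio.h>

int main(void)
{
        unsigned int b;

        for (b = 0; b < 256; b++) {
                int slow = 0, bit;

                for (bit = 0; bit < 8; bit++)
                        if (!(b & (1u << bit)))
                                slow++;
                if (slow != 8 - __builtin_popcount(b)) {
                        printf("mismatch at %u\n", b);
                        return 1;
                }
        }
        printf("8 - popcount(b) counts the zero bits for every byte value\n");
        return 0;
}
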
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 449f5a66dd34..ebf3440d28ca 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -64,25 +64,7 @@ static struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
64 result = sb_getblk(inode->i_sb, nr); 64 result = sb_getblk(inode->i_sb, nr);
65 return result; 65 return result;
66 } 66 }
67 if (!create) { 67 return NULL;
68 return NULL;
69 }
70#if 0
71 tmp = qnx4_new_block(inode->i_sb);
72 if (!tmp) {
73 return NULL;
74 }
75 result = sb_getblk(inode->i_sb, tmp);
76 if (tst) {
77 qnx4_free_block(inode->i_sb, tmp);
78 brelse(result);
79 goto repeat;
80 }
81 tst = tmp;
82#endif
83 inode->i_ctime = CURRENT_TIME_SEC;
84 mark_inode_dirty(inode);
85 return result;
86} 68}
87 69
88struct buffer_head *qnx4_bread(struct inode *inode, int block, int create) 70struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
@@ -113,8 +95,6 @@ static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_h
113 if ( phys ) { 95 if ( phys ) {
114 // logical block is before EOF 96 // logical block is before EOF
115 map_bh(bh, inode->i_sb, phys); 97 map_bh(bh, inode->i_sb, phys);
116 } else if ( create ) {
117 // to be done.
118 } 98 }
119 return 0; 99 return 0;
120} 100}
diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig
index 513f431038f9..ac7cd75c86f8 100644
--- a/fs/reiserfs/Kconfig
+++ b/fs/reiserfs/Kconfig
@@ -1,6 +1,7 @@
1config REISERFS_FS 1config REISERFS_FS
2 tristate "Reiserfs support" 2 tristate "Reiserfs support"
3 select CRC32 3 select CRC32
4 select FS_JOURNAL_INFO
4 help 5 help
5 Stores not just filenames but the files themselves in a balanced 6 Stores not just filenames but the files themselves in a balanced
6 tree. Uses journalling. 7 tree. Uses journalling.
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 6a9e30c041dd..792b3cb2cd18 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -7,7 +7,11 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs.o
7reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \ 7reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
8 super.o prints.o objectid.o lbalance.o ibalance.o stree.o \ 8 super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
9 hashes.o tail_conversion.o journal.o resize.o \ 9 hashes.o tail_conversion.o journal.o resize.o \
10 item_ops.o ioctl.o procfs.o xattr.o lock.o 10 item_ops.o ioctl.o xattr.o lock.o
11
12ifeq ($(CONFIG_REISERFS_PROC_INFO),y)
13reiserfs-objs += procfs.o
14endif
11 15
12ifeq ($(CONFIG_REISERFS_FS_XATTR),y) 16ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
13reiserfs-objs += xattr_user.o xattr_trusted.o 17reiserfs-objs += xattr_user.o xattr_trusted.o
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 9229e5514a4e..7a9981196c1c 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -17,8 +17,6 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19 19
20#ifdef CONFIG_REISERFS_PROC_INFO
21
22/* 20/*
23 * LOCKING: 21 * LOCKING:
24 * 22 *
@@ -48,14 +46,6 @@ static int show_version(struct seq_file *m, struct super_block *sb)
48 return 0; 46 return 0;
49} 47}
50 48
51int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
52 int count, int *eof, void *data)
53{
54 *start = buffer;
55 *eof = 1;
56 return 0;
57}
58
59#define SF( x ) ( r -> x ) 49#define SF( x ) ( r -> x )
60#define SFP( x ) SF( s_proc_info_data.x ) 50#define SFP( x ) SF( s_proc_info_data.x )
61#define SFPL( x ) SFP( x[ level ] ) 51#define SFPL( x ) SFP( x[ level ] )
@@ -538,19 +528,6 @@ int reiserfs_proc_info_done(struct super_block *sb)
538 return 0; 528 return 0;
539} 529}
540 530
541struct proc_dir_entry *reiserfs_proc_register_global(char *name,
542 read_proc_t * func)
543{
544 return (proc_info_root) ? create_proc_read_entry(name, 0,
545 proc_info_root,
546 func, NULL) : NULL;
547}
548
549void reiserfs_proc_unregister_global(const char *name)
550{
551 remove_proc_entry(name, proc_info_root);
552}
553
554int reiserfs_proc_info_global_init(void) 531int reiserfs_proc_info_global_init(void)
555{ 532{
556 if (proc_info_root == NULL) { 533 if (proc_info_root == NULL) {
@@ -572,48 +549,6 @@ int reiserfs_proc_info_global_done(void)
572 } 549 }
573 return 0; 550 return 0;
574} 551}
575
576/* REISERFS_PROC_INFO */
577#else
578
579int reiserfs_proc_info_init(struct super_block *sb)
580{
581 return 0;
582}
583int reiserfs_proc_info_done(struct super_block *sb)
584{
585 return 0;
586}
587
588struct proc_dir_entry *reiserfs_proc_register_global(char *name,
589 read_proc_t * func)
590{
591 return NULL;
592}
593
594void reiserfs_proc_unregister_global(const char *name)
595{;
596}
597
598int reiserfs_proc_info_global_init(void)
599{
600 return 0;
601}
602int reiserfs_proc_info_global_done(void)
603{
604 return 0;
605}
606
607int reiserfs_global_version_in_proc(char *buffer, char **start,
608 off_t offset,
609 int count, int *eof, void *data)
610{
611 return 0;
612}
613
614/* REISERFS_PROC_INFO */
615#endif
616
617/* 552/*
618 * Revision 1.1.8.2 2001/07/15 17:08:42 god 553 * Revision 1.1.8.2 2001/07/15 17:08:42 god
619 * . use get_super() in procfs.c 554 * . use get_super() in procfs.c
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 339b0baf2af6..b4a7dd03bdb9 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2222,8 +2222,6 @@ static int __init init_reiserfs_fs(void)
2222 } 2222 }
2223 2223
2224 reiserfs_proc_info_global_init(); 2224 reiserfs_proc_info_global_init();
2225 reiserfs_proc_register_global("version",
2226 reiserfs_global_version_in_proc);
2227 2225
2228 ret = register_filesystem(&reiserfs_fs_type); 2226 ret = register_filesystem(&reiserfs_fs_type);
2229 2227
@@ -2231,7 +2229,6 @@ static int __init init_reiserfs_fs(void)
2231 return 0; 2229 return 0;
2232 } 2230 }
2233 2231
2234 reiserfs_proc_unregister_global("version");
2235 reiserfs_proc_info_global_done(); 2232 reiserfs_proc_info_global_done();
2236 destroy_inodecache(); 2233 destroy_inodecache();
2237 2234
@@ -2240,7 +2237,6 @@ static int __init init_reiserfs_fs(void)
2240 2237
2241static void __exit exit_reiserfs_fs(void) 2238static void __exit exit_reiserfs_fs(void)
2242{ 2239{
2243 reiserfs_proc_unregister_global("version");
2244 reiserfs_proc_info_global_done(); 2240 reiserfs_proc_info_global_done();
2245 unregister_filesystem(&reiserfs_fs_type); 2241 unregister_filesystem(&reiserfs_fs_type);
2246 destroy_inodecache(); 2242 destroy_inodecache();
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 8a771c59ac3e..90492327b383 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -350,13 +350,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
350 le32_to_cpu(sup->fmt_version)); 350 le32_to_cpu(sup->fmt_version));
351 printk(KERN_DEBUG "\ttime_gran %u\n", 351 printk(KERN_DEBUG "\ttime_gran %u\n",
352 le32_to_cpu(sup->time_gran)); 352 le32_to_cpu(sup->time_gran));
353 printk(KERN_DEBUG "\tUUID %02X%02X%02X%02X-%02X%02X" 353 printk(KERN_DEBUG "\tUUID %pUB\n",
354 "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n", 354 sup->uuid);
355 sup->uuid[0], sup->uuid[1], sup->uuid[2], sup->uuid[3],
356 sup->uuid[4], sup->uuid[5], sup->uuid[6], sup->uuid[7],
357 sup->uuid[8], sup->uuid[9], sup->uuid[10], sup->uuid[11],
358 sup->uuid[12], sup->uuid[13], sup->uuid[14],
359 sup->uuid[15]);
360 break; 355 break;
361 } 356 }
362 case UBIFS_MST_NODE: 357 case UBIFS_MST_NODE:
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 943ad5624530..43f9d19a6f33 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1393,12 +1393,7 @@ static int mount_ubifs(struct ubifs_info *c)
1393 c->leb_size, c->leb_size >> 10); 1393 c->leb_size, c->leb_size >> 10);
1394 dbg_msg("data journal heads: %d", 1394 dbg_msg("data journal heads: %d",
1395 c->jhead_cnt - NONDATA_JHEADS_CNT); 1395 c->jhead_cnt - NONDATA_JHEADS_CNT);
1396 dbg_msg("UUID: %02X%02X%02X%02X-%02X%02X" 1396 dbg_msg("UUID: %pUB", c->uuid);
1397 "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
1398 c->uuid[0], c->uuid[1], c->uuid[2], c->uuid[3],
1399 c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7],
1400 c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11],
1401 c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]);
1402 dbg_msg("big_lpt %d", c->big_lpt); 1397 dbg_msg("big_lpt %d", c->big_lpt);
1403 dbg_msg("log LEBs: %d (%d - %d)", 1398 dbg_msg("log LEBs: %d (%d - %d)",
1404 c->log_lebs, UBIFS_LOG_LNUM, c->log_last); 1399 c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
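
Both UBIFS hunks above replace the open-coded sixteen-byte hex formatting with the %pU printk extension; %pUB prints the buffer big-endian in upper case with the usual 8-4-4-4-12 grouping. A minimal sketch of the idiom — the function name is hypothetical, a raw u8[16] buffer is assumed:

#include <linux/kernel.h>

/* prints e.g. "UUID 0102AABB-CCDD-..." from a raw 16-byte buffer */
static void example_dump_uuid(const u8 uuid[16])
{
	printk(KERN_DEBUG "UUID %pUB\n", uuid);
}
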
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 6f671f1ac271..22af68f8b682 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -70,13 +70,13 @@ static inline unsigned long ufs_dir_pages(struct inode *inode)
70 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; 70 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
71} 71}
72 72
73ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry) 73ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr)
74{ 74{
75 ino_t res = 0; 75 ino_t res = 0;
76 struct ufs_dir_entry *de; 76 struct ufs_dir_entry *de;
77 struct page *page; 77 struct page *page;
78 78
79 de = ufs_find_entry(dir, dentry, &page); 79 de = ufs_find_entry(dir, qstr, &page);
80 if (de) { 80 if (de) {
81 res = fs32_to_cpu(dir->i_sb, de->d_ino); 81 res = fs32_to_cpu(dir->i_sb, de->d_ino);
82 ufs_put_page(page); 82 ufs_put_page(page);
@@ -249,12 +249,12 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
249 * (as a parameter - res_dir). Page is returned mapped and unlocked. 249 * (as a parameter - res_dir). Page is returned mapped and unlocked.
250 * Entry is guaranteed to be valid. 250 * Entry is guaranteed to be valid.
251 */ 251 */
252struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry, 252struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr,
253 struct page **res_page) 253 struct page **res_page)
254{ 254{
255 struct super_block *sb = dir->i_sb; 255 struct super_block *sb = dir->i_sb;
256 const char *name = dentry->d_name.name; 256 const char *name = qstr->name;
257 int namelen = dentry->d_name.len; 257 int namelen = qstr->len;
258 unsigned reclen = UFS_DIR_REC_LEN(namelen); 258 unsigned reclen = UFS_DIR_REC_LEN(namelen);
259 unsigned long start, n; 259 unsigned long start, n;
260 unsigned long npages = ufs_dir_pages(dir); 260 unsigned long npages = ufs_dir_pages(dir);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 23119fe7ad62..4c26d9e8bc94 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -56,7 +56,7 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
56 return ERR_PTR(-ENAMETOOLONG); 56 return ERR_PTR(-ENAMETOOLONG);
57 57
58 lock_kernel(); 58 lock_kernel();
59 ino = ufs_inode_by_name(dir, dentry); 59 ino = ufs_inode_by_name(dir, &dentry->d_name);
60 if (ino) { 60 if (ino) {
61 inode = ufs_iget(dir->i_sb, ino); 61 inode = ufs_iget(dir->i_sb, ino);
62 if (IS_ERR(inode)) { 62 if (IS_ERR(inode)) {
@@ -237,7 +237,7 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
237 struct page *page; 237 struct page *page;
238 int err = -ENOENT; 238 int err = -ENOENT;
239 239
240 de = ufs_find_entry(dir, dentry, &page); 240 de = ufs_find_entry(dir, &dentry->d_name, &page);
241 if (!de) 241 if (!de)
242 goto out; 242 goto out;
243 243
@@ -281,7 +281,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
281 struct ufs_dir_entry *old_de; 281 struct ufs_dir_entry *old_de;
282 int err = -ENOENT; 282 int err = -ENOENT;
283 283
284 old_de = ufs_find_entry(old_dir, old_dentry, &old_page); 284 old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
285 if (!old_de) 285 if (!old_de)
286 goto out; 286 goto out;
287 287
@@ -301,7 +301,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
301 goto out_dir; 301 goto out_dir;
302 302
303 err = -ENOENT; 303 err = -ENOENT;
304 new_de = ufs_find_entry(new_dir, new_dentry, &new_page); 304 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
305 if (!new_de) 305 if (!new_de)
306 goto out_dir; 306 goto out_dir;
307 inode_inc_link_count(old_inode); 307 inode_inc_link_count(old_inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 5faed7954d0a..143c20bfb04b 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -66,6 +66,7 @@
66 */ 66 */
67 67
68 68
69#include <linux/exportfs.h>
69#include <linux/module.h> 70#include <linux/module.h>
70#include <linux/bitops.h> 71#include <linux/bitops.h>
71 72
@@ -96,6 +97,56 @@
96#include "swab.h" 97#include "swab.h"
97#include "util.h" 98#include "util.h"
98 99
100static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation)
101{
102 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
103 struct inode *inode;
104
105 if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
106 return ERR_PTR(-ESTALE);
107
108 inode = ufs_iget(sb, ino);
109 if (IS_ERR(inode))
110 return ERR_CAST(inode);
111 if (generation && inode->i_generation != generation) {
112 iput(inode);
113 return ERR_PTR(-ESTALE);
114 }
115 return inode;
116}
117
118static struct dentry *ufs_fh_to_dentry(struct super_block *sb, struct fid *fid,
119 int fh_len, int fh_type)
120{
121 return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
122}
123
124static struct dentry *ufs_fh_to_parent(struct super_block *sb, struct fid *fid,
125 int fh_len, int fh_type)
126{
127 return generic_fh_to_parent(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
128}
129
130static struct dentry *ufs_get_parent(struct dentry *child)
131{
132 struct qstr dot_dot = {
133 .name = "..",
134 .len = 2,
135 };
136 ino_t ino;
137
138 ino = ufs_inode_by_name(child->d_inode, &dot_dot);
139 if (!ino)
140 return ERR_PTR(-ENOENT);
141 return d_obtain_alias(ufs_iget(child->d_inode->i_sb, ino));
142}
143
144static const struct export_operations ufs_export_ops = {
145 .fh_to_dentry = ufs_fh_to_dentry,
146 .fh_to_parent = ufs_fh_to_parent,
147 .get_parent = ufs_get_parent,
148};
149
99#ifdef CONFIG_UFS_DEBUG 150#ifdef CONFIG_UFS_DEBUG
100/* 151/*
101 * Print contents of ufs_super_block, useful for debugging 152 * Print contents of ufs_super_block, useful for debugging
@@ -990,6 +1041,7 @@ magic_found:
990 * Read ufs_super_block into internal data structures 1041 * Read ufs_super_block into internal data structures
991 */ 1042 */
992 sb->s_op = &ufs_super_ops; 1043 sb->s_op = &ufs_super_ops;
1044 sb->s_export_op = &ufs_export_ops;
993 sb->dq_op = NULL; /***/ 1045 sb->dq_op = NULL; /***/
994 sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic); 1046 sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
995 1047
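
The export hooks added above follow the stock exportfs pattern: generic_fh_to_dentry()/generic_fh_to_parent() decode the (inode number, generation) pair from the NFS file handle and hand it to a filesystem-supplied callback, which must reject out-of-range inode numbers and reused inodes with -ESTALE. A hedged sketch of that callback contract for a hypothetical filesystem (myfs_iget() and the inode-number limits are placeholders, not part of this patch):

#include <linux/exportfs.h>
#include <linux/fs.h>

static struct inode *myfs_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < MYFS_FIRST_INO || ino > MYFS_LAST_INO)	/* hypothetical limits */
		return ERR_PTR(-ESTALE);

	inode = myfs_iget(sb, ino);				/* hypothetical iget helper */
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/* a reused inode number shows up as a generation mismatch */
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

Wiring it up is then just a matter of pointing fh_to_dentry/fh_to_parent at generic_fh_to_dentry()/generic_fh_to_parent() and setting sb->s_export_op, exactly as the UFS hunk above does.
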
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 644e77e13599..0b4c39bc0d9e 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
86/* dir.c */ 86/* dir.c */
87extern const struct inode_operations ufs_dir_inode_operations; 87extern const struct inode_operations ufs_dir_inode_operations;
88extern int ufs_add_link (struct dentry *, struct inode *); 88extern int ufs_add_link (struct dentry *, struct inode *);
89extern ino_t ufs_inode_by_name(struct inode *, struct dentry *); 89extern ino_t ufs_inode_by_name(struct inode *, struct qstr *);
90extern int ufs_make_empty(struct inode *, struct inode *); 90extern int ufs_make_empty(struct inode *, struct inode *);
91extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct dentry *, struct page **); 91extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **);
92extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *); 92extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
93extern int ufs_empty_dir (struct inode *); 93extern int ufs_empty_dir (struct inode *);
94extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); 94extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 7a59daed1782..56641fe52a23 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -26,6 +26,8 @@ endif
26 26
27obj-$(CONFIG_XFS_FS) += xfs.o 27obj-$(CONFIG_XFS_FS) += xfs.o
28 28
29xfs-y += linux-2.6/xfs_trace.o
30
29xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \ 31xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
30 xfs_dquot.o \ 32 xfs_dquot.o \
31 xfs_dquot_item.o \ 33 xfs_dquot_item.o \
@@ -90,8 +92,7 @@ xfs-y += xfs_alloc.o \
90 xfs_rw.o \ 92 xfs_rw.o \
91 xfs_dmops.o 93 xfs_dmops.o
92 94
93xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \ 95xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o
94 xfs_dir2_trace.o
95 96
96# Objects in linux/ 97# Objects in linux/
97xfs-y += $(addprefix $(XFS_LINUX)/, \ 98xfs-y += $(addprefix $(XFS_LINUX)/, \
@@ -113,6 +114,3 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
113xfs-y += $(addprefix support/, \ 114xfs-y += $(addprefix support/, \
114 debug.o \ 115 debug.o \
115 uuid.o) 116 uuid.o)
116
117xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
118
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index b23a54506446..69e598b6986f 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -21,6 +21,7 @@
21#include "xfs_bmap_btree.h" 21#include "xfs_bmap_btree.h"
22#include "xfs_inode.h" 22#include "xfs_inode.h"
23#include "xfs_vnodeops.h" 23#include "xfs_vnodeops.h"
24#include "xfs_trace.h"
24#include <linux/xattr.h> 25#include <linux/xattr.h>
25#include <linux/posix_acl_xattr.h> 26#include <linux/posix_acl_xattr.h>
26 27
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 87813e405cef..66abe36c1213 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,6 +38,7 @@
38#include "xfs_rw.h" 38#include "xfs_rw.h"
39#include "xfs_iomap.h" 39#include "xfs_iomap.h"
40#include "xfs_vnodeops.h" 40#include "xfs_vnodeops.h"
41#include "xfs_trace.h"
41#include <linux/mpage.h> 42#include <linux/mpage.h>
42#include <linux/pagevec.h> 43#include <linux/pagevec.h>
43#include <linux/writeback.h> 44#include <linux/writeback.h>
@@ -76,7 +77,7 @@ xfs_ioend_wake(
76 wake_up(to_ioend_wq(ip)); 77 wake_up(to_ioend_wq(ip));
77} 78}
78 79
79STATIC void 80void
80xfs_count_page_state( 81xfs_count_page_state(
81 struct page *page, 82 struct page *page,
82 int *delalloc, 83 int *delalloc,
@@ -98,48 +99,6 @@ xfs_count_page_state(
98 } while ((bh = bh->b_this_page) != head); 99 } while ((bh = bh->b_this_page) != head);
99} 100}
100 101
101#if defined(XFS_RW_TRACE)
102void
103xfs_page_trace(
104 int tag,
105 struct inode *inode,
106 struct page *page,
107 unsigned long pgoff)
108{
109 xfs_inode_t *ip;
110 loff_t isize = i_size_read(inode);
111 loff_t offset = page_offset(page);
112 int delalloc = -1, unmapped = -1, unwritten = -1;
113
114 if (page_has_buffers(page))
115 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
116
117 ip = XFS_I(inode);
118 if (!ip->i_rwtrace)
119 return;
120
121 ktrace_enter(ip->i_rwtrace,
122 (void *)((unsigned long)tag),
123 (void *)ip,
124 (void *)inode,
125 (void *)page,
126 (void *)pgoff,
127 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
128 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
129 (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
130 (void *)((unsigned long)(isize & 0xffffffff)),
131 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
132 (void *)((unsigned long)(offset & 0xffffffff)),
133 (void *)((unsigned long)delalloc),
134 (void *)((unsigned long)unmapped),
135 (void *)((unsigned long)unwritten),
136 (void *)((unsigned long)current_pid()),
137 (void *)NULL);
138}
139#else
140#define xfs_page_trace(tag, inode, page, pgoff)
141#endif
142
143STATIC struct block_device * 102STATIC struct block_device *
144xfs_find_bdev_for_inode( 103xfs_find_bdev_for_inode(
145 struct xfs_inode *ip) 104 struct xfs_inode *ip)
@@ -1202,7 +1161,7 @@ xfs_vm_writepage(
1202 int delalloc, unmapped, unwritten; 1161 int delalloc, unmapped, unwritten;
1203 struct inode *inode = page->mapping->host; 1162 struct inode *inode = page->mapping->host;
1204 1163
1205 xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); 1164 trace_xfs_writepage(inode, page, 0);
1206 1165
1207 /* 1166 /*
1208 * We need a transaction if: 1167 * We need a transaction if:
@@ -1307,7 +1266,7 @@ xfs_vm_releasepage(
1307 .nr_to_write = 1, 1266 .nr_to_write = 1,
1308 }; 1267 };
1309 1268
1310 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0); 1269 trace_xfs_releasepage(inode, page, 0);
1311 1270
1312 if (!page_has_buffers(page)) 1271 if (!page_has_buffers(page))
1313 return 0; 1272 return 0;
@@ -1515,19 +1474,13 @@ xfs_vm_direct_IO(
1515 1474
1516 bdev = xfs_find_bdev_for_inode(XFS_I(inode)); 1475 bdev = xfs_find_bdev_for_inode(XFS_I(inode));
1517 1476
1518 if (rw == WRITE) { 1477 iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
1519 iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN); 1478 IOMAP_UNWRITTEN : IOMAP_READ);
1520 ret = blockdev_direct_IO_own_locking(rw, iocb, inode, 1479
1521 bdev, iov, offset, nr_segs, 1480 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
1522 xfs_get_blocks_direct, 1481 offset, nr_segs,
1523 xfs_end_io_direct); 1482 xfs_get_blocks_direct,
1524 } else { 1483 xfs_end_io_direct);
1525 iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
1526 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
1527 bdev, iov, offset, nr_segs,
1528 xfs_get_blocks_direct,
1529 xfs_end_io_direct);
1530 }
1531 1484
1532 if (unlikely(ret != -EIOCBQUEUED && iocb->private)) 1485 if (unlikely(ret != -EIOCBQUEUED && iocb->private))
1533 xfs_destroy_ioend(iocb->private); 1486 xfs_destroy_ioend(iocb->private);
@@ -1587,8 +1540,7 @@ xfs_vm_invalidatepage(
1587 struct page *page, 1540 struct page *page,
1588 unsigned long offset) 1541 unsigned long offset)
1589{ 1542{
1590 xfs_page_trace(XFS_INVALIDPAGE_ENTER, 1543 trace_xfs_invalidatepage(page->mapping->host, page, offset);
1591 page->mapping->host, page, offset);
1592 block_invalidatepage(page, offset); 1544 block_invalidatepage(page, offset);
1593} 1545}
1594 1546
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 221b3e66ceef..4cfc6ea87df8 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -45,4 +45,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
45extern void xfs_ioend_init(void); 45extern void xfs_ioend_init(void);
46extern void xfs_ioend_wait(struct xfs_inode *); 46extern void xfs_ioend_wait(struct xfs_inode *);
47 47
48extern void xfs_count_page_state(struct page *, int *, int *, int *);
49
48#endif /* __XFS_AOPS_H__ */ 50#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4ddc973aea7a..b4c7d4248aac 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -39,6 +39,7 @@
39#include "xfs_ag.h" 39#include "xfs_ag.h"
40#include "xfs_dmapi.h" 40#include "xfs_dmapi.h"
41#include "xfs_mount.h" 41#include "xfs_mount.h"
42#include "xfs_trace.h"
42 43
43static kmem_zone_t *xfs_buf_zone; 44static kmem_zone_t *xfs_buf_zone;
44STATIC int xfsbufd(void *); 45STATIC int xfsbufd(void *);
@@ -53,34 +54,6 @@ static struct workqueue_struct *xfslogd_workqueue;
53struct workqueue_struct *xfsdatad_workqueue; 54struct workqueue_struct *xfsdatad_workqueue;
54struct workqueue_struct *xfsconvertd_workqueue; 55struct workqueue_struct *xfsconvertd_workqueue;
55 56
56#ifdef XFS_BUF_TRACE
57void
58xfs_buf_trace(
59 xfs_buf_t *bp,
60 char *id,
61 void *data,
62 void *ra)
63{
64 ktrace_enter(xfs_buf_trace_buf,
65 bp, id,
66 (void *)(unsigned long)bp->b_flags,
67 (void *)(unsigned long)bp->b_hold.counter,
68 (void *)(unsigned long)bp->b_sema.count,
69 (void *)current,
70 data, ra,
71 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
72 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
73 (void *)(unsigned long)bp->b_buffer_length,
74 NULL, NULL, NULL, NULL, NULL);
75}
76ktrace_t *xfs_buf_trace_buf;
77#define XFS_BUF_TRACE_SIZE 4096
78#define XB_TRACE(bp, id, data) \
79 xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
80#else
81#define XB_TRACE(bp, id, data) do { } while (0)
82#endif
83
84#ifdef XFS_BUF_LOCK_TRACKING 57#ifdef XFS_BUF_LOCK_TRACKING
85# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) 58# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
86# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) 59# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
@@ -279,7 +252,8 @@ _xfs_buf_initialize(
279 init_waitqueue_head(&bp->b_waiters); 252 init_waitqueue_head(&bp->b_waiters);
280 253
281 XFS_STATS_INC(xb_create); 254 XFS_STATS_INC(xb_create);
282 XB_TRACE(bp, "initialize", target); 255
256 trace_xfs_buf_init(bp, _RET_IP_);
283} 257}
284 258
285/* 259/*
@@ -332,7 +306,7 @@ void
332xfs_buf_free( 306xfs_buf_free(
333 xfs_buf_t *bp) 307 xfs_buf_t *bp)
334{ 308{
335 XB_TRACE(bp, "free", 0); 309 trace_xfs_buf_free(bp, _RET_IP_);
336 310
337 ASSERT(list_empty(&bp->b_hash_list)); 311 ASSERT(list_empty(&bp->b_hash_list));
338 312
@@ -445,7 +419,6 @@ _xfs_buf_lookup_pages(
445 if (page_count == bp->b_page_count) 419 if (page_count == bp->b_page_count)
446 bp->b_flags |= XBF_DONE; 420 bp->b_flags |= XBF_DONE;
447 421
448 XB_TRACE(bp, "lookup_pages", (long)page_count);
449 return error; 422 return error;
450} 423}
451 424
@@ -548,7 +521,6 @@ found:
548 if (down_trylock(&bp->b_sema)) { 521 if (down_trylock(&bp->b_sema)) {
549 if (!(flags & XBF_TRYLOCK)) { 522 if (!(flags & XBF_TRYLOCK)) {
550 /* wait for buffer ownership */ 523 /* wait for buffer ownership */
551 XB_TRACE(bp, "get_lock", 0);
552 xfs_buf_lock(bp); 524 xfs_buf_lock(bp);
553 XFS_STATS_INC(xb_get_locked_waited); 525 XFS_STATS_INC(xb_get_locked_waited);
554 } else { 526 } else {
@@ -571,7 +543,8 @@ found:
571 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 543 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
572 bp->b_flags &= XBF_MAPPED; 544 bp->b_flags &= XBF_MAPPED;
573 } 545 }
574 XB_TRACE(bp, "got_lock", 0); 546
547 trace_xfs_buf_find(bp, flags, _RET_IP_);
575 XFS_STATS_INC(xb_get_locked); 548 XFS_STATS_INC(xb_get_locked);
576 return bp; 549 return bp;
577} 550}
@@ -627,7 +600,7 @@ xfs_buf_get(
627 bp->b_bn = ioff; 600 bp->b_bn = ioff;
628 bp->b_count_desired = bp->b_buffer_length; 601 bp->b_count_desired = bp->b_buffer_length;
629 602
630 XB_TRACE(bp, "get", (unsigned long)flags); 603 trace_xfs_buf_get(bp, flags, _RET_IP_);
631 return bp; 604 return bp;
632 605
633 no_buffer: 606 no_buffer:
@@ -644,8 +617,6 @@ _xfs_buf_read(
644{ 617{
645 int status; 618 int status;
646 619
647 XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
648
649 ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE))); 620 ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
650 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); 621 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
651 622
@@ -673,19 +644,18 @@ xfs_buf_read(
673 644
674 bp = xfs_buf_get(target, ioff, isize, flags); 645 bp = xfs_buf_get(target, ioff, isize, flags);
675 if (bp) { 646 if (bp) {
647 trace_xfs_buf_read(bp, flags, _RET_IP_);
648
676 if (!XFS_BUF_ISDONE(bp)) { 649 if (!XFS_BUF_ISDONE(bp)) {
677 XB_TRACE(bp, "read", (unsigned long)flags);
678 XFS_STATS_INC(xb_get_read); 650 XFS_STATS_INC(xb_get_read);
679 _xfs_buf_read(bp, flags); 651 _xfs_buf_read(bp, flags);
680 } else if (flags & XBF_ASYNC) { 652 } else if (flags & XBF_ASYNC) {
681 XB_TRACE(bp, "read_async", (unsigned long)flags);
682 /* 653 /*
683 * Read ahead call which is already satisfied, 654 * Read ahead call which is already satisfied,
684 * drop the buffer 655 * drop the buffer
685 */ 656 */
686 goto no_buffer; 657 goto no_buffer;
687 } else { 658 } else {
688 XB_TRACE(bp, "read_done", (unsigned long)flags);
689 /* We do not want read in the flags */ 659 /* We do not want read in the flags */
690 bp->b_flags &= ~XBF_READ; 660 bp->b_flags &= ~XBF_READ;
691 } 661 }
@@ -823,7 +793,7 @@ xfs_buf_get_noaddr(
823 793
824 xfs_buf_unlock(bp); 794 xfs_buf_unlock(bp);
825 795
826 XB_TRACE(bp, "no_daddr", len); 796 trace_xfs_buf_get_noaddr(bp, _RET_IP_);
827 return bp; 797 return bp;
828 798
829 fail_free_mem: 799 fail_free_mem:
@@ -845,8 +815,8 @@ void
845xfs_buf_hold( 815xfs_buf_hold(
846 xfs_buf_t *bp) 816 xfs_buf_t *bp)
847{ 817{
818 trace_xfs_buf_hold(bp, _RET_IP_);
848 atomic_inc(&bp->b_hold); 819 atomic_inc(&bp->b_hold);
849 XB_TRACE(bp, "hold", 0);
850} 820}
851 821
852/* 822/*
@@ -859,7 +829,7 @@ xfs_buf_rele(
859{ 829{
860 xfs_bufhash_t *hash = bp->b_hash; 830 xfs_bufhash_t *hash = bp->b_hash;
861 831
862 XB_TRACE(bp, "rele", bp->b_relse); 832 trace_xfs_buf_rele(bp, _RET_IP_);
863 833
864 if (unlikely(!hash)) { 834 if (unlikely(!hash)) {
865 ASSERT(!bp->b_relse); 835 ASSERT(!bp->b_relse);
@@ -909,21 +879,19 @@ xfs_buf_cond_lock(
909 int locked; 879 int locked;
910 880
911 locked = down_trylock(&bp->b_sema) == 0; 881 locked = down_trylock(&bp->b_sema) == 0;
912 if (locked) { 882 if (locked)
913 XB_SET_OWNER(bp); 883 XB_SET_OWNER(bp);
914 } 884
915 XB_TRACE(bp, "cond_lock", (long)locked); 885 trace_xfs_buf_cond_lock(bp, _RET_IP_);
916 return locked ? 0 : -EBUSY; 886 return locked ? 0 : -EBUSY;
917} 887}
918 888
919#if defined(DEBUG) || defined(XFS_BLI_TRACE)
920int 889int
921xfs_buf_lock_value( 890xfs_buf_lock_value(
922 xfs_buf_t *bp) 891 xfs_buf_t *bp)
923{ 892{
924 return bp->b_sema.count; 893 return bp->b_sema.count;
925} 894}
926#endif
927 895
928/* 896/*
929 * Locks a buffer object. 897 * Locks a buffer object.
@@ -935,12 +903,14 @@ void
935xfs_buf_lock( 903xfs_buf_lock(
936 xfs_buf_t *bp) 904 xfs_buf_t *bp)
937{ 905{
938 XB_TRACE(bp, "lock", 0); 906 trace_xfs_buf_lock(bp, _RET_IP_);
907
939 if (atomic_read(&bp->b_io_remaining)) 908 if (atomic_read(&bp->b_io_remaining))
940 blk_run_address_space(bp->b_target->bt_mapping); 909 blk_run_address_space(bp->b_target->bt_mapping);
941 down(&bp->b_sema); 910 down(&bp->b_sema);
942 XB_SET_OWNER(bp); 911 XB_SET_OWNER(bp);
943 XB_TRACE(bp, "locked", 0); 912
913 trace_xfs_buf_lock_done(bp, _RET_IP_);
944} 914}
945 915
946/* 916/*
@@ -962,7 +932,8 @@ xfs_buf_unlock(
962 932
963 XB_CLEAR_OWNER(bp); 933 XB_CLEAR_OWNER(bp);
964 up(&bp->b_sema); 934 up(&bp->b_sema);
965 XB_TRACE(bp, "unlock", 0); 935
936 trace_xfs_buf_unlock(bp, _RET_IP_);
966} 937}
967 938
968 939
@@ -974,17 +945,18 @@ void
974xfs_buf_pin( 945xfs_buf_pin(
975 xfs_buf_t *bp) 946 xfs_buf_t *bp)
976{ 947{
948 trace_xfs_buf_pin(bp, _RET_IP_);
977 atomic_inc(&bp->b_pin_count); 949 atomic_inc(&bp->b_pin_count);
978 XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
979} 950}
980 951
981void 952void
982xfs_buf_unpin( 953xfs_buf_unpin(
983 xfs_buf_t *bp) 954 xfs_buf_t *bp)
984{ 955{
956 trace_xfs_buf_unpin(bp, _RET_IP_);
957
985 if (atomic_dec_and_test(&bp->b_pin_count)) 958 if (atomic_dec_and_test(&bp->b_pin_count))
986 wake_up_all(&bp->b_waiters); 959 wake_up_all(&bp->b_waiters);
987 XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
988} 960}
989 961
990int 962int
@@ -1035,7 +1007,7 @@ xfs_buf_iodone_work(
1035 */ 1007 */
1036 if ((bp->b_error == EOPNOTSUPP) && 1008 if ((bp->b_error == EOPNOTSUPP) &&
1037 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) { 1009 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
1038 XB_TRACE(bp, "ordered_retry", bp->b_iodone); 1010 trace_xfs_buf_ordered_retry(bp, _RET_IP_);
1039 bp->b_flags &= ~XBF_ORDERED; 1011 bp->b_flags &= ~XBF_ORDERED;
1040 bp->b_flags |= _XFS_BARRIER_FAILED; 1012 bp->b_flags |= _XFS_BARRIER_FAILED;
1041 xfs_buf_iorequest(bp); 1013 xfs_buf_iorequest(bp);
@@ -1050,12 +1022,12 @@ xfs_buf_ioend(
1050 xfs_buf_t *bp, 1022 xfs_buf_t *bp,
1051 int schedule) 1023 int schedule)
1052{ 1024{
1025 trace_xfs_buf_iodone(bp, _RET_IP_);
1026
1053 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1027 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1054 if (bp->b_error == 0) 1028 if (bp->b_error == 0)
1055 bp->b_flags |= XBF_DONE; 1029 bp->b_flags |= XBF_DONE;
1056 1030
1057 XB_TRACE(bp, "iodone", bp->b_iodone);
1058
1059 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { 1031 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1060 if (schedule) { 1032 if (schedule) {
1061 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); 1033 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
@@ -1075,7 +1047,7 @@ xfs_buf_ioerror(
1075{ 1047{
1076 ASSERT(error >= 0 && error <= 0xffff); 1048 ASSERT(error >= 0 && error <= 0xffff);
1077 bp->b_error = (unsigned short)error; 1049 bp->b_error = (unsigned short)error;
1078 XB_TRACE(bp, "ioerror", (unsigned long)error); 1050 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1079} 1051}
1080 1052
1081int 1053int
@@ -1083,7 +1055,7 @@ xfs_bawrite(
1083 void *mp, 1055 void *mp,
1084 struct xfs_buf *bp) 1056 struct xfs_buf *bp)
1085{ 1057{
1086 XB_TRACE(bp, "bawrite", 0); 1058 trace_xfs_buf_bawrite(bp, _RET_IP_);
1087 1059
1088 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); 1060 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
1089 1061
@@ -1102,7 +1074,7 @@ xfs_bdwrite(
1102 void *mp, 1074 void *mp,
1103 struct xfs_buf *bp) 1075 struct xfs_buf *bp)
1104{ 1076{
1105 XB_TRACE(bp, "bdwrite", 0); 1077 trace_xfs_buf_bdwrite(bp, _RET_IP_);
1106 1078
1107 bp->b_strat = xfs_bdstrat_cb; 1079 bp->b_strat = xfs_bdstrat_cb;
1108 bp->b_mount = mp; 1080 bp->b_mount = mp;
@@ -1253,7 +1225,7 @@ int
1253xfs_buf_iorequest( 1225xfs_buf_iorequest(
1254 xfs_buf_t *bp) 1226 xfs_buf_t *bp)
1255{ 1227{
1256 XB_TRACE(bp, "iorequest", 0); 1228 trace_xfs_buf_iorequest(bp, _RET_IP_);
1257 1229
1258 if (bp->b_flags & XBF_DELWRI) { 1230 if (bp->b_flags & XBF_DELWRI) {
1259 xfs_buf_delwri_queue(bp, 1); 1231 xfs_buf_delwri_queue(bp, 1);
@@ -1287,11 +1259,13 @@ int
1287xfs_buf_iowait( 1259xfs_buf_iowait(
1288 xfs_buf_t *bp) 1260 xfs_buf_t *bp)
1289{ 1261{
1290 XB_TRACE(bp, "iowait", 0); 1262 trace_xfs_buf_iowait(bp, _RET_IP_);
1263
1291 if (atomic_read(&bp->b_io_remaining)) 1264 if (atomic_read(&bp->b_io_remaining))
1292 blk_run_address_space(bp->b_target->bt_mapping); 1265 blk_run_address_space(bp->b_target->bt_mapping);
1293 wait_for_completion(&bp->b_iowait); 1266 wait_for_completion(&bp->b_iowait);
1294 XB_TRACE(bp, "iowaited", (long)bp->b_error); 1267
1268 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1295 return bp->b_error; 1269 return bp->b_error;
1296} 1270}
1297 1271
@@ -1604,7 +1578,8 @@ xfs_buf_delwri_queue(
1604 struct list_head *dwq = &bp->b_target->bt_delwrite_queue; 1578 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1605 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; 1579 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1606 1580
1607 XB_TRACE(bp, "delwri_q", (long)unlock); 1581 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1582
1608 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC)); 1583 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1609 1584
1610 spin_lock(dwlk); 1585 spin_lock(dwlk);
@@ -1644,7 +1619,7 @@ xfs_buf_delwri_dequeue(
1644 if (dequeued) 1619 if (dequeued)
1645 xfs_buf_rele(bp); 1620 xfs_buf_rele(bp);
1646 1621
1647 XB_TRACE(bp, "delwri_dq", (long)dequeued); 1622 trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
1648} 1623}
1649 1624
1650STATIC void 1625STATIC void
@@ -1692,7 +1667,7 @@ xfs_buf_delwri_split(
1692 INIT_LIST_HEAD(list); 1667 INIT_LIST_HEAD(list);
1693 spin_lock(dwlk); 1668 spin_lock(dwlk);
1694 list_for_each_entry_safe(bp, n, dwq, b_list) { 1669 list_for_each_entry_safe(bp, n, dwq, b_list) {
1695 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp)); 1670 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1696 ASSERT(bp->b_flags & XBF_DELWRI); 1671 ASSERT(bp->b_flags & XBF_DELWRI);
1697 1672
1698 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) { 1673 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1816,14 +1791,10 @@ xfs_flush_buftarg(
1816int __init 1791int __init
1817xfs_buf_init(void) 1792xfs_buf_init(void)
1818{ 1793{
1819#ifdef XFS_BUF_TRACE
1820 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
1821#endif
1822
1823 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", 1794 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1824 KM_ZONE_HWALIGN, NULL); 1795 KM_ZONE_HWALIGN, NULL);
1825 if (!xfs_buf_zone) 1796 if (!xfs_buf_zone)
1826 goto out_free_trace_buf; 1797 goto out;
1827 1798
1828 xfslogd_workqueue = create_workqueue("xfslogd"); 1799 xfslogd_workqueue = create_workqueue("xfslogd");
1829 if (!xfslogd_workqueue) 1800 if (!xfslogd_workqueue)
@@ -1846,10 +1817,7 @@ xfs_buf_init(void)
1846 destroy_workqueue(xfslogd_workqueue); 1817 destroy_workqueue(xfslogd_workqueue);
1847 out_free_buf_zone: 1818 out_free_buf_zone:
1848 kmem_zone_destroy(xfs_buf_zone); 1819 kmem_zone_destroy(xfs_buf_zone);
1849 out_free_trace_buf: 1820 out:
1850#ifdef XFS_BUF_TRACE
1851 ktrace_free(xfs_buf_trace_buf);
1852#endif
1853 return -ENOMEM; 1821 return -ENOMEM;
1854} 1822}
1855 1823
@@ -1861,9 +1829,6 @@ xfs_buf_terminate(void)
1861 destroy_workqueue(xfsdatad_workqueue); 1829 destroy_workqueue(xfsdatad_workqueue);
1862 destroy_workqueue(xfslogd_workqueue); 1830 destroy_workqueue(xfslogd_workqueue);
1863 kmem_zone_destroy(xfs_buf_zone); 1831 kmem_zone_destroy(xfs_buf_zone);
1864#ifdef XFS_BUF_TRACE
1865 ktrace_free(xfs_buf_trace_buf);
1866#endif
1867} 1832}
1868 1833
1869#ifdef CONFIG_KDB_MODULES 1834#ifdef CONFIG_KDB_MODULES
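
Every XB_TRACE() site in the hunks above becomes a call to a generated trace_xfs_buf_*() stub, passing _RET_IP_ — the caller's return address, i.e. ((unsigned long)__builtin_return_address(0)) — so the event records who triggered the action. A hedged sketch of the call-site idiom, wrapped in a hypothetical helper:

#include "xfs_trace.h"

/*
 * Hypothetical wrapper: the trace_* calls compile down to near no-ops
 * while the events are disabled and can be switched on per event at
 * run time.
 */
void example_lock_buffer(xfs_buf_t *bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

Once registered, the events show up under /sys/kernel/debug/tracing/events/xfs/ and can be enabled individually or as a group, which replaces the compile-time XFS_BUF_TRACE ktrace buffers removed here.
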
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 5f07dd91c5fa..a509f4addc2a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -95,6 +95,28 @@ typedef enum {
95 _XFS_BARRIER_FAILED = (1 << 23), 95 _XFS_BARRIER_FAILED = (1 << 23),
96} xfs_buf_flags_t; 96} xfs_buf_flags_t;
97 97
98#define XFS_BUF_FLAGS \
99 { XBF_READ, "READ" }, \
100 { XBF_WRITE, "WRITE" }, \
101 { XBF_MAPPED, "MAPPED" }, \
102 { XBF_ASYNC, "ASYNC" }, \
103 { XBF_DONE, "DONE" }, \
104 { XBF_DELWRI, "DELWRI" }, \
105 { XBF_STALE, "STALE" }, \
106 { XBF_FS_MANAGED, "FS_MANAGED" }, \
107 { XBF_ORDERED, "ORDERED" }, \
108 { XBF_READ_AHEAD, "READ_AHEAD" }, \
109 { XBF_LOCK, "LOCK" }, /* should never be set */\
110 { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
111 { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
112 { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \
113 { _XBF_PAGES, "PAGES" }, \
114 { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
115 { _XBF_DELWRI_Q, "DELWRI_Q" }, \
116 { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }, \
117 { _XFS_BARRIER_FAILED, "BARRIER_FAILED" }
118
119
98typedef enum { 120typedef enum {
99 XBT_FORCE_SLEEP = 0, 121 XBT_FORCE_SLEEP = 0,
100 XBT_FORCE_FLUSH = 1, 122 XBT_FORCE_FLUSH = 1,
@@ -243,13 +265,6 @@ extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
243extern int xfs_buf_init(void); 265extern int xfs_buf_init(void);
244extern void xfs_buf_terminate(void); 266extern void xfs_buf_terminate(void);
245 267
246#ifdef XFS_BUF_TRACE
247extern ktrace_t *xfs_buf_trace_buf;
248extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
249#else
250#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
251#endif
252
253#define xfs_buf_target_name(target) \ 268#define xfs_buf_target_name(target) \
254 ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) 269 ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
255 270
@@ -365,10 +380,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
365 380
366#define xfs_bpin(bp) xfs_buf_pin(bp) 381#define xfs_bpin(bp) xfs_buf_pin(bp)
367#define xfs_bunpin(bp) xfs_buf_unpin(bp) 382#define xfs_bunpin(bp) xfs_buf_unpin(bp)
368
369#define xfs_buftrace(id, bp) \
370 xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
371
372#define xfs_biodone(bp) xfs_buf_ioend(bp, 0) 383#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
373 384
374#define xfs_biomove(bp, off, len, data, rw) \ 385#define xfs_biomove(bp, off, len, data, rw) \
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 08be36d7326c..7501b85fd860 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -19,6 +19,7 @@
19#include "xfs_vnodeops.h" 19#include "xfs_vnodeops.h"
20#include "xfs_bmap_btree.h" 20#include "xfs_bmap_btree.h"
21#include "xfs_inode.h" 21#include "xfs_inode.h"
22#include "xfs_trace.h"
22 23
23int fs_noerr(void) { return 0; } 24int fs_noerr(void) { return 0; }
24int fs_nosys(void) { return ENOSYS; } 25int fs_nosys(void) { return ENOSYS; }
@@ -51,6 +52,8 @@ xfs_flushinval_pages(
51 struct address_space *mapping = VFS_I(ip)->i_mapping; 52 struct address_space *mapping = VFS_I(ip)->i_mapping;
52 int ret = 0; 53 int ret = 0;
53 54
55 trace_xfs_pagecache_inval(ip, first, last);
56
54 if (mapping->nrpages) { 57 if (mapping->nrpages) {
55 xfs_iflags_clear(ip, XFS_ITRUNCATED); 58 xfs_iflags_clear(ip, XFS_ITRUNCATED);
56 ret = filemap_write_and_wait(mapping); 59 ret = filemap_write_and_wait(mapping);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 5bb523d7f37e..a034cf624437 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -51,6 +51,7 @@
51#include "xfs_quota.h" 51#include "xfs_quota.h"
52#include "xfs_inode_item.h" 52#include "xfs_inode_item.h"
53#include "xfs_export.h" 53#include "xfs_export.h"
54#include "xfs_trace.h"
54 55
55#include <linux/capability.h> 56#include <linux/capability.h>
56#include <linux/dcache.h> 57#include <linux/dcache.h>
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index eafcc7c18706..be1527b1670c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -46,6 +46,7 @@
46#include "xfs_attr.h" 46#include "xfs_attr.h"
47#include "xfs_ioctl.h" 47#include "xfs_ioctl.h"
48#include "xfs_ioctl32.h" 48#include "xfs_ioctl32.h"
49#include "xfs_trace.h"
49 50
50#define _NATIVE_IOC(cmd, type) \ 51#define _NATIVE_IOC(cmd, type) \
51 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) 52 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 1f3b4b8f7dd4..1d5b298ba8b2 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -47,6 +47,7 @@
47#include "xfs_buf_item.h" 47#include "xfs_buf_item.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_vnodeops.h" 49#include "xfs_vnodeops.h"
50#include "xfs_trace.h"
50 51
51#include <linux/capability.h> 52#include <linux/capability.h>
52#include <linux/xattr.h> 53#include <linux/xattr.h>
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 6127e24062d0..5af0c81ca1ae 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -40,7 +40,6 @@
40#include <sv.h> 40#include <sv.h>
41#include <time.h> 41#include <time.h>
42 42
43#include <support/ktrace.h>
44#include <support/debug.h> 43#include <support/debug.h>
45#include <support/uuid.h> 44#include <support/uuid.h>
46 45
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 1bf47f219c97..0d32457abef1 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -48,73 +48,12 @@
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_iomap.h" 49#include "xfs_iomap.h"
50#include "xfs_vnodeops.h" 50#include "xfs_vnodeops.h"
51#include "xfs_trace.h"
51 52
52#include <linux/capability.h> 53#include <linux/capability.h>
53#include <linux/writeback.h> 54#include <linux/writeback.h>
54 55
55 56
56#if defined(XFS_RW_TRACE)
57void
58xfs_rw_enter_trace(
59 int tag,
60 xfs_inode_t *ip,
61 void *data,
62 size_t segs,
63 loff_t offset,
64 int ioflags)
65{
66 if (ip->i_rwtrace == NULL)
67 return;
68 ktrace_enter(ip->i_rwtrace,
69 (void *)(unsigned long)tag,
70 (void *)ip,
71 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
72 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
73 (void *)data,
74 (void *)((unsigned long)segs),
75 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
76 (void *)((unsigned long)(offset & 0xffffffff)),
77 (void *)((unsigned long)ioflags),
78 (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
79 (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
80 (void *)((unsigned long)current_pid()),
81 (void *)NULL,
82 (void *)NULL,
83 (void *)NULL,
84 (void *)NULL);
85}
86
87void
88xfs_inval_cached_trace(
89 xfs_inode_t *ip,
90 xfs_off_t offset,
91 xfs_off_t len,
92 xfs_off_t first,
93 xfs_off_t last)
94{
95
96 if (ip->i_rwtrace == NULL)
97 return;
98 ktrace_enter(ip->i_rwtrace,
99 (void *)(__psint_t)XFS_INVAL_CACHED,
100 (void *)ip,
101 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
102 (void *)((unsigned long)(offset & 0xffffffff)),
103 (void *)((unsigned long)((len >> 32) & 0xffffffff)),
104 (void *)((unsigned long)(len & 0xffffffff)),
105 (void *)((unsigned long)((first >> 32) & 0xffffffff)),
106 (void *)((unsigned long)(first & 0xffffffff)),
107 (void *)((unsigned long)((last >> 32) & 0xffffffff)),
108 (void *)((unsigned long)(last & 0xffffffff)),
109 (void *)((unsigned long)current_pid()),
110 (void *)NULL,
111 (void *)NULL,
112 (void *)NULL,
113 (void *)NULL,
114 (void *)NULL);
115}
116#endif
117
118/* 57/*
119 * xfs_iozero 58 * xfs_iozero
120 * 59 *
@@ -250,8 +189,7 @@ xfs_read(
250 } 189 }
251 } 190 }
252 191
253 xfs_rw_enter_trace(XFS_READ_ENTER, ip, 192 trace_xfs_file_read(ip, size, *offset, ioflags);
254 (void *)iovp, segs, *offset, ioflags);
255 193
256 iocb->ki_pos = *offset; 194 iocb->ki_pos = *offset;
257 ret = generic_file_aio_read(iocb, iovp, segs, *offset); 195 ret = generic_file_aio_read(iocb, iovp, segs, *offset);
@@ -292,8 +230,9 @@ xfs_splice_read(
292 return -error; 230 return -error;
293 } 231 }
294 } 232 }
295 xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip, 233
296 pipe, count, *ppos, ioflags); 234 trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
235
297 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); 236 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
298 if (ret > 0) 237 if (ret > 0)
299 XFS_STATS_ADD(xs_read_bytes, ret); 238 XFS_STATS_ADD(xs_read_bytes, ret);
@@ -342,8 +281,8 @@ xfs_splice_write(
342 ip->i_new_size = new_size; 281 ip->i_new_size = new_size;
343 xfs_iunlock(ip, XFS_ILOCK_EXCL); 282 xfs_iunlock(ip, XFS_ILOCK_EXCL);
344 283
345 xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip, 284 trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
346 pipe, count, *ppos, ioflags); 285
347 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); 286 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
348 if (ret > 0) 287 if (ret > 0)
349 XFS_STATS_ADD(xs_write_bytes, ret); 288 XFS_STATS_ADD(xs_write_bytes, ret);
@@ -710,8 +649,6 @@ start:
710 if ((ioflags & IO_ISDIRECT)) { 649 if ((ioflags & IO_ISDIRECT)) {
711 if (mapping->nrpages) { 650 if (mapping->nrpages) {
712 WARN_ON(need_i_mutex == 0); 651 WARN_ON(need_i_mutex == 0);
713 xfs_inval_cached_trace(xip, pos, -1,
714 (pos & PAGE_CACHE_MASK), -1);
715 error = xfs_flushinval_pages(xip, 652 error = xfs_flushinval_pages(xip,
716 (pos & PAGE_CACHE_MASK), 653 (pos & PAGE_CACHE_MASK),
717 -1, FI_REMAPF_LOCKED); 654 -1, FI_REMAPF_LOCKED);
@@ -728,8 +665,7 @@ start:
728 need_i_mutex = 0; 665 need_i_mutex = 0;
729 } 666 }
730 667
731 xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs, 668 trace_xfs_file_direct_write(xip, count, *offset, ioflags);
732 *offset, ioflags);
733 ret = generic_file_direct_write(iocb, iovp, 669 ret = generic_file_direct_write(iocb, iovp,
734 &segs, pos, offset, count, ocount); 670 &segs, pos, offset, count, ocount);
735 671
@@ -752,8 +688,7 @@ start:
752 ssize_t ret2 = 0; 688 ssize_t ret2 = 0;
753 689
754write_retry: 690write_retry:
755 xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs, 691 trace_xfs_file_buffered_write(xip, count, *offset, ioflags);
756 *offset, ioflags);
757 ret2 = generic_file_buffered_write(iocb, iovp, segs, 692 ret2 = generic_file_buffered_write(iocb, iovp, segs,
758 pos, offset, count, ret); 693 pos, offset, count, ret);
759 /* 694 /*
@@ -858,7 +793,7 @@ int
858xfs_bdstrat_cb(struct xfs_buf *bp) 793xfs_bdstrat_cb(struct xfs_buf *bp)
859{ 794{
860 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { 795 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
861 xfs_buftrace("XFS__BDSTRAT IOERROR", bp); 796 trace_xfs_bdstrat_shut(bp, _RET_IP_);
862 /* 797 /*
863 * Metadata write that didn't get logged but 798 * Metadata write that didn't get logged but
864 * written delayed anyway. These aren't associated 799 * written delayed anyway. These aren't associated
@@ -891,7 +826,7 @@ xfsbdstrat(
891 return; 826 return;
892 } 827 }
893 828
894 xfs_buftrace("XFSBDSTRAT IOERROR", bp); 829 trace_xfs_bdstrat_shut(bp, _RET_IP_);
895 xfs_bioerror_relse(bp); 830 xfs_bioerror_relse(bp);
896} 831}
897 832
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index e6be37dbd0e9..d1f7789c7ffb 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -20,52 +20,7 @@
20 20
21struct xfs_mount; 21struct xfs_mount;
22struct xfs_inode; 22struct xfs_inode;
23struct xfs_bmbt_irec;
24struct xfs_buf; 23struct xfs_buf;
25struct xfs_iomap;
26
27#if defined(XFS_RW_TRACE)
28/*
29 * Defines for the trace mechanisms in xfs_lrw.c.
30 */
31#define XFS_RW_KTRACE_SIZE 128
32
33#define XFS_READ_ENTER 1
34#define XFS_WRITE_ENTER 2
35#define XFS_IOMAP_READ_ENTER 3
36#define XFS_IOMAP_WRITE_ENTER 4
37#define XFS_IOMAP_READ_MAP 5
38#define XFS_IOMAP_WRITE_MAP 6
39#define XFS_IOMAP_WRITE_NOSPACE 7
40#define XFS_ITRUNC_START 8
41#define XFS_ITRUNC_FINISH1 9
42#define XFS_ITRUNC_FINISH2 10
43#define XFS_CTRUNC1 11
44#define XFS_CTRUNC2 12
45#define XFS_CTRUNC3 13
46#define XFS_CTRUNC4 14
47#define XFS_CTRUNC5 15
48#define XFS_CTRUNC6 16
49#define XFS_BUNMAP 17
50#define XFS_INVAL_CACHED 18
51#define XFS_DIORD_ENTER 19
52#define XFS_DIOWR_ENTER 20
53#define XFS_WRITEPAGE_ENTER 22
54#define XFS_RELEASEPAGE_ENTER 23
55#define XFS_INVALIDPAGE_ENTER 24
56#define XFS_IOMAP_ALLOC_ENTER 25
57#define XFS_IOMAP_ALLOC_MAP 26
58#define XFS_IOMAP_UNWRITTEN 27
59#define XFS_SPLICE_READ_ENTER 28
60#define XFS_SPLICE_WRITE_ENTER 29
61extern void xfs_rw_enter_trace(int, struct xfs_inode *,
62 void *, size_t, loff_t, int);
63extern void xfs_inval_cached_trace(struct xfs_inode *,
64 xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
65#else
66#define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags)
67#define xfs_inval_cached_trace(ip, offset, len, first, last)
68#endif
69 24
70/* errors from xfsbdstrat() must be extracted from the buffer */ 25/* errors from xfsbdstrat() must be extracted from the buffer */
71extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); 26extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1bfb0e980193..09783cc444ac 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -15,6 +15,7 @@
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18
18#include "xfs.h" 19#include "xfs.h"
19#include "xfs_bit.h" 20#include "xfs_bit.h"
20#include "xfs_log.h" 21#include "xfs_log.h"
@@ -52,11 +53,11 @@
52#include "xfs_trans_priv.h" 53#include "xfs_trans_priv.h"
53#include "xfs_filestream.h" 54#include "xfs_filestream.h"
54#include "xfs_da_btree.h" 55#include "xfs_da_btree.h"
55#include "xfs_dir2_trace.h"
56#include "xfs_extfree_item.h" 56#include "xfs_extfree_item.h"
57#include "xfs_mru_cache.h" 57#include "xfs_mru_cache.h"
58#include "xfs_inode_item.h" 58#include "xfs_inode_item.h"
59#include "xfs_sync.h" 59#include "xfs_sync.h"
60#include "xfs_trace.h"
60 61
61#include <linux/namei.h> 62#include <linux/namei.h>
62#include <linux/init.h> 63#include <linux/init.h>
@@ -1525,8 +1526,6 @@ xfs_fs_fill_super(
1525 goto fail_vnrele; 1526 goto fail_vnrele;
1526 1527
1527 kfree(mtpt); 1528 kfree(mtpt);
1528
1529 xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
1530 return 0; 1529 return 0;
1531 1530
1532 out_filestream_unmount: 1531 out_filestream_unmount:
@@ -1602,94 +1601,6 @@ static struct file_system_type xfs_fs_type = {
1602}; 1601};
1603 1602
1604STATIC int __init 1603STATIC int __init
1605xfs_alloc_trace_bufs(void)
1606{
1607#ifdef XFS_ALLOC_TRACE
1608 xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL);
1609 if (!xfs_alloc_trace_buf)
1610 goto out;
1611#endif
1612#ifdef XFS_BMAP_TRACE
1613 xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL);
1614 if (!xfs_bmap_trace_buf)
1615 goto out_free_alloc_trace;
1616#endif
1617#ifdef XFS_BTREE_TRACE
1618 xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE,
1619 KM_MAYFAIL);
1620 if (!xfs_allocbt_trace_buf)
1621 goto out_free_bmap_trace;
1622
1623 xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL);
1624 if (!xfs_inobt_trace_buf)
1625 goto out_free_allocbt_trace;
1626
1627 xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL);
1628 if (!xfs_bmbt_trace_buf)
1629 goto out_free_inobt_trace;
1630#endif
1631#ifdef XFS_ATTR_TRACE
1632 xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL);
1633 if (!xfs_attr_trace_buf)
1634 goto out_free_bmbt_trace;
1635#endif
1636#ifdef XFS_DIR2_TRACE
1637 xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL);
1638 if (!xfs_dir2_trace_buf)
1639 goto out_free_attr_trace;
1640#endif
1641
1642 return 0;
1643
1644#ifdef XFS_DIR2_TRACE
1645 out_free_attr_trace:
1646#endif
1647#ifdef XFS_ATTR_TRACE
1648 ktrace_free(xfs_attr_trace_buf);
1649 out_free_bmbt_trace:
1650#endif
1651#ifdef XFS_BTREE_TRACE
1652 ktrace_free(xfs_bmbt_trace_buf);
1653 out_free_inobt_trace:
1654 ktrace_free(xfs_inobt_trace_buf);
1655 out_free_allocbt_trace:
1656 ktrace_free(xfs_allocbt_trace_buf);
1657 out_free_bmap_trace:
1658#endif
1659#ifdef XFS_BMAP_TRACE
1660 ktrace_free(xfs_bmap_trace_buf);
1661 out_free_alloc_trace:
1662#endif
1663#ifdef XFS_ALLOC_TRACE
1664 ktrace_free(xfs_alloc_trace_buf);
1665 out:
1666#endif
1667 return -ENOMEM;
1668}
1669
1670STATIC void
1671xfs_free_trace_bufs(void)
1672{
1673#ifdef XFS_DIR2_TRACE
1674 ktrace_free(xfs_dir2_trace_buf);
1675#endif
1676#ifdef XFS_ATTR_TRACE
1677 ktrace_free(xfs_attr_trace_buf);
1678#endif
1679#ifdef XFS_BTREE_TRACE
1680 ktrace_free(xfs_bmbt_trace_buf);
1681 ktrace_free(xfs_inobt_trace_buf);
1682 ktrace_free(xfs_allocbt_trace_buf);
1683#endif
1684#ifdef XFS_BMAP_TRACE
1685 ktrace_free(xfs_bmap_trace_buf);
1686#endif
1687#ifdef XFS_ALLOC_TRACE
1688 ktrace_free(xfs_alloc_trace_buf);
1689#endif
1690}
1691
1692STATIC int __init
1693xfs_init_zones(void) 1604xfs_init_zones(void)
1694{ 1605{
1695 1606
@@ -1830,7 +1741,6 @@ init_xfs_fs(void)
1830 printk(KERN_INFO XFS_VERSION_STRING " with " 1741 printk(KERN_INFO XFS_VERSION_STRING " with "
1831 XFS_BUILD_OPTIONS " enabled\n"); 1742 XFS_BUILD_OPTIONS " enabled\n");
1832 1743
1833 ktrace_init(64);
1834 xfs_ioend_init(); 1744 xfs_ioend_init();
1835 xfs_dir_startup(); 1745 xfs_dir_startup();
1836 1746
@@ -1838,13 +1748,9 @@ init_xfs_fs(void)
1838 if (error) 1748 if (error)
1839 goto out; 1749 goto out;
1840 1750
1841 error = xfs_alloc_trace_bufs();
1842 if (error)
1843 goto out_destroy_zones;
1844
1845 error = xfs_mru_cache_init(); 1751 error = xfs_mru_cache_init();
1846 if (error) 1752 if (error)
1847 goto out_free_trace_buffers; 1753 goto out_destroy_zones;
1848 1754
1849 error = xfs_filestream_init(); 1755 error = xfs_filestream_init();
1850 if (error) 1756 if (error)
@@ -1879,8 +1785,6 @@ init_xfs_fs(void)
1879 xfs_filestream_uninit(); 1785 xfs_filestream_uninit();
1880 out_mru_cache_uninit: 1786 out_mru_cache_uninit:
1881 xfs_mru_cache_uninit(); 1787 xfs_mru_cache_uninit();
1882 out_free_trace_buffers:
1883 xfs_free_trace_bufs();
1884 out_destroy_zones: 1788 out_destroy_zones:
1885 xfs_destroy_zones(); 1789 xfs_destroy_zones();
1886 out: 1790 out:
@@ -1897,9 +1801,7 @@ exit_xfs_fs(void)
1897 xfs_buf_terminate(); 1801 xfs_buf_terminate();
1898 xfs_filestream_uninit(); 1802 xfs_filestream_uninit();
1899 xfs_mru_cache_uninit(); 1803 xfs_mru_cache_uninit();
1900 xfs_free_trace_bufs();
1901 xfs_destroy_zones(); 1804 xfs_destroy_zones();
1902 ktrace_uninit();
1903} 1805}
1904 1806
1905module_init(init_xfs_fs); 1807module_init(init_xfs_fs);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 18175ebd58ed..233d4b9881b1 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
56# define XFS_BIGFS_STRING 56# define XFS_BIGFS_STRING
57#endif 57#endif
58 58
59#ifdef CONFIG_XFS_TRACE
60# define XFS_TRACE_STRING "tracing, "
61#else
62# define XFS_TRACE_STRING
63#endif
64
65#ifdef CONFIG_XFS_DMAPI 59#ifdef CONFIG_XFS_DMAPI
66# define XFS_DMAPI_STRING "dmapi support, " 60# define XFS_DMAPI_STRING "dmapi support, "
67#else 61#else
@@ -78,7 +72,6 @@ extern void xfs_qm_exit(void);
78 XFS_SECURITY_STRING \ 72 XFS_SECURITY_STRING \
79 XFS_REALTIME_STRING \ 73 XFS_REALTIME_STRING \
80 XFS_BIGFS_STRING \ 74 XFS_BIGFS_STRING \
81 XFS_TRACE_STRING \
82 XFS_DMAPI_STRING \ 75 XFS_DMAPI_STRING \
83 XFS_DBG_STRING /* DBG must be last */ 76 XFS_DBG_STRING /* DBG must be last */
84 77
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index d895a3a960f5..6fed97a8cd3e 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -44,6 +44,7 @@
44#include "xfs_inode_item.h" 44#include "xfs_inode_item.h"
45#include "xfs_rw.h" 45#include "xfs_rw.h"
46#include "xfs_quota.h" 46#include "xfs_quota.h"
47#include "xfs_trace.h"
47 48
48#include <linux/kthread.h> 49#include <linux/kthread.h>
49#include <linux/freezer.h> 50#include <linux/freezer.h>
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
new file mode 100644
index 000000000000..856eb3c8d605
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright (c) 2009, Christoph Hellwig
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_da_btree.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_alloc_btree.h"
31#include "xfs_ialloc_btree.h"
32#include "xfs_dir2_sf.h"
33#include "xfs_attr_sf.h"
34#include "xfs_dinode.h"
35#include "xfs_inode.h"
36#include "xfs_btree.h"
37#include "xfs_dmapi.h"
38#include "xfs_mount.h"
39#include "xfs_ialloc.h"
40#include "xfs_itable.h"
41#include "xfs_alloc.h"
42#include "xfs_bmap.h"
43#include "xfs_attr.h"
44#include "xfs_attr_sf.h"
45#include "xfs_attr_leaf.h"
46#include "xfs_log_priv.h"
47#include "xfs_buf_item.h"
48#include "xfs_quota.h"
49#include "xfs_iomap.h"
50#include "xfs_aops.h"
51#include "quota/xfs_dquot_item.h"
52#include "quota/xfs_dquot.h"
53
54/*
55 * Format fsblock number into a static buffer & return it.
56 */
57STATIC char *xfs_fmtfsblock(xfs_fsblock_t bno)
58{
59 static char rval[50];
60
61 if (bno == NULLFSBLOCK)
62 sprintf(rval, "NULLFSBLOCK");
63 else if (isnullstartblock(bno))
64 sprintf(rval, "NULLSTARTBLOCK(%lld)", startblockval(bno));
65 else
66 sprintf(rval, "%lld", (xfs_dfsbno_t)bno);
67 return rval;
68}
69
70/*
71 * We include this last to have the helpers above available for the trace
72 * event implementations.
73 */
74#define CREATE_TRACE_POINTS
75#include "xfs_trace.h"
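
xfs_trace.c above is the single translation unit that turns the declarations in xfs_trace.h into definitions via CREATE_TRACE_POINTS. A minimal sketch of the same pattern for a hypothetical "foo" subsystem (names are illustrative, not from this patch):

/* foo_trace.h -- declares the events; may be included from many files */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_FOO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _FOO_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_did_work,
	TP_PROTO(int units),
	TP_ARGS(units),
	TP_STRUCT__entry(
		__field(int, units)
	),
	TP_fast_assign(
		__entry->units = units;
	),
	TP_printk("units %d", __entry->units)
);

#endif /* _FOO_TRACE_H */

/* must stay outside the include guard so define_trace.h can re-read it */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE foo_trace
#include <trace/define_trace.h>

/* foo_trace.c -- exactly one file defines CREATE_TRACE_POINTS first */
#define CREATE_TRACE_POINTS
#include "foo_trace.h"
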
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
new file mode 100644
index 000000000000..c40834bdee58
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -0,0 +1,1369 @@
1/*
2 * Copyright (c) 2009, Christoph Hellwig
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#undef TRACE_SYSTEM
19#define TRACE_SYSTEM xfs
20
21#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
22#define _TRACE_XFS_H
23
24#include <linux/tracepoint.h>
25
26struct xfs_agf;
27struct xfs_alloc_arg;
28struct xfs_attr_list_context;
29struct xfs_buf_log_item;
30struct xfs_da_args;
31struct xfs_da_node_entry;
32struct xfs_dquot;
33struct xlog_ticket;
34struct log;
35
36#define DEFINE_ATTR_LIST_EVENT(name) \
37TRACE_EVENT(name, \
38 TP_PROTO(struct xfs_attr_list_context *ctx), \
39 TP_ARGS(ctx), \
40 TP_STRUCT__entry( \
41 __field(dev_t, dev) \
42 __field(xfs_ino_t, ino) \
43 __field(u32, hashval) \
44 __field(u32, blkno) \
45 __field(u32, offset) \
46 __field(void *, alist) \
47 __field(int, bufsize) \
48 __field(int, count) \
49 __field(int, firstu) \
50 __field(int, dupcnt) \
51 __field(int, flags) \
52 ), \
53 TP_fast_assign( \
54 __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \
55 __entry->ino = ctx->dp->i_ino; \
56 __entry->hashval = ctx->cursor->hashval; \
57 __entry->blkno = ctx->cursor->blkno; \
58 __entry->offset = ctx->cursor->offset; \
59 __entry->alist = ctx->alist; \
60 __entry->bufsize = ctx->bufsize; \
61 __entry->count = ctx->count; \
62 __entry->firstu = ctx->firstu; \
63 __entry->flags = ctx->flags; \
64 ), \
65 TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \
66 "alist 0x%p size %u count %u firstu %u flags %d %s", \
67 MAJOR(__entry->dev), MINOR(__entry->dev), \
68 __entry->ino, \
69 __entry->hashval, \
70 __entry->blkno, \
71 __entry->offset, \
72 __entry->dupcnt, \
73 __entry->alist, \
74 __entry->bufsize, \
75 __entry->count, \
76 __entry->firstu, \
77 __entry->flags, \
78 __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \
79 ) \
80)
81DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
82DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
83DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
84DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
85DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
86DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
87DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
88DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
89
90TRACE_EVENT(xfs_attr_list_node_descend,
91 TP_PROTO(struct xfs_attr_list_context *ctx,
92 struct xfs_da_node_entry *btree),
93 TP_ARGS(ctx, btree),
94 TP_STRUCT__entry(
95 __field(dev_t, dev)
96 __field(xfs_ino_t, ino)
97 __field(u32, hashval)
98 __field(u32, blkno)
99 __field(u32, offset)
100 __field(void *, alist)
101 __field(int, bufsize)
102 __field(int, count)
103 __field(int, firstu)
104 __field(int, dupcnt)
105 __field(int, flags)
106 __field(u32, bt_hashval)
107 __field(u32, bt_before)
108 ),
109 TP_fast_assign(
110 __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
111 __entry->ino = ctx->dp->i_ino;
112 __entry->hashval = ctx->cursor->hashval;
113 __entry->blkno = ctx->cursor->blkno;
114 __entry->offset = ctx->cursor->offset;
115 __entry->alist = ctx->alist;
116 __entry->bufsize = ctx->bufsize;
117 __entry->count = ctx->count;
118 __entry->firstu = ctx->firstu;
119 __entry->flags = ctx->flags;
120 __entry->bt_hashval = be32_to_cpu(btree->hashval);
121 __entry->bt_before = be32_to_cpu(btree->before);
122 ),
123 TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
124 "alist 0x%p size %u count %u firstu %u flags %d %s "
125 "node hashval %u, node before %u",
126 MAJOR(__entry->dev), MINOR(__entry->dev),
127 __entry->ino,
128 __entry->hashval,
129 __entry->blkno,
130 __entry->offset,
131 __entry->dupcnt,
132 __entry->alist,
133 __entry->bufsize,
134 __entry->count,
135 __entry->firstu,
136 __entry->flags,
137 __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
138 __entry->bt_hashval,
139 __entry->bt_before)
140);
141
142TRACE_EVENT(xfs_iext_insert,
143 TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
144 struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
145 TP_ARGS(ip, idx, r, state, caller_ip),
146 TP_STRUCT__entry(
147 __field(dev_t, dev)
148 __field(xfs_ino_t, ino)
149 __field(xfs_extnum_t, idx)
150 __field(xfs_fileoff_t, startoff)
151 __field(xfs_fsblock_t, startblock)
152 __field(xfs_filblks_t, blockcount)
153 __field(xfs_exntst_t, state)
154 __field(int, bmap_state)
155 __field(unsigned long, caller_ip)
156 ),
157 TP_fast_assign(
158 __entry->dev = VFS_I(ip)->i_sb->s_dev;
159 __entry->ino = ip->i_ino;
160 __entry->idx = idx;
161 __entry->startoff = r->br_startoff;
162 __entry->startblock = r->br_startblock;
163 __entry->blockcount = r->br_blockcount;
164 __entry->state = r->br_state;
165 __entry->bmap_state = state;
166 __entry->caller_ip = caller_ip;
167 ),
168 TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
169 "offset %lld block %s count %lld flag %d caller %pf",
170 MAJOR(__entry->dev), MINOR(__entry->dev),
171 __entry->ino,
172 __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
173 (long)__entry->idx,
174 __entry->startoff,
175 xfs_fmtfsblock(__entry->startblock),
176 __entry->blockcount,
177 __entry->state,
178 (char *)__entry->caller_ip)
179);
180
181#define DEFINE_BMAP_EVENT(name) \
182TRACE_EVENT(name, \
183 TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
184 unsigned long caller_ip), \
185 TP_ARGS(ip, idx, state, caller_ip), \
186 TP_STRUCT__entry( \
187 __field(dev_t, dev) \
188 __field(xfs_ino_t, ino) \
189 __field(xfs_extnum_t, idx) \
190 __field(xfs_fileoff_t, startoff) \
191 __field(xfs_fsblock_t, startblock) \
192 __field(xfs_filblks_t, blockcount) \
193 __field(xfs_exntst_t, state) \
194 __field(int, bmap_state) \
195 __field(unsigned long, caller_ip) \
196 ), \
197 TP_fast_assign( \
198 struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \
199 ip->i_afp : &ip->i_df; \
200 struct xfs_bmbt_irec r; \
201 \
202 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \
203 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
204 __entry->ino = ip->i_ino; \
205 __entry->idx = idx; \
206 __entry->startoff = r.br_startoff; \
207 __entry->startblock = r.br_startblock; \
208 __entry->blockcount = r.br_blockcount; \
209 __entry->state = r.br_state; \
210 __entry->bmap_state = state; \
211 __entry->caller_ip = caller_ip; \
212 ), \
213 TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \
214 "offset %lld block %s count %lld flag %d caller %pf", \
215 MAJOR(__entry->dev), MINOR(__entry->dev), \
216 __entry->ino, \
217 __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \
218 (long)__entry->idx, \
219 __entry->startoff, \
220 xfs_fmtfsblock(__entry->startblock), \
221 __entry->blockcount, \
222 __entry->state, \
223 (char *)__entry->caller_ip) \
224)
225
226DEFINE_BMAP_EVENT(xfs_iext_remove);
227DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
228DEFINE_BMAP_EVENT(xfs_bmap_post_update);
229DEFINE_BMAP_EVENT(xfs_extlist);
230
231#define DEFINE_BUF_EVENT(tname) \
232TRACE_EVENT(tname, \
233 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
234 TP_ARGS(bp, caller_ip), \
235 TP_STRUCT__entry( \
236 __field(dev_t, dev) \
237 __field(xfs_daddr_t, bno) \
238 __field(size_t, buffer_length) \
239 __field(int, hold) \
240 __field(int, pincount) \
241 __field(unsigned, lockval) \
242 __field(unsigned, flags) \
243 __field(unsigned long, caller_ip) \
244 ), \
245 TP_fast_assign( \
246 __entry->dev = bp->b_target->bt_dev; \
247 __entry->bno = bp->b_bn; \
248 __entry->buffer_length = bp->b_buffer_length; \
249 __entry->hold = atomic_read(&bp->b_hold); \
250 __entry->pincount = atomic_read(&bp->b_pin_count); \
251 __entry->lockval = xfs_buf_lock_value(bp); \
252 __entry->flags = bp->b_flags; \
253 __entry->caller_ip = caller_ip; \
254 ), \
255 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
256 "lock %d flags %s caller %pf", \
257 MAJOR(__entry->dev), MINOR(__entry->dev), \
258 (unsigned long long)__entry->bno, \
259 __entry->buffer_length, \
260 __entry->hold, \
261 __entry->pincount, \
262 __entry->lockval, \
263 __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
264 (void *)__entry->caller_ip) \
265)
266DEFINE_BUF_EVENT(xfs_buf_init);
267DEFINE_BUF_EVENT(xfs_buf_free);
268DEFINE_BUF_EVENT(xfs_buf_hold);
269DEFINE_BUF_EVENT(xfs_buf_rele);
270DEFINE_BUF_EVENT(xfs_buf_pin);
271DEFINE_BUF_EVENT(xfs_buf_unpin);
272DEFINE_BUF_EVENT(xfs_buf_iodone);
273DEFINE_BUF_EVENT(xfs_buf_iorequest);
274DEFINE_BUF_EVENT(xfs_buf_bawrite);
275DEFINE_BUF_EVENT(xfs_buf_bdwrite);
276DEFINE_BUF_EVENT(xfs_buf_lock);
277DEFINE_BUF_EVENT(xfs_buf_lock_done);
278DEFINE_BUF_EVENT(xfs_buf_cond_lock);
279DEFINE_BUF_EVENT(xfs_buf_unlock);
280DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
281DEFINE_BUF_EVENT(xfs_buf_iowait);
282DEFINE_BUF_EVENT(xfs_buf_iowait_done);
283DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
284DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
285DEFINE_BUF_EVENT(xfs_buf_delwri_split);
286DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
287DEFINE_BUF_EVENT(xfs_bdstrat_shut);
288DEFINE_BUF_EVENT(xfs_buf_item_relse);
289DEFINE_BUF_EVENT(xfs_buf_item_iodone);
290DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
291DEFINE_BUF_EVENT(xfs_buf_error_relse);
292DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
293DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
294
295/* not really buffer traces, but the buf provides useful information */
296DEFINE_BUF_EVENT(xfs_btree_corrupt);
297DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
298DEFINE_BUF_EVENT(xfs_reset_dqcounts);
299DEFINE_BUF_EVENT(xfs_inode_item_push);
300
301/* pass flags explicitly */
302#define DEFINE_BUF_FLAGS_EVENT(tname) \
303TRACE_EVENT(tname, \
304 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
305 TP_ARGS(bp, flags, caller_ip), \
306 TP_STRUCT__entry( \
307 __field(dev_t, dev) \
308 __field(xfs_daddr_t, bno) \
309 __field(size_t, buffer_length) \
310 __field(int, hold) \
311 __field(int, pincount) \
312 __field(unsigned, lockval) \
313 __field(unsigned, flags) \
314 __field(unsigned long, caller_ip) \
315 ), \
316 TP_fast_assign( \
317 __entry->dev = bp->b_target->bt_dev; \
318 __entry->bno = bp->b_bn; \
319 __entry->buffer_length = bp->b_buffer_length; \
320 __entry->flags = flags; \
321 __entry->hold = atomic_read(&bp->b_hold); \
322 __entry->pincount = atomic_read(&bp->b_pin_count); \
323 __entry->lockval = xfs_buf_lock_value(bp); \
324 __entry->caller_ip = caller_ip; \
325 ), \
326 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
327 "lock %d flags %s caller %pf", \
328 MAJOR(__entry->dev), MINOR(__entry->dev), \
329 (unsigned long long)__entry->bno, \
330 __entry->buffer_length, \
331 __entry->hold, \
332 __entry->pincount, \
333 __entry->lockval, \
334 __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
335 (void *)__entry->caller_ip) \
336)
337DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
338DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
339DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
340
341TRACE_EVENT(xfs_buf_ioerror,
342 TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
343 TP_ARGS(bp, error, caller_ip),
344 TP_STRUCT__entry(
345 __field(dev_t, dev)
346 __field(xfs_daddr_t, bno)
347 __field(size_t, buffer_length)
348 __field(unsigned, flags)
349 __field(int, hold)
350 __field(int, pincount)
351 __field(unsigned, lockval)
352 __field(int, error)
353 __field(unsigned long, caller_ip)
354 ),
355 TP_fast_assign(
356 __entry->dev = bp->b_target->bt_dev;
357 __entry->bno = bp->b_bn;
358 __entry->buffer_length = bp->b_buffer_length;
359 __entry->hold = atomic_read(&bp->b_hold);
360 __entry->pincount = atomic_read(&bp->b_pin_count);
361 __entry->lockval = xfs_buf_lock_value(bp);
362 __entry->error = error;
363 __entry->flags = bp->b_flags;
364 __entry->caller_ip = caller_ip;
365 ),
366 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
367 "lock %d error %d flags %s caller %pf",
368 MAJOR(__entry->dev), MINOR(__entry->dev),
369 (unsigned long long)__entry->bno,
370 __entry->buffer_length,
371 __entry->hold,
372 __entry->pincount,
373 __entry->lockval,
374 __entry->error,
375 __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
376 (void *)__entry->caller_ip)
377);
378
379#define DEFINE_BUF_ITEM_EVENT(tname) \
380TRACE_EVENT(tname, \
381 TP_PROTO(struct xfs_buf_log_item *bip), \
382 TP_ARGS(bip), \
383 TP_STRUCT__entry( \
384 __field(dev_t, dev) \
385 __field(xfs_daddr_t, buf_bno) \
386 __field(size_t, buf_len) \
387 __field(int, buf_hold) \
388 __field(int, buf_pincount) \
389 __field(int, buf_lockval) \
390 __field(unsigned, buf_flags) \
391 __field(unsigned, bli_recur) \
392 __field(int, bli_refcount) \
393 __field(unsigned, bli_flags) \
394 __field(void *, li_desc) \
395 __field(unsigned, li_flags) \
396 ), \
397 TP_fast_assign( \
398 __entry->dev = bip->bli_buf->b_target->bt_dev; \
399 __entry->bli_flags = bip->bli_flags; \
400 __entry->bli_recur = bip->bli_recur; \
401 __entry->bli_refcount = atomic_read(&bip->bli_refcount); \
402 __entry->buf_bno = bip->bli_buf->b_bn; \
403 __entry->buf_len = bip->bli_buf->b_buffer_length; \
404 __entry->buf_flags = bip->bli_buf->b_flags; \
405 __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \
406 __entry->buf_pincount = \
407 atomic_read(&bip->bli_buf->b_pin_count); \
408 __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \
409 __entry->li_desc = bip->bli_item.li_desc; \
410 __entry->li_flags = bip->bli_item.li_flags; \
411 ), \
412 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
413 "lock %d flags %s recur %d refcount %d bliflags %s " \
414 "lidesc 0x%p liflags %s", \
415 MAJOR(__entry->dev), MINOR(__entry->dev), \
416 (unsigned long long)__entry->buf_bno, \
417 __entry->buf_len, \
418 __entry->buf_hold, \
419 __entry->buf_pincount, \
420 __entry->buf_lockval, \
421 __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \
422 __entry->bli_recur, \
423 __entry->bli_refcount, \
424 __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \
425 __entry->li_desc, \
426 __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \
427)
428DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
429DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
430DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
431DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
432DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
433DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
434DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
435DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
436DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
437DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
438DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
439DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
440DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
441DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
442DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
443DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
444DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
445DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
446DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
447DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
448DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
449DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
450DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
451DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
452
453#define DEFINE_LOCK_EVENT(name) \
454TRACE_EVENT(name, \
455 TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
456 unsigned long caller_ip), \
457 TP_ARGS(ip, lock_flags, caller_ip), \
458 TP_STRUCT__entry( \
459 __field(dev_t, dev) \
460 __field(xfs_ino_t, ino) \
461 __field(int, lock_flags) \
462 __field(unsigned long, caller_ip) \
463 ), \
464 TP_fast_assign( \
465 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
466 __entry->ino = ip->i_ino; \
467 __entry->lock_flags = lock_flags; \
468 __entry->caller_ip = caller_ip; \
469 ), \
470 TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \
471 MAJOR(__entry->dev), MINOR(__entry->dev), \
472 __entry->ino, \
473 __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \
474 (void *)__entry->caller_ip) \
475)
476
477DEFINE_LOCK_EVENT(xfs_ilock);
478DEFINE_LOCK_EVENT(xfs_ilock_nowait);
479DEFINE_LOCK_EVENT(xfs_ilock_demote);
480DEFINE_LOCK_EVENT(xfs_iunlock);
481
482#define DEFINE_IGET_EVENT(name) \
483TRACE_EVENT(name, \
484 TP_PROTO(struct xfs_inode *ip), \
485 TP_ARGS(ip), \
486 TP_STRUCT__entry( \
487 __field(dev_t, dev) \
488 __field(xfs_ino_t, ino) \
489 ), \
490 TP_fast_assign( \
491 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
492 __entry->ino = ip->i_ino; \
493 ), \
494 TP_printk("dev %d:%d ino 0x%llx", \
495 MAJOR(__entry->dev), MINOR(__entry->dev), \
496 __entry->ino) \
497)
498DEFINE_IGET_EVENT(xfs_iget_skip);
499DEFINE_IGET_EVENT(xfs_iget_reclaim);
500DEFINE_IGET_EVENT(xfs_iget_found);
501DEFINE_IGET_EVENT(xfs_iget_alloc);
502
503#define DEFINE_INODE_EVENT(name) \
504TRACE_EVENT(name, \
505 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
506 TP_ARGS(ip, caller_ip), \
507 TP_STRUCT__entry( \
508 __field(dev_t, dev) \
509 __field(xfs_ino_t, ino) \
510 __field(int, count) \
511 __field(unsigned long, caller_ip) \
512 ), \
513 TP_fast_assign( \
514 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
515 __entry->ino = ip->i_ino; \
516 __entry->count = atomic_read(&VFS_I(ip)->i_count); \
517 __entry->caller_ip = caller_ip; \
518 ), \
519 TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \
520 MAJOR(__entry->dev), MINOR(__entry->dev), \
521 __entry->ino, \
522 __entry->count, \
523 (char *)__entry->caller_ip) \
524)
525DEFINE_INODE_EVENT(xfs_ihold);
526DEFINE_INODE_EVENT(xfs_irele);
 527/* the old xfs_itrace_entry tracer - to be replaced by something in the VFS */
528DEFINE_INODE_EVENT(xfs_inode);
529#define xfs_itrace_entry(ip) \
530 trace_xfs_inode(ip, _THIS_IP_)
531
532#define DEFINE_DQUOT_EVENT(tname) \
533TRACE_EVENT(tname, \
534 TP_PROTO(struct xfs_dquot *dqp), \
535 TP_ARGS(dqp), \
536 TP_STRUCT__entry( \
537 __field(dev_t, dev) \
538 __field(__be32, id) \
539 __field(unsigned, flags) \
540 __field(unsigned, nrefs) \
541 __field(unsigned long long, res_bcount) \
542 __field(unsigned long long, bcount) \
543 __field(unsigned long long, icount) \
544 __field(unsigned long long, blk_hardlimit) \
545 __field(unsigned long long, blk_softlimit) \
546 __field(unsigned long long, ino_hardlimit) \
547 __field(unsigned long long, ino_softlimit) \
548 ), \
549 TP_fast_assign( \
550 __entry->dev = dqp->q_mount->m_super->s_dev; \
551 __entry->id = dqp->q_core.d_id; \
552 __entry->flags = dqp->dq_flags; \
553 __entry->nrefs = dqp->q_nrefs; \
554 __entry->res_bcount = dqp->q_res_bcount; \
555 __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \
556 __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \
557 __entry->blk_hardlimit = \
558 be64_to_cpu(dqp->q_core.d_blk_hardlimit); \
559 __entry->blk_softlimit = \
560 be64_to_cpu(dqp->q_core.d_blk_softlimit); \
561 __entry->ino_hardlimit = \
562 be64_to_cpu(dqp->q_core.d_ino_hardlimit); \
563 __entry->ino_softlimit = \
564 be64_to_cpu(dqp->q_core.d_ino_softlimit); \
565 ), \
566 TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \
567 "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \
568 "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \
569 MAJOR(__entry->dev), MINOR(__entry->dev), \
570 be32_to_cpu(__entry->id), \
571 __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \
572 __entry->nrefs, \
573 __entry->res_bcount, \
574 __entry->bcount, \
575 __entry->blk_hardlimit, \
576 __entry->blk_softlimit, \
577 __entry->icount, \
578 __entry->ino_hardlimit, \
579 __entry->ino_softlimit) \
580)
581DEFINE_DQUOT_EVENT(xfs_dqadjust);
582DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
583DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
584DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
585DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
586DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
587DEFINE_DQUOT_EVENT(xfs_dqattach_found);
588DEFINE_DQUOT_EVENT(xfs_dqattach_get);
589DEFINE_DQUOT_EVENT(xfs_dqinit);
590DEFINE_DQUOT_EVENT(xfs_dqreuse);
591DEFINE_DQUOT_EVENT(xfs_dqalloc);
592DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
593DEFINE_DQUOT_EVENT(xfs_dqread);
594DEFINE_DQUOT_EVENT(xfs_dqread_fail);
595DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
596DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
597DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
598DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
599DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
600DEFINE_DQUOT_EVENT(xfs_dqget_hit);
601DEFINE_DQUOT_EVENT(xfs_dqget_miss);
602DEFINE_DQUOT_EVENT(xfs_dqput);
603DEFINE_DQUOT_EVENT(xfs_dqput_wait);
604DEFINE_DQUOT_EVENT(xfs_dqput_free);
605DEFINE_DQUOT_EVENT(xfs_dqrele);
606DEFINE_DQUOT_EVENT(xfs_dqflush);
607DEFINE_DQUOT_EVENT(xfs_dqflush_force);
608DEFINE_DQUOT_EVENT(xfs_dqflush_done);
609/* not really iget events, but we re-use the format */
610DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
611DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
612
613
614#define DEFINE_LOGGRANT_EVENT(tname) \
615TRACE_EVENT(tname, \
616 TP_PROTO(struct log *log, struct xlog_ticket *tic), \
617 TP_ARGS(log, tic), \
618 TP_STRUCT__entry( \
619 __field(dev_t, dev) \
620 __field(unsigned, trans_type) \
621 __field(char, ocnt) \
622 __field(char, cnt) \
623 __field(int, curr_res) \
624 __field(int, unit_res) \
625 __field(unsigned int, flags) \
626 __field(void *, reserve_headq) \
627 __field(void *, write_headq) \
628 __field(int, grant_reserve_cycle) \
629 __field(int, grant_reserve_bytes) \
630 __field(int, grant_write_cycle) \
631 __field(int, grant_write_bytes) \
632 __field(int, curr_cycle) \
633 __field(int, curr_block) \
634 __field(xfs_lsn_t, tail_lsn) \
635 ), \
636 TP_fast_assign( \
637 __entry->dev = log->l_mp->m_super->s_dev; \
638 __entry->trans_type = tic->t_trans_type; \
639 __entry->ocnt = tic->t_ocnt; \
640 __entry->cnt = tic->t_cnt; \
641 __entry->curr_res = tic->t_curr_res; \
642 __entry->unit_res = tic->t_unit_res; \
643 __entry->flags = tic->t_flags; \
644 __entry->reserve_headq = log->l_reserve_headq; \
645 __entry->write_headq = log->l_write_headq; \
646 __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \
647 __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \
648 __entry->grant_write_cycle = log->l_grant_write_cycle; \
649 __entry->grant_write_bytes = log->l_grant_write_bytes; \
650 __entry->curr_cycle = log->l_curr_cycle; \
651 __entry->curr_block = log->l_curr_block; \
652 __entry->tail_lsn = log->l_tail_lsn; \
653 ), \
654 TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \
655 "t_unit_res %u t_flags %s reserve_headq 0x%p " \
656 "write_headq 0x%p grant_reserve_cycle %d " \
657 "grant_reserve_bytes %d grant_write_cycle %d " \
658 "grant_write_bytes %d curr_cycle %d curr_block %d " \
659 "tail_cycle %d tail_block %d", \
660 MAJOR(__entry->dev), MINOR(__entry->dev), \
661 __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \
662 __entry->ocnt, \
663 __entry->cnt, \
664 __entry->curr_res, \
665 __entry->unit_res, \
666 __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \
667 __entry->reserve_headq, \
668 __entry->write_headq, \
669 __entry->grant_reserve_cycle, \
670 __entry->grant_reserve_bytes, \
671 __entry->grant_write_cycle, \
672 __entry->grant_write_bytes, \
673 __entry->curr_cycle, \
674 __entry->curr_block, \
675 CYCLE_LSN(__entry->tail_lsn), \
676 BLOCK_LSN(__entry->tail_lsn) \
677 ) \
678)
679DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
680DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
681DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
682DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
683DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
684DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
685DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
686DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
687DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
688DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
689DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
690DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
691DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
692DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
693DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
694DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
695DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
696DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
697DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
698DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
699DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
700DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
701DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
702DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
703
704#define DEFINE_RW_EVENT(name) \
705TRACE_EVENT(name, \
706 TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
707 TP_ARGS(ip, count, offset, flags), \
708 TP_STRUCT__entry( \
709 __field(dev_t, dev) \
710 __field(xfs_ino_t, ino) \
711 __field(xfs_fsize_t, size) \
712 __field(xfs_fsize_t, new_size) \
713 __field(loff_t, offset) \
714 __field(size_t, count) \
715 __field(int, flags) \
716 ), \
717 TP_fast_assign( \
718 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
719 __entry->ino = ip->i_ino; \
720 __entry->size = ip->i_d.di_size; \
721 __entry->new_size = ip->i_new_size; \
722 __entry->offset = offset; \
723 __entry->count = count; \
724 __entry->flags = flags; \
725 ), \
726 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
727 "offset 0x%llx count 0x%zx ioflags %s", \
728 MAJOR(__entry->dev), MINOR(__entry->dev), \
729 __entry->ino, \
730 __entry->size, \
731 __entry->new_size, \
732 __entry->offset, \
733 __entry->count, \
734 __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \
735)
736DEFINE_RW_EVENT(xfs_file_read);
737DEFINE_RW_EVENT(xfs_file_buffered_write);
738DEFINE_RW_EVENT(xfs_file_direct_write);
739DEFINE_RW_EVENT(xfs_file_splice_read);
740DEFINE_RW_EVENT(xfs_file_splice_write);
741
742
743#define DEFINE_PAGE_EVENT(name) \
744TRACE_EVENT(name, \
745 TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
746 TP_ARGS(inode, page, off), \
747 TP_STRUCT__entry( \
748 __field(dev_t, dev) \
749 __field(xfs_ino_t, ino) \
750 __field(pgoff_t, pgoff) \
751 __field(loff_t, size) \
752 __field(unsigned long, offset) \
753 __field(int, delalloc) \
754 __field(int, unmapped) \
755 __field(int, unwritten) \
756 ), \
757 TP_fast_assign( \
758 int delalloc = -1, unmapped = -1, unwritten = -1; \
759 \
760 if (page_has_buffers(page)) \
761 xfs_count_page_state(page, &delalloc, \
762 &unmapped, &unwritten); \
763 __entry->dev = inode->i_sb->s_dev; \
764 __entry->ino = XFS_I(inode)->i_ino; \
765 __entry->pgoff = page_offset(page); \
766 __entry->size = i_size_read(inode); \
767 __entry->offset = off; \
768 __entry->delalloc = delalloc; \
769 __entry->unmapped = unmapped; \
770 __entry->unwritten = unwritten; \
771 ), \
772 TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \
773 "delalloc %d unmapped %d unwritten %d", \
774 MAJOR(__entry->dev), MINOR(__entry->dev), \
775 __entry->ino, \
776 __entry->pgoff, \
777 __entry->size, \
778 __entry->offset, \
779 __entry->delalloc, \
780 __entry->unmapped, \
781 __entry->unwritten) \
782)
783DEFINE_PAGE_EVENT(xfs_writepage);
784DEFINE_PAGE_EVENT(xfs_releasepage);
785DEFINE_PAGE_EVENT(xfs_invalidatepage);
786
787#define DEFINE_IOMAP_EVENT(name) \
788TRACE_EVENT(name, \
789 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
790 int flags, struct xfs_bmbt_irec *irec), \
791 TP_ARGS(ip, offset, count, flags, irec), \
792 TP_STRUCT__entry( \
793 __field(dev_t, dev) \
794 __field(xfs_ino_t, ino) \
795 __field(loff_t, size) \
796 __field(loff_t, new_size) \
797 __field(loff_t, offset) \
798 __field(size_t, count) \
799 __field(int, flags) \
800 __field(xfs_fileoff_t, startoff) \
801 __field(xfs_fsblock_t, startblock) \
802 __field(xfs_filblks_t, blockcount) \
803 ), \
804 TP_fast_assign( \
805 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
806 __entry->ino = ip->i_ino; \
807 __entry->size = ip->i_d.di_size; \
808 __entry->new_size = ip->i_new_size; \
809 __entry->offset = offset; \
810 __entry->count = count; \
811 __entry->flags = flags; \
812 __entry->startoff = irec ? irec->br_startoff : 0; \
813 __entry->startblock = irec ? irec->br_startblock : 0; \
814 __entry->blockcount = irec ? irec->br_blockcount : 0; \
815 ), \
816 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
817 "offset 0x%llx count %zd flags %s " \
818 "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
819 MAJOR(__entry->dev), MINOR(__entry->dev), \
820 __entry->ino, \
821 __entry->size, \
822 __entry->new_size, \
823 __entry->offset, \
824 __entry->count, \
825 __print_flags(__entry->flags, "|", BMAPI_FLAGS), \
826 __entry->startoff, \
827 __entry->startblock, \
828 __entry->blockcount) \
829)
830DEFINE_IOMAP_EVENT(xfs_iomap_enter);
831DEFINE_IOMAP_EVENT(xfs_iomap_found);
832DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
833
834#define DEFINE_SIMPLE_IO_EVENT(name) \
835TRACE_EVENT(name, \
836 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
837 TP_ARGS(ip, offset, count), \
838 TP_STRUCT__entry( \
839 __field(dev_t, dev) \
840 __field(xfs_ino_t, ino) \
841 __field(loff_t, size) \
842 __field(loff_t, new_size) \
843 __field(loff_t, offset) \
844 __field(size_t, count) \
845 ), \
846 TP_fast_assign( \
847 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
848 __entry->ino = ip->i_ino; \
849 __entry->size = ip->i_d.di_size; \
850 __entry->new_size = ip->i_new_size; \
851 __entry->offset = offset; \
852 __entry->count = count; \
853 ), \
854 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
855 "offset 0x%llx count %zd", \
856 MAJOR(__entry->dev), MINOR(__entry->dev), \
857 __entry->ino, \
858 __entry->size, \
859 __entry->new_size, \
860 __entry->offset, \
861 __entry->count) \
862);
863DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
864DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
865
866
867TRACE_EVENT(xfs_itruncate_start,
868 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
869 xfs_off_t toss_start, xfs_off_t toss_finish),
870 TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
871 TP_STRUCT__entry(
872 __field(dev_t, dev)
873 __field(xfs_ino_t, ino)
874 __field(xfs_fsize_t, size)
875 __field(xfs_fsize_t, new_size)
876 __field(xfs_off_t, toss_start)
877 __field(xfs_off_t, toss_finish)
878 __field(int, flag)
879 ),
880 TP_fast_assign(
881 __entry->dev = VFS_I(ip)->i_sb->s_dev;
882 __entry->ino = ip->i_ino;
883 __entry->size = ip->i_d.di_size;
884 __entry->new_size = new_size;
885 __entry->toss_start = toss_start;
886 __entry->toss_finish = toss_finish;
887 __entry->flag = flag;
888 ),
889 TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
890 "toss start 0x%llx toss finish 0x%llx",
891 MAJOR(__entry->dev), MINOR(__entry->dev),
892 __entry->ino,
893 __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
894 __entry->size,
895 __entry->new_size,
896 __entry->toss_start,
897 __entry->toss_finish)
898);
899
900#define DEFINE_ITRUNC_EVENT(name) \
901TRACE_EVENT(name, \
902 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
903 TP_ARGS(ip, new_size), \
904 TP_STRUCT__entry( \
905 __field(dev_t, dev) \
906 __field(xfs_ino_t, ino) \
907 __field(xfs_fsize_t, size) \
908 __field(xfs_fsize_t, new_size) \
909 ), \
910 TP_fast_assign( \
911 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
912 __entry->ino = ip->i_ino; \
913 __entry->size = ip->i_d.di_size; \
914 __entry->new_size = new_size; \
915 ), \
916 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \
917 MAJOR(__entry->dev), MINOR(__entry->dev), \
918 __entry->ino, \
919 __entry->size, \
920 __entry->new_size) \
921)
922DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
923DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
924
925TRACE_EVENT(xfs_pagecache_inval,
926 TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
927 TP_ARGS(ip, start, finish),
928 TP_STRUCT__entry(
929 __field(dev_t, dev)
930 __field(xfs_ino_t, ino)
931 __field(xfs_fsize_t, size)
932 __field(xfs_off_t, start)
933 __field(xfs_off_t, finish)
934 ),
935 TP_fast_assign(
936 __entry->dev = VFS_I(ip)->i_sb->s_dev;
937 __entry->ino = ip->i_ino;
938 __entry->size = ip->i_d.di_size;
939 __entry->start = start;
940 __entry->finish = finish;
941 ),
942 TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
943 MAJOR(__entry->dev), MINOR(__entry->dev),
944 __entry->ino,
945 __entry->size,
946 __entry->start,
947 __entry->finish)
948);
949
950TRACE_EVENT(xfs_bunmap,
951 TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
952 int flags, unsigned long caller_ip),
953 TP_ARGS(ip, bno, len, flags, caller_ip),
954 TP_STRUCT__entry(
955 __field(dev_t, dev)
956 __field(xfs_ino_t, ino)
957 __field(xfs_fsize_t, size)
958 __field(xfs_fileoff_t, bno)
959 __field(xfs_filblks_t, len)
960 __field(unsigned long, caller_ip)
961 __field(int, flags)
962 ),
963 TP_fast_assign(
964 __entry->dev = VFS_I(ip)->i_sb->s_dev;
965 __entry->ino = ip->i_ino;
966 __entry->size = ip->i_d.di_size;
967 __entry->bno = bno;
968 __entry->len = len;
969 __entry->caller_ip = caller_ip;
970 __entry->flags = flags;
971 ),
972 TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
973 "flags %s caller %pf",
974 MAJOR(__entry->dev), MINOR(__entry->dev),
975 __entry->ino,
976 __entry->size,
977 __entry->bno,
978 __entry->len,
979 __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
980 (void *)__entry->caller_ip)
981
982);
983
984TRACE_EVENT(xfs_alloc_busy,
985 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
986 xfs_extlen_t len, int slot),
987 TP_ARGS(mp, agno, agbno, len, slot),
988 TP_STRUCT__entry(
989 __field(dev_t, dev)
990 __field(xfs_agnumber_t, agno)
991 __field(xfs_agblock_t, agbno)
992 __field(xfs_extlen_t, len)
993 __field(int, slot)
994 ),
995 TP_fast_assign(
996 __entry->dev = mp->m_super->s_dev;
997 __entry->agno = agno;
998 __entry->agbno = agbno;
999 __entry->len = len;
1000 __entry->slot = slot;
1001 ),
1002 TP_printk("dev %d:%d agno %u agbno %u len %u slot %d",
1003 MAJOR(__entry->dev), MINOR(__entry->dev),
1004 __entry->agno,
1005 __entry->agbno,
1006 __entry->len,
1007 __entry->slot)
1008
1009);
1010
1011#define XFS_BUSY_STATES \
1012 { 0, "found" }, \
1013 { 1, "missing" }
1014
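/*
 * Note: XFS_BUSY_STATES is a { value, "name" } table for __print_symbolic(),
 * which prints the single matching name ("found" or "missing"); bitmask
 * fields such as b_flags are instead decoded by __print_flags(), which joins
 * all set flag names with the given separator.
 */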
1015TRACE_EVENT(xfs_alloc_unbusy,
1016 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
1017 int slot, int found),
1018 TP_ARGS(mp, agno, slot, found),
1019 TP_STRUCT__entry(
1020 __field(dev_t, dev)
1021 __field(xfs_agnumber_t, agno)
1022 __field(int, slot)
1023 __field(int, found)
1024 ),
1025 TP_fast_assign(
1026 __entry->dev = mp->m_super->s_dev;
1027 __entry->agno = agno;
1028 __entry->slot = slot;
1029 __entry->found = found;
1030 ),
1031 TP_printk("dev %d:%d agno %u slot %d %s",
1032 MAJOR(__entry->dev), MINOR(__entry->dev),
1033 __entry->agno,
1034 __entry->slot,
1035 __print_symbolic(__entry->found, XFS_BUSY_STATES))
1036);
1037
1038TRACE_EVENT(xfs_alloc_busysearch,
1039 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
1040 xfs_extlen_t len, int found),
1041 TP_ARGS(mp, agno, agbno, len, found),
1042 TP_STRUCT__entry(
1043 __field(dev_t, dev)
1044 __field(xfs_agnumber_t, agno)
1045 __field(xfs_agblock_t, agbno)
1046 __field(xfs_extlen_t, len)
1047 __field(int, found)
1048 ),
1049 TP_fast_assign(
1050 __entry->dev = mp->m_super->s_dev;
1051 __entry->agno = agno;
1052 __entry->agbno = agbno;
1053 __entry->len = len;
1054 __entry->found = found;
1055 ),
1056 TP_printk("dev %d:%d agno %u agbno %u len %u %s",
1057 MAJOR(__entry->dev), MINOR(__entry->dev),
1058 __entry->agno,
1059 __entry->agbno,
1060 __entry->len,
1061 __print_symbolic(__entry->found, XFS_BUSY_STATES))
1062);
1063
1064TRACE_EVENT(xfs_agf,
1065 TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
1066 unsigned long caller_ip),
1067 TP_ARGS(mp, agf, flags, caller_ip),
1068 TP_STRUCT__entry(
1069 __field(dev_t, dev)
1070 __field(xfs_agnumber_t, agno)
1071 __field(int, flags)
1072 __field(__u32, length)
1073 __field(__u32, bno_root)
1074 __field(__u32, cnt_root)
1075 __field(__u32, bno_level)
1076 __field(__u32, cnt_level)
1077 __field(__u32, flfirst)
1078 __field(__u32, fllast)
1079 __field(__u32, flcount)
1080 __field(__u32, freeblks)
1081 __field(__u32, longest)
1082 __field(unsigned long, caller_ip)
1083 ),
1084 TP_fast_assign(
1085 __entry->dev = mp->m_super->s_dev;
1086 __entry->agno = be32_to_cpu(agf->agf_seqno),
1087 __entry->flags = flags;
1088 __entry->length = be32_to_cpu(agf->agf_length),
1089 __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
1090 __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
1091 __entry->bno_level =
1092 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
1093 __entry->cnt_level =
1094 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
1095 __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
1096 __entry->fllast = be32_to_cpu(agf->agf_fllast),
1097 __entry->flcount = be32_to_cpu(agf->agf_flcount),
1098 __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
1099 __entry->longest = be32_to_cpu(agf->agf_longest);
1100 __entry->caller_ip = caller_ip;
1101 ),
1102 TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
1103 "levels b %u c %u flfirst %u fllast %u flcount %u "
1104 "freeblks %u longest %u caller %pf",
1105 MAJOR(__entry->dev), MINOR(__entry->dev),
1106 __entry->agno,
1107 __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
1108 __entry->length,
1109 __entry->bno_root,
1110 __entry->cnt_root,
1111 __entry->bno_level,
1112 __entry->cnt_level,
1113 __entry->flfirst,
1114 __entry->fllast,
1115 __entry->flcount,
1116 __entry->freeblks,
1117 __entry->longest,
1118 (void *)__entry->caller_ip)
1119);
1120
1121TRACE_EVENT(xfs_free_extent,
1122 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
1123 xfs_extlen_t len, bool isfl, int haveleft, int haveright),
1124 TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
1125 TP_STRUCT__entry(
1126 __field(dev_t, dev)
1127 __field(xfs_agnumber_t, agno)
1128 __field(xfs_agblock_t, agbno)
1129 __field(xfs_extlen_t, len)
1130 __field(int, isfl)
1131 __field(int, haveleft)
1132 __field(int, haveright)
1133 ),
1134 TP_fast_assign(
1135 __entry->dev = mp->m_super->s_dev;
1136 __entry->agno = agno;
1137 __entry->agbno = agbno;
1138 __entry->len = len;
1139 __entry->isfl = isfl;
1140 __entry->haveleft = haveleft;
1141 __entry->haveright = haveright;
1142 ),
1143 TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
1144 MAJOR(__entry->dev), MINOR(__entry->dev),
1145 __entry->agno,
1146 __entry->agbno,
1147 __entry->len,
1148 __entry->isfl,
1149 __entry->haveleft ?
1150 (__entry->haveright ? "both" : "left") :
1151 (__entry->haveright ? "right" : "none"))
1152
1153);
1154
1155#define DEFINE_ALLOC_EVENT(name) \
1156TRACE_EVENT(name, \
1157 TP_PROTO(struct xfs_alloc_arg *args), \
1158 TP_ARGS(args), \
1159 TP_STRUCT__entry( \
1160 __field(dev_t, dev) \
1161 __field(xfs_agnumber_t, agno) \
1162 __field(xfs_agblock_t, agbno) \
1163 __field(xfs_extlen_t, minlen) \
1164 __field(xfs_extlen_t, maxlen) \
1165 __field(xfs_extlen_t, mod) \
1166 __field(xfs_extlen_t, prod) \
1167 __field(xfs_extlen_t, minleft) \
1168 __field(xfs_extlen_t, total) \
1169 __field(xfs_extlen_t, alignment) \
1170 __field(xfs_extlen_t, minalignslop) \
1171 __field(xfs_extlen_t, len) \
1172 __field(short, type) \
1173 __field(short, otype) \
1174 __field(char, wasdel) \
1175 __field(char, wasfromfl) \
1176 __field(char, isfl) \
1177 __field(char, userdata) \
1178 __field(xfs_fsblock_t, firstblock) \
1179 ), \
1180 TP_fast_assign( \
1181 __entry->dev = args->mp->m_super->s_dev; \
1182 __entry->agno = args->agno; \
1183 __entry->agbno = args->agbno; \
1184 __entry->minlen = args->minlen; \
1185 __entry->maxlen = args->maxlen; \
1186 __entry->mod = args->mod; \
1187 __entry->prod = args->prod; \
1188 __entry->minleft = args->minleft; \
1189 __entry->total = args->total; \
1190 __entry->alignment = args->alignment; \
1191 __entry->minalignslop = args->minalignslop; \
1192 __entry->len = args->len; \
1193 __entry->type = args->type; \
1194 __entry->otype = args->otype; \
1195 __entry->wasdel = args->wasdel; \
1196 __entry->wasfromfl = args->wasfromfl; \
1197 __entry->isfl = args->isfl; \
1198 __entry->userdata = args->userdata; \
1199 __entry->firstblock = args->firstblock; \
1200 ), \
1201 TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
1202 "prod %u minleft %u total %u alignment %u minalignslop %u " \
1203 "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
1204 "userdata %d firstblock 0x%llx", \
1205 MAJOR(__entry->dev), MINOR(__entry->dev), \
1206 __entry->agno, \
1207 __entry->agbno, \
1208 __entry->minlen, \
1209 __entry->maxlen, \
1210 __entry->mod, \
1211 __entry->prod, \
1212 __entry->minleft, \
1213 __entry->total, \
1214 __entry->alignment, \
1215 __entry->minalignslop, \
1216 __entry->len, \
1217 __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \
1218 __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \
1219 __entry->wasdel, \
1220 __entry->wasfromfl, \
1221 __entry->isfl, \
1222 __entry->userdata, \
1223 __entry->firstblock) \
1224)
1225
1226DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
1227DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
1228DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
1229DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
1230DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
1231DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
1232DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
1233DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
1234DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
1235DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
1236DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
1237DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
1238DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
1239DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
1240DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
1241DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
1242DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
1243DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
1244DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
1245DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
1246DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
1247
1248#define DEFINE_DIR2_TRACE(tname) \
1249TRACE_EVENT(tname, \
1250 TP_PROTO(struct xfs_da_args *args), \
1251 TP_ARGS(args), \
1252 TP_STRUCT__entry( \
1253 __field(dev_t, dev) \
1254 __field(xfs_ino_t, ino) \
1255 __dynamic_array(char, name, args->namelen) \
1256 __field(int, namelen) \
1257 __field(xfs_dahash_t, hashval) \
1258 __field(xfs_ino_t, inumber) \
1259 __field(int, op_flags) \
1260 ), \
1261 TP_fast_assign( \
1262 __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
1263 __entry->ino = args->dp->i_ino; \
1264 if (args->namelen) \
1265 memcpy(__get_str(name), args->name, args->namelen); \
1266 __entry->namelen = args->namelen; \
1267 __entry->hashval = args->hashval; \
1268 __entry->inumber = args->inumber; \
1269 __entry->op_flags = args->op_flags; \
1270 ), \
1271 TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \
1272 "inumber 0x%llx op_flags %s", \
1273 MAJOR(__entry->dev), MINOR(__entry->dev), \
1274 __entry->ino, \
1275 __entry->namelen, \
1276 __entry->namelen ? __get_str(name) : NULL, \
1277 __entry->namelen, \
1278 __entry->hashval, \
1279 __entry->inumber, \
1280 __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \
1281)
1282DEFINE_DIR2_TRACE(xfs_dir2_sf_addname);
1283DEFINE_DIR2_TRACE(xfs_dir2_sf_create);
1284DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup);
1285DEFINE_DIR2_TRACE(xfs_dir2_sf_replace);
1286DEFINE_DIR2_TRACE(xfs_dir2_sf_removename);
1287DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4);
1288DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8);
1289DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block);
1290DEFINE_DIR2_TRACE(xfs_dir2_block_addname);
1291DEFINE_DIR2_TRACE(xfs_dir2_block_lookup);
1292DEFINE_DIR2_TRACE(xfs_dir2_block_replace);
1293DEFINE_DIR2_TRACE(xfs_dir2_block_removename);
1294DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf);
1295DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf);
1296DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname);
1297DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup);
1298DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace);
1299DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename);
1300DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block);
1301DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node);
1302DEFINE_DIR2_TRACE(xfs_dir2_node_addname);
1303DEFINE_DIR2_TRACE(xfs_dir2_node_lookup);
1304DEFINE_DIR2_TRACE(xfs_dir2_node_replace);
1305DEFINE_DIR2_TRACE(xfs_dir2_node_removename);
1306DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf);
1307
1308#define DEFINE_DIR2_SPACE_TRACE(tname) \
1309TRACE_EVENT(tname, \
1310 TP_PROTO(struct xfs_da_args *args, int idx), \
1311 TP_ARGS(args, idx), \
1312 TP_STRUCT__entry( \
1313 __field(dev_t, dev) \
1314 __field(xfs_ino_t, ino) \
1315 __field(int, op_flags) \
1316 __field(int, idx) \
1317 ), \
1318 TP_fast_assign( \
1319 __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
1320 __entry->ino = args->dp->i_ino; \
1321 __entry->op_flags = args->op_flags; \
1322 __entry->idx = idx; \
1323 ), \
1324 TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \
1325 MAJOR(__entry->dev), MINOR(__entry->dev), \
1326 __entry->ino, \
1327 __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \
1328 __entry->idx) \
1329)
1330DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add);
1331DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove);
1332DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode);
1333DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode);
1334
1335TRACE_EVENT(xfs_dir2_leafn_moveents,
1336 TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
1337 TP_ARGS(args, src_idx, dst_idx, count),
1338 TP_STRUCT__entry(
1339 __field(dev_t, dev)
1340 __field(xfs_ino_t, ino)
1341 __field(int, op_flags)
1342 __field(int, src_idx)
1343 __field(int, dst_idx)
1344 __field(int, count)
1345 ),
1346 TP_fast_assign(
1347 __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
1348 __entry->ino = args->dp->i_ino;
1349 __entry->op_flags = args->op_flags;
1350 __entry->src_idx = src_idx;
1351 __entry->dst_idx = dst_idx;
1352 __entry->count = count;
1353 ),
1354 TP_printk("dev %d:%d ino 0x%llx op_flags %s "
1355 "src_idx %d dst_idx %d count %d",
1356 MAJOR(__entry->dev), MINOR(__entry->dev),
1357 __entry->ino,
1358 __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
1359 __entry->src_idx,
1360 __entry->dst_idx,
1361 __entry->count)
1362);
1363
1364#endif /* _TRACE_XFS_H */
1365
1366#undef TRACE_INCLUDE_PATH
1367#define TRACE_INCLUDE_PATH .
1368#define TRACE_INCLUDE_FILE xfs_trace
1369#include <trace/define_trace.h>
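The trailing block is the usual trace-header epilogue: TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE tell <trace/define_trace.h> where to re-read this header when xfs_trace.c defines CREATE_TRACE_POINTS, and that second pass is what turns each TRACE_EVENT() above into a real tracepoint under the "xfs" trace system. Because most events are stamped out by class macros, adding another event of an existing shape to this header is a one-liner; a hypothetical sketch (xfs_buf_example is not an event in this patch):

/* declares trace_xfs_buf_example(bp, caller_ip), sharing the buffer format */
DEFINE_BUF_EVENT(xfs_buf_example);

The call site would then simply invoke trace_xfs_buf_example(bp, _RET_IP_) at the point where the event should fire.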
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 00cabf5354d2..7c220b4227bc 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -39,6 +39,10 @@ struct attrlist_cursor_kern;
 #define IO_ISDIRECT	0x00004		/* bypass page cache */
 #define IO_INVIS	0x00020		/* don't update inode timestamps */
 
+#define XFS_IO_FLAGS \
+	{ IO_ISDIRECT,	"DIRECT" }, \
+	{ IO_INVIS,	"INVIS"}
+
 /*
  * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
  */
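For context, XFS_IO_FLAGS added here is the { value, "name" } table that the DEFINE_RW_EVENT format string hands to __print_flags(), so the numeric ioflags argument is rendered symbolically (e.g. "DIRECT|INVIS") when the events are read back. A minimal sketch of the pattern, with illustrative names only:

#define EXAMPLE_FLAGS \
	{ 0x1,	"FIRST" }, \
	{ 0x2,	"SECOND" }

/* used inside a TP_printk() format string as:
 *	__print_flags(__entry->flags, "|", EXAMPLE_FLAGS)
 */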
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 2f3f2229eaaf..d7c7eea09fc2 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -47,6 +47,7 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_trans_priv.h" 48#include "xfs_trans_priv.h"
49#include "xfs_qm.h" 49#include "xfs_qm.h"
50#include "xfs_trace.h"
50 51
51 52
52/* 53/*
@@ -112,10 +113,7 @@ xfs_qm_dqinit(
112 init_completion(&dqp->q_flush); 113 init_completion(&dqp->q_flush);
113 complete(&dqp->q_flush); 114 complete(&dqp->q_flush);
114 115
115#ifdef XFS_DQUOT_TRACE 116 trace_xfs_dqinit(dqp);
116 dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
117 xfs_dqtrace_entry(dqp, "DQINIT");
118#endif
119 } else { 117 } else {
120 /* 118 /*
121 * Only the q_core portion was zeroed in dqreclaim_one(). 119 * Only the q_core portion was zeroed in dqreclaim_one().
@@ -136,10 +134,7 @@ xfs_qm_dqinit(
136 dqp->q_hash = NULL; 134 dqp->q_hash = NULL;
137 ASSERT(dqp->dq_flnext == dqp->dq_flprev); 135 ASSERT(dqp->dq_flnext == dqp->dq_flprev);
138 136
139#ifdef XFS_DQUOT_TRACE 137 trace_xfs_dqreuse(dqp);
140 ASSERT(dqp->q_trace);
141 xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
142#endif
143 } 138 }
144 139
145 /* 140 /*
@@ -167,13 +162,8 @@ xfs_qm_dqdestroy(
167 162
168 mutex_destroy(&dqp->q_qlock); 163 mutex_destroy(&dqp->q_qlock);
169 sv_destroy(&dqp->q_pinwait); 164 sv_destroy(&dqp->q_pinwait);
170
171#ifdef XFS_DQUOT_TRACE
172 if (dqp->q_trace)
173 ktrace_free(dqp->q_trace);
174 dqp->q_trace = NULL;
175#endif
176 kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); 165 kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
166
177 atomic_dec(&xfs_Gqm->qm_totaldquots); 167 atomic_dec(&xfs_Gqm->qm_totaldquots);
178} 168}
179 169
@@ -195,49 +185,6 @@ xfs_qm_dqinit_core(
 	d->dd_diskdq.d_flags = type;
 }
 
-
-#ifdef XFS_DQUOT_TRACE
-/*
- * Dquot tracing for debugging.
- */
-/* ARGSUSED */
-void
-__xfs_dqtrace_entry(
-	xfs_dquot_t *dqp,
-	char *func,
-	void *retaddr,
-	xfs_inode_t *ip)
-{
-	xfs_dquot_t *udqp = NULL;
-	xfs_ino_t ino = 0;
-
-	ASSERT(dqp->q_trace);
-	if (ip) {
-		ino = ip->i_ino;
-		udqp = ip->i_udquot;
-	}
-	ktrace_enter(dqp->q_trace,
-		(void *)(__psint_t)DQUOT_KTRACE_ENTRY,
-		(void *)func,
-		(void *)(__psint_t)dqp->q_nrefs,
-		(void *)(__psint_t)dqp->dq_flags,
-		(void *)(__psint_t)dqp->q_res_bcount,
-		(void *)(__psint_t)be64_to_cpu(dqp->q_core.d_bcount),
-		(void *)(__psint_t)be64_to_cpu(dqp->q_core.d_icount),
-		(void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_hardlimit),
-		(void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_softlimit),
-		(void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_hardlimit),
-		(void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_softlimit),
-		(void *)(__psint_t)be32_to_cpu(dqp->q_core.d_id),
-		(void *)(__psint_t)current_pid(),
-		(void *)(__psint_t)ino,
-		(void *)(__psint_t)retaddr,
-		(void *)(__psint_t)udqp);
-	return;
-}
-#endif
-
-
 /*
  * If default limits are in force, push them into the dquot now.
  * We overwrite the dquot limits only if they are zero and this
@@ -425,7 +372,8 @@ xfs_qm_dqalloc(
 	xfs_trans_t *tp = *tpp;
 
 	ASSERT(tp != NULL);
-	xfs_dqtrace_entry(dqp, "DQALLOC");
+
+	trace_xfs_dqalloc(dqp);
 
 	/*
 	 * Initialize the bmap freelist prior to calling bmapi code.
@@ -612,7 +560,8 @@ xfs_qm_dqtobp(
 	 * (in which case we already have the buf).
 	 */
 	if (! newdquot) {
-		xfs_dqtrace_entry(dqp, "DQTOBP READBUF");
+		trace_xfs_dqtobp_read(dqp);
+
 		if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 					dqp->q_blkno,
 					XFS_QI_DQCHUNKLEN(mp),
@@ -670,11 +619,12 @@ xfs_qm_dqread(
 
 	ASSERT(tpp);
 
+	trace_xfs_dqread(dqp);
+
 	/*
 	 * get a pointer to the on-disk dquot and the buffer containing it
 	 * dqp already knows its own type (GROUP/USER).
 	 */
-	xfs_dqtrace_entry(dqp, "DQREAD");
 	if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
 		return (error);
 	}
@@ -763,7 +713,7 @@ xfs_qm_idtodq(
 		 * or if the dquot didn't exist on disk and we ask to
 		 * allocate (ENOENT).
 		 */
-		xfs_dqtrace_entry(dqp, "DQREAD FAIL");
+		trace_xfs_dqread_fail(dqp);
 		cancelflags |= XFS_TRANS_ABORT;
 		goto error0;
 	}
@@ -817,7 +767,8 @@ xfs_qm_dqlookup(
 		 * id can't be modified without the hashlock anyway.
 		 */
 		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
-			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
+			trace_xfs_dqlookup_found(dqp);
+
 			/*
 			 * All in core dquots must be on the dqlist of mp
 			 */
@@ -827,7 +778,7 @@ xfs_qm_dqlookup(
 			if (dqp->q_nrefs == 0) {
 				ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
 				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
-					xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");
+					trace_xfs_dqlookup_want(dqp);
 
 					/*
 					 * We may have raced with dqreclaim_one()
@@ -857,8 +808,7 @@ xfs_qm_dqlookup(
 					/*
 					 * take it off the freelist
 					 */
-					xfs_dqtrace_entry(dqp,
-						"DQLOOKUP: TAKEOFF FL");
+					trace_xfs_dqlookup_freelist(dqp);
 					XQM_FREELIST_REMOVE(dqp);
 					/* xfs_qm_freelist_print(&(xfs_Gqm->
 						qm_dqfreelist),
@@ -878,8 +828,7 @@ xfs_qm_dqlookup(
 			 */
 			ASSERT(mutex_is_locked(&qh->qh_lock));
 			if (dqp->HL_PREVP != &qh->qh_next) {
-				xfs_dqtrace_entry(dqp,
-					"DQLOOKUP: HASH MOVETOFRONT");
+				trace_xfs_dqlookup_move(dqp);
 				if ((d = dqp->HL_NEXT))
 					d->HL_PREVP = dqp->HL_PREVP;
 				*(dqp->HL_PREVP) = d;
@@ -889,7 +838,7 @@ xfs_qm_dqlookup(
 				dqp->HL_PREVP = &qh->qh_next;
 				qh->qh_next = dqp;
 			}
-			xfs_dqtrace_entry(dqp, "LOOKUP END");
+			trace_xfs_dqlookup_done(dqp);
 			*O_dqpp = dqp;
 			ASSERT(mutex_is_locked(&qh->qh_lock));
 			return (0);
@@ -971,7 +920,7 @@ xfs_qm_dqget(
 		ASSERT(*O_dqpp);
 		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
 		mutex_unlock(&h->qh_lock);
-		xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
+		trace_xfs_dqget_hit(*O_dqpp);
 		return (0);	/* success */
 	}
 	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
@@ -1104,7 +1053,7 @@ xfs_qm_dqget(
 	mutex_unlock(&h->qh_lock);
 dqret:
 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
-	xfs_dqtrace_entry(dqp, "DQGET DONE");
+	trace_xfs_dqget_miss(dqp);
 	*O_dqpp = dqp;
 	return (0);
 }
@@ -1124,7 +1073,8 @@ xfs_qm_dqput(
 
 	ASSERT(dqp->q_nrefs > 0);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	xfs_dqtrace_entry(dqp, "DQPUT");
+
+	trace_xfs_dqput(dqp);
 
 	if (dqp->q_nrefs != 1) {
 		dqp->q_nrefs--;
@@ -1137,7 +1087,7 @@ xfs_qm_dqput(
 		 * in the right order; but try to get it out-of-order first
 		 */
 		if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
-			xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
+			trace_xfs_dqput_wait(dqp);
 			xfs_dqunlock(dqp);
 			xfs_qm_freelist_lock(xfs_Gqm);
 			xfs_dqlock(dqp);
@@ -1148,7 +1098,8 @@ xfs_qm_dqput(
 
 		/* We can't depend on nrefs being == 1 here */
 		if (--dqp->q_nrefs == 0) {
-			xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
+			trace_xfs_dqput_free(dqp);
+
 			/*
 			 * insert at end of the freelist.
 			 */
@@ -1196,7 +1147,7 @@ xfs_qm_dqrele(
 	if (!dqp)
 		return;
 
-	xfs_dqtrace_entry(dqp, "DQRELE");
+	trace_xfs_dqrele(dqp);
 
 	xfs_dqlock(dqp);
 	/*
@@ -1229,7 +1180,7 @@ xfs_qm_dqflush(
 
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 	ASSERT(!completion_done(&dqp->q_flush));
-	xfs_dqtrace_entry(dqp, "DQFLUSH");
+	trace_xfs_dqflush(dqp);
 
 	/*
 	 * If not dirty, or it's pinned and we are not supposed to
@@ -1259,7 +1210,6 @@ xfs_qm_dqflush(
 	 * the ondisk-dquot has already been allocated for.
 	 */
 	if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
-		xfs_dqtrace_entry(dqp, "DQTOBP FAIL");
 		ASSERT(error != ENOENT);
 		/*
 		 * Quotas could have gotten turned off (ESRCH)
@@ -1297,7 +1247,7 @@ xfs_qm_dqflush(
 	 * get stuck waiting in the write for too long.
 	 */
 	if (XFS_BUF_ISPINNED(bp)) {
-		xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE");
+		trace_xfs_dqflush_force(dqp);
 		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
 	}
 
@@ -1308,7 +1258,9 @@ xfs_qm_dqflush(
 	} else {
 		error = xfs_bwrite(mp, bp);
 	}
-	xfs_dqtrace_entry(dqp, "DQFLUSH END");
+
+	trace_xfs_dqflush_done(dqp);
+
 	/*
 	 * dqp is still locked, but caller is free to unlock it now.
 	 */
@@ -1483,7 +1435,7 @@ xfs_qm_dqpurge(
 	 */
 	if (XFS_DQ_IS_DIRTY(dqp)) {
 		int error;
-		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
+
 		/* dqflush unlocks dqflock */
 		/*
 		 * Given that dqpurge is a very rare occurrence, it is OK
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index a2c16bcee90b..a0f7da586d1b 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -85,9 +85,6 @@ typedef struct xfs_dquot {
85 struct completion q_flush; /* flush completion queue */ 85 struct completion q_flush; /* flush completion queue */
86 atomic_t q_pincount; /* dquot pin count */ 86 atomic_t q_pincount; /* dquot pin count */
87 wait_queue_head_t q_pinwait; /* dquot pinning wait queue */ 87 wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
88#ifdef XFS_DQUOT_TRACE
89 struct ktrace *q_trace; /* trace header structure */
90#endif
91} xfs_dquot_t; 88} xfs_dquot_t;
92 89
93 90
@@ -144,24 +141,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
144 (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ 141 (XFS_IS_UQUOTA_ON((d)->q_mount)) : \
145 (XFS_IS_OQUOTA_ON((d)->q_mount)))) 142 (XFS_IS_OQUOTA_ON((d)->q_mount))))
146 143
147#ifdef XFS_DQUOT_TRACE
148/*
149 * Dquot Tracing stuff.
150 */
151#define DQUOT_TRACE_SIZE 64
152#define DQUOT_KTRACE_ENTRY 1
153
154extern void __xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func,
155 void *, xfs_inode_t *);
156#define xfs_dqtrace_entry_ino(a,b,ip) \
157 __xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip))
158#define xfs_dqtrace_entry(a,b) \
159 __xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL)
160#else
161#define xfs_dqtrace_entry(a,b)
162#define xfs_dqtrace_entry_ino(a,b,ip)
163#endif
164
165#ifdef QUOTADEBUG 144#ifdef QUOTADEBUG
166extern void xfs_qm_dqprint(xfs_dquot_t *); 145extern void xfs_qm_dqprint(xfs_dquot_t *);
167#else 146#else
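For readers following the conversion: the one-line trace_xfs_dq*() calls added in the two files above are ordinary Linux tracepoints, declared in the xfs_trace.h header that the next file starts including (the header itself is not shown in this part of the diff). The sketch below is not the commit's actual definition; the class name, event name and recorded fields are invented purely to illustrate how one shared event class can back many one-line trace calls, and the usual trace-header boilerplate (TRACE_SYSTEM, the trailing include of trace/define_trace.h) is omitted.

#include <linux/tracepoint.h>

/*
 * Hypothetical event class for dquot tracepoints.  Every event built
 * on it records the same handful of dquot fields; the selection here
 * is an assumption, not what xfs_trace.h actually logs.
 */
DECLARE_EVENT_CLASS(xfs_dquot_class_sketch,
	TP_PROTO(struct xfs_dquot *dqp),
	TP_ARGS(dqp),
	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u32,		id)
		__field(unsigned,	flags)
		__field(unsigned,	nrefs)
	),
	TP_fast_assign(
		__entry->dev	= dqp->q_mount->m_super->s_dev;
		__entry->id	= be32_to_cpu(dqp->q_core.d_id);
		__entry->flags	= dqp->dq_flags;
		__entry->nrefs	= dqp->q_nrefs;
	),
	TP_printk("dev %d:%d id 0x%x flags 0x%x nrefs %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->id, __entry->flags, __entry->nrefs)
);

/* Each call site then needs only a one-line event definition. */
DEFINE_EVENT(xfs_dquot_class_sketch, xfs_dqput_wait_sketch,
	TP_PROTO(struct xfs_dquot *dqp),
	TP_ARGS(dqp));

With something of that shape in place, a removed call such as xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT") collapses to trace_xfs_dqput_wait(dqp), and the data is reported through the kernel's generic tracing facilities instead of the private ktrace ring buffers that the rest of this diff removes.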
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 45b1bfef7388..9e627a8b5b0e 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -47,6 +47,7 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_qm.h" 49#include "xfs_qm.h"
50#include "xfs_trace.h"
50 51
51/* 52/*
52 * The global quota manager. There is only one of these for the entire 53 * The global quota manager. There is only one of these for the entire
@@ -453,7 +454,7 @@ again:
453 xfs_dqunlock(dqp); 454 xfs_dqunlock(dqp);
454 continue; 455 continue;
455 } 456 }
456 xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); 457
457 /* XXX a sentinel would be better */ 458 /* XXX a sentinel would be better */
458 recl = XFS_QI_MPLRECLAIMS(mp); 459 recl = XFS_QI_MPLRECLAIMS(mp);
459 if (!xfs_dqflock_nowait(dqp)) { 460 if (!xfs_dqflock_nowait(dqp)) {
@@ -651,7 +652,7 @@ xfs_qm_dqattach_one(
651 */ 652 */
652 dqp = *IO_idqpp; 653 dqp = *IO_idqpp;
653 if (dqp) { 654 if (dqp) {
654 xfs_dqtrace_entry(dqp, "DQATTACH: found in ip"); 655 trace_xfs_dqattach_found(dqp);
655 return 0; 656 return 0;
656 } 657 }
657 658
@@ -704,7 +705,7 @@ xfs_qm_dqattach_one(
704 if (error) 705 if (error)
705 return error; 706 return error;
706 707
707 xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget"); 708 trace_xfs_dqattach_get(dqp);
708 709
709 /* 710 /*
710 * dqget may have dropped and re-acquired the ilock, but it guarantees 711 * dqget may have dropped and re-acquired the ilock, but it guarantees
@@ -890,15 +891,15 @@ xfs_qm_dqdetach(
890 if (!(ip->i_udquot || ip->i_gdquot)) 891 if (!(ip->i_udquot || ip->i_gdquot))
891 return; 892 return;
892 893
894 trace_xfs_dquot_dqdetach(ip);
895
893 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); 896 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
894 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); 897 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
895 if (ip->i_udquot) { 898 if (ip->i_udquot) {
896 xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
897 xfs_qm_dqrele(ip->i_udquot); 899 xfs_qm_dqrele(ip->i_udquot);
898 ip->i_udquot = NULL; 900 ip->i_udquot = NULL;
899 } 901 }
900 if (ip->i_gdquot) { 902 if (ip->i_gdquot) {
901 xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
902 xfs_qm_dqrele(ip->i_gdquot); 903 xfs_qm_dqrele(ip->i_gdquot);
903 ip->i_gdquot = NULL; 904 ip->i_gdquot = NULL;
904 } 905 }
@@ -977,7 +978,6 @@ xfs_qm_sync(
977 * across a disk write 978 * across a disk write
978 */ 979 */
979 xfs_qm_mplist_unlock(mp); 980 xfs_qm_mplist_unlock(mp);
980 xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
981 error = xfs_qm_dqflush(dqp, flush_flags); 981 error = xfs_qm_dqflush(dqp, flush_flags);
982 xfs_dqunlock(dqp); 982 xfs_dqunlock(dqp);
983 if (error && XFS_FORCED_SHUTDOWN(mp)) 983 if (error && XFS_FORCED_SHUTDOWN(mp))
@@ -1350,7 +1350,8 @@ xfs_qm_reset_dqcounts(
1350 xfs_disk_dquot_t *ddq; 1350 xfs_disk_dquot_t *ddq;
1351 int j; 1351 int j;
1352 1352
1353 xfs_buftrace("RESET DQUOTS", bp); 1353 trace_xfs_reset_dqcounts(bp, _RET_IP_);
1354
1354 /* 1355 /*
1355 * Reset all counters and timers. They'll be 1356 * Reset all counters and timers. They'll be
1356 * started afresh by xfs_qm_quotacheck. 1357 * started afresh by xfs_qm_quotacheck.
@@ -1543,7 +1544,9 @@ xfs_qm_quotacheck_dqadjust(
1543 xfs_qcnt_t rtblks) 1544 xfs_qcnt_t rtblks)
1544{ 1545{
1545 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1546 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1546 xfs_dqtrace_entry(dqp, "QCHECK DQADJUST"); 1547
1548 trace_xfs_dqadjust(dqp);
1549
1547 /* 1550 /*
1548 * Adjust the inode count and the block count to reflect this inode's 1551 * Adjust the inode count and the block count to reflect this inode's
1549 * resource usage. 1552 * resource usage.
@@ -1994,7 +1997,9 @@ xfs_qm_shake_freelist(
1994 */ 1997 */
1995 if (XFS_DQ_IS_DIRTY(dqp)) { 1998 if (XFS_DQ_IS_DIRTY(dqp)) {
1996 int error; 1999 int error;
1997 xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); 2000
2001 trace_xfs_dqshake_dirty(dqp);
2002
1998 /* 2003 /*
1999 * We flush it delayed write, so don't bother 2004 * We flush it delayed write, so don't bother
2000 * releasing the mplock. 2005 * releasing the mplock.
@@ -2038,7 +2043,9 @@ xfs_qm_shake_freelist(
2038 return nreclaimed; 2043 return nreclaimed;
2039 goto tryagain; 2044 goto tryagain;
2040 } 2045 }
2041 xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); 2046
2047 trace_xfs_dqshake_unlink(dqp);
2048
2042#ifdef QUOTADEBUG 2049#ifdef QUOTADEBUG
2043 cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n", 2050 cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
2044 dqp, be32_to_cpu(dqp->q_core.d_id)); 2051 dqp, be32_to_cpu(dqp->q_core.d_id));
@@ -2125,7 +2132,9 @@ xfs_qm_dqreclaim_one(void)
2125 */ 2132 */
2126 if (dqp->dq_flags & XFS_DQ_WANT) { 2133 if (dqp->dq_flags & XFS_DQ_WANT) {
2127 ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); 2134 ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
2128 xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT"); 2135
2136 trace_xfs_dqreclaim_want(dqp);
2137
2129 xfs_dqunlock(dqp); 2138 xfs_dqunlock(dqp);
2130 xfs_qm_freelist_unlock(xfs_Gqm); 2139 xfs_qm_freelist_unlock(xfs_Gqm);
2131 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2140 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
@@ -2171,7 +2180,9 @@ xfs_qm_dqreclaim_one(void)
2171 */ 2180 */
2172 if (XFS_DQ_IS_DIRTY(dqp)) { 2181 if (XFS_DQ_IS_DIRTY(dqp)) {
2173 int error; 2182 int error;
2174 xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); 2183
2184 trace_xfs_dqreclaim_dirty(dqp);
2185
2175 /* 2186 /*
2176 * We flush it delayed write, so don't bother 2187 * We flush it delayed write, so don't bother
2177 * releasing the freelist lock. 2188 * releasing the freelist lock.
@@ -2194,8 +2205,9 @@ xfs_qm_dqreclaim_one(void)
2194 if (!mutex_trylock(&dqp->q_hash->qh_lock)) 2205 if (!mutex_trylock(&dqp->q_hash->qh_lock))
2195 goto mplistunlock; 2206 goto mplistunlock;
2196 2207
2208 trace_xfs_dqreclaim_unlink(dqp);
2209
2197 ASSERT(dqp->q_nrefs == 0); 2210 ASSERT(dqp->q_nrefs == 0);
2198 xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
2199 XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); 2211 XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
2200 XQM_HASHLIST_REMOVE(dqp->q_hash, dqp); 2212 XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
2201 XQM_FREELIST_REMOVE(dqp); 2213 XQM_FREELIST_REMOVE(dqp);
@@ -2430,7 +2442,7 @@ xfs_qm_vop_dqalloc(
2430 } 2442 }
2431 } 2443 }
2432 if (uq) 2444 if (uq)
2433 xfs_dqtrace_entry_ino(uq, "DQALLOC", ip); 2445 trace_xfs_dquot_dqalloc(ip);
2434 2446
2435 xfs_iunlock(ip, lockflags); 2447 xfs_iunlock(ip, lockflags);
2436 if (O_udqpp) 2448 if (O_udqpp)
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 5d1a3b98a6e6..71af76fe8a23 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -49,6 +49,7 @@
49#include "xfs_buf_item.h" 49#include "xfs_buf_item.h"
50#include "xfs_utils.h" 50#include "xfs_utils.h"
51#include "xfs_qm.h" 51#include "xfs_qm.h"
52#include "xfs_trace.h"
52 53
53#ifdef DEBUG 54#ifdef DEBUG
54# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args) 55# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
@@ -496,7 +497,6 @@ xfs_qm_scall_setqlim(
496 ASSERT(error != ENOENT); 497 ASSERT(error != ENOENT);
497 return (error); 498 return (error);
498 } 499 }
499 xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
500 xfs_trans_dqjoin(tp, dqp); 500 xfs_trans_dqjoin(tp, dqp);
501 ddq = &dqp->q_core; 501 ddq = &dqp->q_core;
502 502
@@ -602,7 +602,6 @@ xfs_qm_scall_setqlim(
602 dqp->dq_flags |= XFS_DQ_DIRTY; 602 dqp->dq_flags |= XFS_DQ_DIRTY;
603 xfs_trans_log_dquot(tp, dqp); 603 xfs_trans_log_dquot(tp, dqp);
604 604
605 xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
606 error = xfs_trans_commit(tp, 0); 605 error = xfs_trans_commit(tp, 0);
607 xfs_qm_dqprint(dqp); 606 xfs_qm_dqprint(dqp);
608 xfs_qm_dqrele(dqp); 607 xfs_qm_dqrele(dqp);
@@ -630,7 +629,6 @@ xfs_qm_scall_getquota(
630 return (error); 629 return (error);
631 } 630 }
632 631
633 xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS");
634 /* 632 /*
635 * If everything's NULL, this dquot doesn't quite exist as far as 633 * If everything's NULL, this dquot doesn't quite exist as far as
636 * our utility programs are concerned. 634 * our utility programs are concerned.
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
deleted file mode 100644
index 2d494c26717f..000000000000
--- a/fs/xfs/support/ktrace.c
+++ /dev/null
@@ -1,323 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include <xfs.h>
19
20static kmem_zone_t *ktrace_hdr_zone;
21static kmem_zone_t *ktrace_ent_zone;
22static int ktrace_zentries;
23
24void __init
25ktrace_init(int zentries)
26{
27 ktrace_zentries = roundup_pow_of_two(zentries);
28
29 ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
30 "ktrace_hdr");
31 ASSERT(ktrace_hdr_zone);
32
33 ktrace_ent_zone = kmem_zone_init(ktrace_zentries
34 * sizeof(ktrace_entry_t),
35 "ktrace_ent");
36 ASSERT(ktrace_ent_zone);
37}
38
39void __exit
40ktrace_uninit(void)
41{
42 kmem_zone_destroy(ktrace_hdr_zone);
43 kmem_zone_destroy(ktrace_ent_zone);
44}
45
46/*
47 * ktrace_alloc()
48 *
49 * Allocate a ktrace header and enough buffering for the given
50 * number of entries. Round the number of entries up to a
51 * power of 2 so we can do fast masking to get the index from
52 * the atomic index counter.
53 */
54ktrace_t *
55ktrace_alloc(int nentries, unsigned int __nocast sleep)
56{
57 ktrace_t *ktp;
58 ktrace_entry_t *ktep;
59 int entries;
60
61 ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
62
63 if (ktp == (ktrace_t*)NULL) {
64 /*
65 * KM_SLEEP callers don't expect failure.
66 */
67 if (sleep & KM_SLEEP)
68 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
69
70 return NULL;
71 }
72
73 /*
74 * Special treatment for buffers with the ktrace_zentries entries
75 */
76 entries = roundup_pow_of_two(nentries);
77 if (entries == ktrace_zentries) {
78 ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
79 sleep);
80 } else {
81 ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
82 sleep | KM_LARGE);
83 }
84
85 if (ktep == NULL) {
86 /*
87 * KM_SLEEP callers don't expect failure.
88 */
89 if (sleep & KM_SLEEP)
90 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
91
92 kmem_free(ktp);
93
94 return NULL;
95 }
96
97 ktp->kt_entries = ktep;
98 ktp->kt_nentries = entries;
99 ASSERT(is_power_of_2(entries));
100 ktp->kt_index_mask = entries - 1;
101 atomic_set(&ktp->kt_index, 0);
102 ktp->kt_rollover = 0;
103 return ktp;
104}
105
106
107/*
108 * ktrace_free()
109 *
110 * Free up the ktrace header and buffer. It is up to the caller
111 * to ensure that no-one is referencing it.
112 */
113void
114ktrace_free(ktrace_t *ktp)
115{
116 if (ktp == (ktrace_t *)NULL)
117 return;
118
119 /*
120 * Special treatment for the Vnode trace buffer.
121 */
122 if (ktp->kt_nentries == ktrace_zentries)
123 kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
124 else
125 kmem_free(ktp->kt_entries);
126
127 kmem_zone_free(ktrace_hdr_zone, ktp);
128}
129
130
131/*
132 * Enter the given values into the "next" entry in the trace buffer.
133 * kt_index is always the index of the next entry to be filled.
134 */
135void
136ktrace_enter(
137 ktrace_t *ktp,
138 void *val0,
139 void *val1,
140 void *val2,
141 void *val3,
142 void *val4,
143 void *val5,
144 void *val6,
145 void *val7,
146 void *val8,
147 void *val9,
148 void *val10,
149 void *val11,
150 void *val12,
151 void *val13,
152 void *val14,
153 void *val15)
154{
155 int index;
156 ktrace_entry_t *ktep;
157
158 ASSERT(ktp != NULL);
159
160 /*
161 * Grab an entry by pushing the index up to the next one.
162 */
163 index = atomic_add_return(1, &ktp->kt_index);
164 index = (index - 1) & ktp->kt_index_mask;
165 if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
166 ktp->kt_rollover = 1;
167
168 ASSERT((index >= 0) && (index < ktp->kt_nentries));
169
170 ktep = &(ktp->kt_entries[index]);
171
172 ktep->val[0] = val0;
173 ktep->val[1] = val1;
174 ktep->val[2] = val2;
175 ktep->val[3] = val3;
176 ktep->val[4] = val4;
177 ktep->val[5] = val5;
178 ktep->val[6] = val6;
179 ktep->val[7] = val7;
180 ktep->val[8] = val8;
181 ktep->val[9] = val9;
182 ktep->val[10] = val10;
183 ktep->val[11] = val11;
184 ktep->val[12] = val12;
185 ktep->val[13] = val13;
186 ktep->val[14] = val14;
187 ktep->val[15] = val15;
188}
189
190/*
191 * Return the number of entries in the trace buffer.
192 */
193int
194ktrace_nentries(
195 ktrace_t *ktp)
196{
197 int index;
198 if (ktp == NULL)
199 return 0;
200
201 index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
202 return (ktp->kt_rollover ? ktp->kt_nentries : index);
203}
204
205/*
206 * ktrace_first()
207 *
208 * This is used to find the start of the trace buffer.
209 * In conjunction with ktrace_next() it can be used to
210 * iterate through the entire trace buffer. This code does
211 * not do any locking because it is assumed that it is called
212 * from the debugger.
213 *
214 * The caller must pass in a pointer to a ktrace_snap
215 * structure in which we will keep some state used to
216 * iterate through the buffer. This state must not be touched
217 * by any code outside of this module.
218 */
219ktrace_entry_t *
220ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
221{
222 ktrace_entry_t *ktep;
223 int index;
224 int nentries;
225
226 if (ktp->kt_rollover)
227 index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
228 else
229 index = 0;
230
231 ktsp->ks_start = index;
232 ktep = &(ktp->kt_entries[index]);
233
234 nentries = ktrace_nentries(ktp);
235 index++;
236 if (index < nentries) {
237 ktsp->ks_index = index;
238 } else {
239 ktsp->ks_index = 0;
240 if (index > nentries)
241 ktep = NULL;
242 }
243 return ktep;
244}
245
246/*
247 * ktrace_next()
248 *
249 * This is used to iterate through the entries of the given
250 * trace buffer. The caller must pass in the ktrace_snap_t
251 * structure initialized by ktrace_first(). The return value
252 * will be either a pointer to the next ktrace_entry or NULL
253 * if all of the entries have been traversed.
254 */
255ktrace_entry_t *
256ktrace_next(
257 ktrace_t *ktp,
258 ktrace_snap_t *ktsp)
259{
260 int index;
261 ktrace_entry_t *ktep;
262
263 index = ktsp->ks_index;
264 if (index == ktsp->ks_start) {
265 ktep = NULL;
266 } else {
267 ktep = &ktp->kt_entries[index];
268 }
269
270 index++;
271 if (index == ktrace_nentries(ktp)) {
272 ktsp->ks_index = 0;
273 } else {
274 ktsp->ks_index = index;
275 }
276
277 return ktep;
278}
279
280/*
281 * ktrace_skip()
282 *
283 * Skip the next "count" entries and return the entry after that.
284 * Return NULL if this causes us to iterate past the beginning again.
285 */
286ktrace_entry_t *
287ktrace_skip(
288 ktrace_t *ktp,
289 int count,
290 ktrace_snap_t *ktsp)
291{
292 int index;
293 int new_index;
294 ktrace_entry_t *ktep;
295 int nentries = ktrace_nentries(ktp);
296
297 index = ktsp->ks_index;
298 new_index = index + count;
299 while (new_index >= nentries) {
300 new_index -= nentries;
301 }
302 if (index == ktsp->ks_start) {
303 /*
304 * We've iterated around to the start, so we're done.
305 */
306 ktep = NULL;
307 } else if ((new_index < index) && (index < ktsp->ks_index)) {
308 /*
309 * We've skipped past the start again, so we're done.
310 */
311 ktep = NULL;
312 ktsp->ks_index = ktsp->ks_start;
313 } else {
314 ktep = &(ktp->kt_entries[new_index]);
315 new_index++;
316 if (new_index == nentries) {
317 ktsp->ks_index = 0;
318 } else {
319 ktsp->ks_index = new_index;
320 }
321 }
322 return ktep;
323}
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
deleted file mode 100644
index 741d6947ca60..000000000000
--- a/fs/xfs/support/ktrace.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_SUPPORT_KTRACE_H__
19#define __XFS_SUPPORT_KTRACE_H__
20
21/*
22 * Trace buffer entry structure.
23 */
24typedef struct ktrace_entry {
25 void *val[16];
26} ktrace_entry_t;
27
28/*
29 * Trace buffer header structure.
30 */
31typedef struct ktrace {
32 int kt_nentries; /* number of entries in trace buf */
33 atomic_t kt_index; /* current index in entries */
34 unsigned int kt_index_mask;
35 int kt_rollover;
36 ktrace_entry_t *kt_entries; /* buffer of entries */
37} ktrace_t;
38
39/*
40 * Trace buffer snapshot structure.
41 */
42typedef struct ktrace_snap {
43 int ks_start; /* kt_index at time of snap */
44 int ks_index; /* current index */
45} ktrace_snap_t;
46
47
48#ifdef CONFIG_XFS_TRACE
49
50extern void ktrace_init(int zentries);
51extern void ktrace_uninit(void);
52
53extern ktrace_t *ktrace_alloc(int, unsigned int __nocast);
54extern void ktrace_free(ktrace_t *);
55
56extern void ktrace_enter(
57 ktrace_t *,
58 void *,
59 void *,
60 void *,
61 void *,
62 void *,
63 void *,
64 void *,
65 void *,
66 void *,
67 void *,
68 void *,
69 void *,
70 void *,
71 void *,
72 void *,
73 void *);
74
75extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *);
76extern int ktrace_nentries(ktrace_t *);
77extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
78extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
79
80#else
81#define ktrace_init(x) do { } while (0)
82#define ktrace_uninit() do { } while (0)
83#endif /* CONFIG_XFS_TRACE */
84
85#endif /* __XFS_SUPPORT_KTRACE_H__ */
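The ktrace API deleted in the two files above had to be driven by hand at every trace point. Purely as an illustration of the mechanism the removed comments describe (power-of-two sizing so ktrace_enter() can mask the atomic index into a slot, and ktrace_first()/ktrace_next() walking the ring oldest-first), here is a sketch of a typical caller; the function and variable names are invented for the sketch, and these calls only exist when CONFIG_XFS_TRACE is set.

/* Hypothetical ktrace user, for illustration only. */
static ktrace_t *example_trace_buf;

static void example_trace_point(void *obj, long state)
{
	if (!example_trace_buf)
		example_trace_buf = ktrace_alloc(64, KM_SLEEP);

	/* ktrace_enter() always takes the header plus 16 void * slots */
	ktrace_enter(example_trace_buf,
		     (void *)(__psint_t)__LINE__, obj, (void *)state,
		     NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL, NULL, NULL);
}

static void example_trace_dump(void)
{
	ktrace_snap_t	snap;
	ktrace_entry_t	*ktep;

	if (!example_trace_buf)
		return;

	for (ktep = ktrace_first(example_trace_buf, &snap);
	     ktep != NULL;
	     ktep = ktrace_next(example_trace_buf, &snap))
		cmn_err(CE_DEBUG, "line %ld obj 0x%p state %ld",
			(long)ktep->val[0], ktep->val[1],
			(long)ktep->val[2]);
}

Every such site had to marshal its own argument list, and the buffers were mainly intended to be read from a debugger (see the ktrace_first() comment above), which is why this diff retires the whole facility in favour of tracepoints.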
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 17254b529c54..5ad8ad3a1dcd 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -25,21 +25,5 @@
25/* #define QUOTADEBUG 1 */ 25/* #define QUOTADEBUG 1 */
26#endif 26#endif
27 27
28#ifdef CONFIG_XFS_TRACE
29#define XFS_ALLOC_TRACE 1
30#define XFS_ATTR_TRACE 1
31#define XFS_BLI_TRACE 1
32#define XFS_BMAP_TRACE 1
33#define XFS_BTREE_TRACE 1
34#define XFS_DIR2_TRACE 1
35#define XFS_DQUOT_TRACE 1
36#define XFS_ILOCK_TRACE 1
37#define XFS_LOG_TRACE 1
38#define XFS_RW_TRACE 1
39#define XFS_BUF_TRACE 1
40#define XFS_INODE_TRACE 1
41#define XFS_FILESTREAMS_TRACE 1
42#endif
43
44#include <linux-2.6/xfs_linux.h> 28#include <linux-2.6/xfs_linux.h>
45#endif /* __XFS_H__ */ 29#endif /* __XFS_H__ */
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index a5d54bf4931b..6702bd865811 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -86,6 +86,20 @@ typedef struct xfs_agf {
86#define XFS_AGF_NUM_BITS 12 86#define XFS_AGF_NUM_BITS 12
87#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) 87#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
88 88
89#define XFS_AGF_FLAGS \
90 { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
91 { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
92 { XFS_AGF_SEQNO, "SEQNO" }, \
93 { XFS_AGF_LENGTH, "LENGTH" }, \
94 { XFS_AGF_ROOTS, "ROOTS" }, \
95 { XFS_AGF_LEVELS, "LEVELS" }, \
96 { XFS_AGF_FLFIRST, "FLFIRST" }, \
97 { XFS_AGF_FLLAST, "FLLAST" }, \
98 { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
99 { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
100 { XFS_AGF_LONGEST, "LONGEST" }, \
101 { XFS_AGF_BTREEBLKS, "BTREEBLKS" }
102
89/* disk block (xfs_daddr_t) in the AG */ 103/* disk block (xfs_daddr_t) in the AG */
90#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) 104#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
91#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) 105#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
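The XFS_AGF_FLAGS table added above, like the XFS_ALLOC_TYPES and XFS_ATTR_FLAGS tables further down in this diff, is laid out as { value, "name" } pairs so the new tracepoints can pretty-print flag words and enums with the tracing helpers __print_flags() and __print_symbolic(). The event below is a hypothetical illustration of that use, not the commit's actual trace_xfs_agf definition, and the usual trace-header boilerplate is again omitted.

TRACE_EVENT(xfs_agf_fields_sketch,
	TP_PROTO(int fields),
	TP_ARGS(fields),
	TP_STRUCT__entry(
		__field(int, fields)
	),
	TP_fast_assign(
		__entry->fields = fields;
	),
	/* XFS_AGF_FLAGS expands to the { bit, "NAME" } pairs above */
	TP_printk("fields %s",
		  __print_flags(__entry->fields, "|", XFS_AGF_FLAGS))
);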
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 2cf944eb796d..a1c65fc6d9c4 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -38,6 +38,7 @@
38#include "xfs_ialloc.h" 38#include "xfs_ialloc.h"
39#include "xfs_alloc.h" 39#include "xfs_alloc.h"
40#include "xfs_error.h" 40#include "xfs_error.h"
41#include "xfs_trace.h"
41 42
42 43
43#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b))) 44#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
@@ -51,30 +52,6 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
51 xfs_agblock_t bno, 52 xfs_agblock_t bno,
52 xfs_extlen_t len); 53 xfs_extlen_t len);
53 54
54#if defined(XFS_ALLOC_TRACE)
55ktrace_t *xfs_alloc_trace_buf;
56
57#define TRACE_ALLOC(s,a) \
58 xfs_alloc_trace_alloc(__func__, s, a, __LINE__)
59#define TRACE_FREE(s,a,b,x,f) \
60 xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__)
61#define TRACE_MODAGF(s,a,f) \
62 xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__)
63#define TRACE_BUSY(__func__,s,ag,agb,l,sl,tp) \
64 xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
65#define TRACE_UNBUSY(__func__,s,ag,sl,tp) \
66 xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
67#define TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp) \
68 xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
69#else
70#define TRACE_ALLOC(s,a)
71#define TRACE_FREE(s,a,b,x,f)
72#define TRACE_MODAGF(s,a,f)
73#define TRACE_BUSY(s,a,ag,agb,l,sl,tp)
74#define TRACE_UNBUSY(fname,s,ag,sl,tp)
75#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp)
76#endif /* XFS_ALLOC_TRACE */
77
78/* 55/*
79 * Prototypes for per-ag allocation routines 56 * Prototypes for per-ag allocation routines
80 */ 57 */
@@ -498,124 +475,6 @@ xfs_alloc_read_agfl(
498 return 0; 475 return 0;
499} 476}
500 477
501#if defined(XFS_ALLOC_TRACE)
502/*
503 * Add an allocation trace entry for an alloc call.
504 */
505STATIC void
506xfs_alloc_trace_alloc(
507 const char *name, /* function tag string */
508 char *str, /* additional string */
509 xfs_alloc_arg_t *args, /* allocation argument structure */
510 int line) /* source line number */
511{
512 ktrace_enter(xfs_alloc_trace_buf,
513 (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
514 (void *)name,
515 (void *)str,
516 (void *)args->mp,
517 (void *)(__psunsigned_t)args->agno,
518 (void *)(__psunsigned_t)args->agbno,
519 (void *)(__psunsigned_t)args->minlen,
520 (void *)(__psunsigned_t)args->maxlen,
521 (void *)(__psunsigned_t)args->mod,
522 (void *)(__psunsigned_t)args->prod,
523 (void *)(__psunsigned_t)args->minleft,
524 (void *)(__psunsigned_t)args->total,
525 (void *)(__psunsigned_t)args->alignment,
526 (void *)(__psunsigned_t)args->len,
527 (void *)((((__psint_t)args->type) << 16) |
528 (__psint_t)args->otype),
529 (void *)(__psint_t)((args->wasdel << 3) |
530 (args->wasfromfl << 2) |
531 (args->isfl << 1) |
532 (args->userdata << 0)));
533}
534
535/*
536 * Add an allocation trace entry for a free call.
537 */
538STATIC void
539xfs_alloc_trace_free(
540 const char *name, /* function tag string */
541 char *str, /* additional string */
542 xfs_mount_t *mp, /* file system mount point */
543 xfs_agnumber_t agno, /* allocation group number */
544 xfs_agblock_t agbno, /* a.g. relative block number */
545 xfs_extlen_t len, /* length of extent */
546 int isfl, /* set if is freelist allocation/free */
547 int line) /* source line number */
548{
549 ktrace_enter(xfs_alloc_trace_buf,
550 (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
551 (void *)name,
552 (void *)str,
553 (void *)mp,
554 (void *)(__psunsigned_t)agno,
555 (void *)(__psunsigned_t)agbno,
556 (void *)(__psunsigned_t)len,
557 (void *)(__psint_t)isfl,
558 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
559}
560
561/*
562 * Add an allocation trace entry for modifying an agf.
563 */
564STATIC void
565xfs_alloc_trace_modagf(
566 const char *name, /* function tag string */
567 char *str, /* additional string */
568 xfs_mount_t *mp, /* file system mount point */
569 xfs_agf_t *agf, /* new agf value */
570 int flags, /* logging flags for agf */
571 int line) /* source line number */
572{
573 ktrace_enter(xfs_alloc_trace_buf,
574 (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
575 (void *)name,
576 (void *)str,
577 (void *)mp,
578 (void *)(__psint_t)flags,
579 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
580 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
581 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
582 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
583 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
584 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
585 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
586 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
587 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
588 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
589 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
590}
591
592STATIC void
593xfs_alloc_trace_busy(
594 const char *name, /* function tag string */
595 char *str, /* additional string */
596 xfs_mount_t *mp, /* file system mount point */
597 xfs_agnumber_t agno, /* allocation group number */
598 xfs_agblock_t agbno, /* a.g. relative block number */
599 xfs_extlen_t len, /* length of extent */
600 int slot, /* perag Busy slot */
601 xfs_trans_t *tp,
602 int trtype, /* type: add, delete, search */
603 int line) /* source line number */
604{
605 ktrace_enter(xfs_alloc_trace_buf,
606 (void *)(__psint_t)(trtype | (line << 16)),
607 (void *)name,
608 (void *)str,
609 (void *)mp,
610 (void *)(__psunsigned_t)agno,
611 (void *)(__psunsigned_t)agbno,
612 (void *)(__psunsigned_t)len,
613 (void *)(__psint_t)slot,
614 (void *)tp,
615 NULL, NULL, NULL, NULL, NULL, NULL, NULL);
616}
617#endif /* XFS_ALLOC_TRACE */
618
619/* 478/*
620 * Allocation group level functions. 479 * Allocation group level functions.
621 */ 480 */
@@ -665,9 +524,6 @@ xfs_alloc_ag_vextent(
665 */ 524 */
666 if (args->agbno != NULLAGBLOCK) { 525 if (args->agbno != NULLAGBLOCK) {
667 xfs_agf_t *agf; /* allocation group freelist header */ 526 xfs_agf_t *agf; /* allocation group freelist header */
668#ifdef XFS_ALLOC_TRACE
669 xfs_mount_t *mp = args->mp;
670#endif
671 long slen = (long)args->len; 527 long slen = (long)args->len;
672 528
673 ASSERT(args->len >= args->minlen && args->len <= args->maxlen); 529 ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
@@ -682,7 +538,6 @@ xfs_alloc_ag_vextent(
682 args->pag->pagf_freeblks -= args->len; 538 args->pag->pagf_freeblks -= args->len;
683 ASSERT(be32_to_cpu(agf->agf_freeblks) <= 539 ASSERT(be32_to_cpu(agf->agf_freeblks) <=
684 be32_to_cpu(agf->agf_length)); 540 be32_to_cpu(agf->agf_length));
685 TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
686 xfs_alloc_log_agf(args->tp, args->agbp, 541 xfs_alloc_log_agf(args->tp, args->agbp,
687 XFS_AGF_FREEBLKS); 542 XFS_AGF_FREEBLKS);
688 /* search the busylist for these blocks */ 543 /* search the busylist for these blocks */
@@ -792,13 +647,14 @@ xfs_alloc_ag_vextent_exact(
792 } 647 }
793 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 648 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
794 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 649 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
795 TRACE_ALLOC("normal", args); 650
651 trace_xfs_alloc_exact_done(args);
796 args->wasfromfl = 0; 652 args->wasfromfl = 0;
797 return 0; 653 return 0;
798 654
799error0: 655error0:
800 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 656 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
801 TRACE_ALLOC("error", args); 657 trace_xfs_alloc_exact_error(args);
802 return error; 658 return error;
803} 659}
804 660
@@ -958,7 +814,7 @@ xfs_alloc_ag_vextent_near(
958 args->len = blen; 814 args->len = blen;
959 if (!xfs_alloc_fix_minleft(args)) { 815 if (!xfs_alloc_fix_minleft(args)) {
960 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 816 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
961 TRACE_ALLOC("nominleft", args); 817 trace_xfs_alloc_near_nominleft(args);
962 return 0; 818 return 0;
963 } 819 }
964 blen = args->len; 820 blen = args->len;
@@ -981,7 +837,8 @@ xfs_alloc_ag_vextent_near(
981 goto error0; 837 goto error0;
982 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 838 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
983 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 839 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
984 TRACE_ALLOC("first", args); 840
841 trace_xfs_alloc_near_first(args);
985 return 0; 842 return 0;
986 } 843 }
987 /* 844 /*
@@ -1272,7 +1129,7 @@ xfs_alloc_ag_vextent_near(
1272 * If we couldn't get anything, give up. 1129 * If we couldn't get anything, give up.
1273 */ 1130 */
1274 if (bno_cur_lt == NULL && bno_cur_gt == NULL) { 1131 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1275 TRACE_ALLOC("neither", args); 1132 trace_xfs_alloc_size_neither(args);
1276 args->agbno = NULLAGBLOCK; 1133 args->agbno = NULLAGBLOCK;
1277 return 0; 1134 return 0;
1278 } 1135 }
@@ -1299,7 +1156,7 @@ xfs_alloc_ag_vextent_near(
1299 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1156 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1300 xfs_alloc_fix_len(args); 1157 xfs_alloc_fix_len(args);
1301 if (!xfs_alloc_fix_minleft(args)) { 1158 if (!xfs_alloc_fix_minleft(args)) {
1302 TRACE_ALLOC("nominleft", args); 1159 trace_xfs_alloc_near_nominleft(args);
1303 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1160 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1304 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1161 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1305 return 0; 1162 return 0;
@@ -1314,13 +1171,18 @@ xfs_alloc_ag_vextent_near(
1314 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, 1171 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1315 ltnew, rlen, XFSA_FIXUP_BNO_OK))) 1172 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1316 goto error0; 1173 goto error0;
1317 TRACE_ALLOC(j ? "gt" : "lt", args); 1174
1175 if (j)
1176 trace_xfs_alloc_near_greater(args);
1177 else
1178 trace_xfs_alloc_near_lesser(args);
1179
1318 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1180 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1319 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1181 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1320 return 0; 1182 return 0;
1321 1183
1322 error0: 1184 error0:
1323 TRACE_ALLOC("error", args); 1185 trace_xfs_alloc_near_error(args);
1324 if (cnt_cur != NULL) 1186 if (cnt_cur != NULL)
1325 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1187 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1326 if (bno_cur_lt != NULL) 1188 if (bno_cur_lt != NULL)
@@ -1371,7 +1233,7 @@ xfs_alloc_ag_vextent_size(
1371 goto error0; 1233 goto error0;
1372 if (i == 0 || flen == 0) { 1234 if (i == 0 || flen == 0) {
1373 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1235 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1374 TRACE_ALLOC("noentry", args); 1236 trace_xfs_alloc_size_noentry(args);
1375 return 0; 1237 return 0;
1376 } 1238 }
1377 ASSERT(i == 1); 1239 ASSERT(i == 1);
@@ -1448,7 +1310,7 @@ xfs_alloc_ag_vextent_size(
1448 xfs_alloc_fix_len(args); 1310 xfs_alloc_fix_len(args);
1449 if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) { 1311 if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
1450 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1312 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1451 TRACE_ALLOC("nominleft", args); 1313 trace_xfs_alloc_size_nominleft(args);
1452 args->agbno = NULLAGBLOCK; 1314 args->agbno = NULLAGBLOCK;
1453 return 0; 1315 return 0;
1454 } 1316 }
@@ -1471,11 +1333,11 @@ xfs_alloc_ag_vextent_size(
1471 args->agbno + args->len <= 1333 args->agbno + args->len <=
1472 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), 1334 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1473 error0); 1335 error0);
1474 TRACE_ALLOC("normal", args); 1336 trace_xfs_alloc_size_done(args);
1475 return 0; 1337 return 0;
1476 1338
1477error0: 1339error0:
1478 TRACE_ALLOC("error", args); 1340 trace_xfs_alloc_size_error(args);
1479 if (cnt_cur) 1341 if (cnt_cur)
1480 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1342 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1481 if (bno_cur) 1343 if (bno_cur)
@@ -1534,7 +1396,7 @@ xfs_alloc_ag_vextent_small(
1534 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), 1396 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1535 error0); 1397 error0);
1536 args->wasfromfl = 1; 1398 args->wasfromfl = 1;
1537 TRACE_ALLOC("freelist", args); 1399 trace_xfs_alloc_small_freelist(args);
1538 *stat = 0; 1400 *stat = 0;
1539 return 0; 1401 return 0;
1540 } 1402 }
@@ -1556,17 +1418,17 @@ xfs_alloc_ag_vextent_small(
1556 */ 1418 */
1557 if (flen < args->minlen) { 1419 if (flen < args->minlen) {
1558 args->agbno = NULLAGBLOCK; 1420 args->agbno = NULLAGBLOCK;
1559 TRACE_ALLOC("notenough", args); 1421 trace_xfs_alloc_small_notenough(args);
1560 flen = 0; 1422 flen = 0;
1561 } 1423 }
1562 *fbnop = fbno; 1424 *fbnop = fbno;
1563 *flenp = flen; 1425 *flenp = flen;
1564 *stat = 1; 1426 *stat = 1;
1565 TRACE_ALLOC("normal", args); 1427 trace_xfs_alloc_small_done(args);
1566 return 0; 1428 return 0;
1567 1429
1568error0: 1430error0:
1569 TRACE_ALLOC("error", args); 1431 trace_xfs_alloc_small_error(args);
1570 return error; 1432 return error;
1571} 1433}
1572 1434
@@ -1809,17 +1671,14 @@ xfs_free_ag_extent(
1809 be32_to_cpu(agf->agf_freeblks) <= 1671 be32_to_cpu(agf->agf_freeblks) <=
1810 be32_to_cpu(agf->agf_length), 1672 be32_to_cpu(agf->agf_length),
1811 error0); 1673 error0);
1812 TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
1813 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); 1674 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
1814 if (!isfl) 1675 if (!isfl)
1815 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); 1676 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1816 XFS_STATS_INC(xs_freex); 1677 XFS_STATS_INC(xs_freex);
1817 XFS_STATS_ADD(xs_freeb, len); 1678 XFS_STATS_ADD(xs_freeb, len);
1818 } 1679 }
1819 TRACE_FREE(haveleft ? 1680
1820 (haveright ? "both" : "left") : 1681 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1821 (haveright ? "right" : "none"),
1822 agno, bno, len, isfl);
1823 1682
1824 /* 1683 /*
1825 * Since blocks move to the free list without the coordination 1684 * Since blocks move to the free list without the coordination
@@ -1836,7 +1695,7 @@ xfs_free_ag_extent(
1836 return 0; 1695 return 0;
1837 1696
1838 error0: 1697 error0:
1839 TRACE_FREE("error", agno, bno, len, isfl); 1698 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1840 if (bno_cur) 1699 if (bno_cur)
1841 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 1700 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1842 if (cnt_cur) 1701 if (cnt_cur)
@@ -2122,7 +1981,6 @@ xfs_alloc_get_freelist(
2122 logflags |= XFS_AGF_BTREEBLKS; 1981 logflags |= XFS_AGF_BTREEBLKS;
2123 } 1982 }
2124 1983
2125 TRACE_MODAGF(NULL, agf, logflags);
2126 xfs_alloc_log_agf(tp, agbp, logflags); 1984 xfs_alloc_log_agf(tp, agbp, logflags);
2127 *bnop = bno; 1985 *bnop = bno;
2128 1986
@@ -2165,6 +2023,8 @@ xfs_alloc_log_agf(
2165 sizeof(xfs_agf_t) 2023 sizeof(xfs_agf_t)
2166 }; 2024 };
2167 2025
2026 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2027
2168 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); 2028 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2169 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); 2029 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2170} 2030}
@@ -2230,13 +2090,11 @@ xfs_alloc_put_freelist(
2230 logflags |= XFS_AGF_BTREEBLKS; 2090 logflags |= XFS_AGF_BTREEBLKS;
2231 } 2091 }
2232 2092
2233 TRACE_MODAGF(NULL, agf, logflags);
2234 xfs_alloc_log_agf(tp, agbp, logflags); 2093 xfs_alloc_log_agf(tp, agbp, logflags);
2235 2094
2236 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); 2095 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2237 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)]; 2096 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
2238 *blockp = cpu_to_be32(bno); 2097 *blockp = cpu_to_be32(bno);
2239 TRACE_MODAGF(NULL, agf, logflags);
2240 xfs_alloc_log_agf(tp, agbp, logflags); 2098 xfs_alloc_log_agf(tp, agbp, logflags);
2241 xfs_trans_log_buf(tp, agflbp, 2099 xfs_trans_log_buf(tp, agflbp,
2242 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl), 2100 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
@@ -2399,7 +2257,7 @@ xfs_alloc_vextent(
2399 args->minlen > args->maxlen || args->minlen > agsize || 2257 args->minlen > args->maxlen || args->minlen > agsize ||
2400 args->mod >= args->prod) { 2258 args->mod >= args->prod) {
2401 args->fsbno = NULLFSBLOCK; 2259 args->fsbno = NULLFSBLOCK;
2402 TRACE_ALLOC("badargs", args); 2260 trace_xfs_alloc_vextent_badargs(args);
2403 return 0; 2261 return 0;
2404 } 2262 }
2405 minleft = args->minleft; 2263 minleft = args->minleft;
@@ -2418,12 +2276,12 @@ xfs_alloc_vextent(
2418 error = xfs_alloc_fix_freelist(args, 0); 2276 error = xfs_alloc_fix_freelist(args, 0);
2419 args->minleft = minleft; 2277 args->minleft = minleft;
2420 if (error) { 2278 if (error) {
2421 TRACE_ALLOC("nofix", args); 2279 trace_xfs_alloc_vextent_nofix(args);
2422 goto error0; 2280 goto error0;
2423 } 2281 }
2424 if (!args->agbp) { 2282 if (!args->agbp) {
2425 up_read(&mp->m_peraglock); 2283 up_read(&mp->m_peraglock);
2426 TRACE_ALLOC("noagbp", args); 2284 trace_xfs_alloc_vextent_noagbp(args);
2427 break; 2285 break;
2428 } 2286 }
2429 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); 2287 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
@@ -2488,7 +2346,7 @@ xfs_alloc_vextent(
2488 error = xfs_alloc_fix_freelist(args, flags); 2346 error = xfs_alloc_fix_freelist(args, flags);
2489 args->minleft = minleft; 2347 args->minleft = minleft;
2490 if (error) { 2348 if (error) {
2491 TRACE_ALLOC("nofix", args); 2349 trace_xfs_alloc_vextent_nofix(args);
2492 goto error0; 2350 goto error0;
2493 } 2351 }
2494 /* 2352 /*
@@ -2499,7 +2357,9 @@ xfs_alloc_vextent(
2499 goto error0; 2357 goto error0;
2500 break; 2358 break;
2501 } 2359 }
2502 TRACE_ALLOC("loopfailed", args); 2360
2361 trace_xfs_alloc_vextent_loopfailed(args);
2362
2503 /* 2363 /*
2504 * Didn't work, figure out the next iteration. 2364 * Didn't work, figure out the next iteration.
2505 */ 2365 */
@@ -2526,7 +2386,7 @@ xfs_alloc_vextent(
2526 if (args->agno == sagno) { 2386 if (args->agno == sagno) {
2527 if (no_min == 1) { 2387 if (no_min == 1) {
2528 args->agbno = NULLAGBLOCK; 2388 args->agbno = NULLAGBLOCK;
2529 TRACE_ALLOC("allfailed", args); 2389 trace_xfs_alloc_vextent_allfailed(args);
2530 break; 2390 break;
2531 } 2391 }
2532 if (flags == 0) { 2392 if (flags == 0) {
@@ -2642,16 +2502,16 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
2642 } 2502 }
2643 } 2503 }
2644 2504
2505 trace_xfs_alloc_busy(mp, agno, bno, len, n);
2506
2645 if (n < XFS_PAGB_NUM_SLOTS) { 2507 if (n < XFS_PAGB_NUM_SLOTS) {
2646 bsy = &mp->m_perag[agno].pagb_list[n]; 2508 bsy = &mp->m_perag[agno].pagb_list[n];
2647 mp->m_perag[agno].pagb_count++; 2509 mp->m_perag[agno].pagb_count++;
2648 TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp);
2649 bsy->busy_start = bno; 2510 bsy->busy_start = bno;
2650 bsy->busy_length = len; 2511 bsy->busy_length = len;
2651 bsy->busy_tp = tp; 2512 bsy->busy_tp = tp;
2652 xfs_trans_add_busy(tp, agno, n); 2513 xfs_trans_add_busy(tp, agno, n);
2653 } else { 2514 } else {
2654 TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp);
2655 /* 2515 /*
2656 * The busy list is full! Since it is now not possible to 2516 * The busy list is full! Since it is now not possible to
2657 * track the free block, make this a synchronous transaction 2517 * track the free block, make this a synchronous transaction
@@ -2678,12 +2538,12 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
2678 list = mp->m_perag[agno].pagb_list; 2538 list = mp->m_perag[agno].pagb_list;
2679 2539
2680 ASSERT(idx < XFS_PAGB_NUM_SLOTS); 2540 ASSERT(idx < XFS_PAGB_NUM_SLOTS);
2541
2542 trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp);
2543
2681 if (list[idx].busy_tp == tp) { 2544 if (list[idx].busy_tp == tp) {
2682 TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp);
2683 list[idx].busy_tp = NULL; 2545 list[idx].busy_tp = NULL;
2684 mp->m_perag[agno].pagb_count--; 2546 mp->m_perag[agno].pagb_count--;
2685 } else {
2686 TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp);
2687 } 2547 }
2688 2548
2689 spin_unlock(&mp->m_perag[agno].pagb_lock); 2549 spin_unlock(&mp->m_perag[agno].pagb_lock);
@@ -2724,24 +2584,22 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
2724 if ((bno > bend) || (uend < bsy->busy_start)) { 2584 if ((bno > bend) || (uend < bsy->busy_start)) {
2725 cnt--; 2585 cnt--;
2726 } else { 2586 } else {
2727 TRACE_BUSYSEARCH("xfs_alloc_search_busy",
2728 "found1", agno, bno, len, tp);
2729 break; 2587 break;
2730 } 2588 }
2731 } 2589 }
2732 } 2590 }
2733 2591
2592 trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt);
2593
2734 /* 2594 /*
2735 * If a block was found, force the log through the LSN of the 2595 * If a block was found, force the log through the LSN of the
2736 * transaction that freed the block 2596 * transaction that freed the block
2737 */ 2597 */
2738 if (cnt) { 2598 if (cnt) {
2739 TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
2740 lsn = bsy->busy_tp->t_commit_lsn; 2599 lsn = bsy->busy_tp->t_commit_lsn;
2741 spin_unlock(&mp->m_perag[agno].pagb_lock); 2600 spin_unlock(&mp->m_perag[agno].pagb_lock);
2742 xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); 2601 xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
2743 } else { 2602 } else {
2744 TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
2745 spin_unlock(&mp->m_perag[agno].pagb_lock); 2603 spin_unlock(&mp->m_perag[agno].pagb_lock);
2746 } 2604 }
2747} 2605}
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index e704caee10df..599bffa39784 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -37,6 +37,15 @@ typedef enum xfs_alloctype
37 XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */ 37 XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */
38} xfs_alloctype_t; 38} xfs_alloctype_t;
39 39
40#define XFS_ALLOC_TYPES \
41 { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
42 { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \
43 { XFS_ALLOCTYPE_START_AG, "START_AG" }, \
44 { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \
45 { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \
46 { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \
47 { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" }
48
40/* 49/*
41 * Flags for xfs_alloc_fix_freelist. 50 * Flags for xfs_alloc_fix_freelist.
42 */ 51 */
@@ -109,24 +118,6 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp,
109 118
110#ifdef __KERNEL__ 119#ifdef __KERNEL__
111 120
112#if defined(XFS_ALLOC_TRACE)
113/*
114 * Allocation tracing buffer size.
115 */
116#define XFS_ALLOC_TRACE_SIZE 4096
117extern ktrace_t *xfs_alloc_trace_buf;
118
119/*
120 * Types for alloc tracing.
121 */
122#define XFS_ALLOC_KTRACE_ALLOC 1
123#define XFS_ALLOC_KTRACE_FREE 2
124#define XFS_ALLOC_KTRACE_MODAGF 3
125#define XFS_ALLOC_KTRACE_BUSY 4
126#define XFS_ALLOC_KTRACE_UNBUSY 5
127#define XFS_ALLOC_KTRACE_BUSYSEARCH 6
128#endif
129
130void 121void
131xfs_alloc_mark_busy(xfs_trans_t *tp, 122xfs_alloc_mark_busy(xfs_trans_t *tp,
132 xfs_agnumber_t agno, 123 xfs_agnumber_t agno,
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index c10c3a292d30..adbd9141aea1 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -39,6 +39,7 @@
39#include "xfs_ialloc.h" 39#include "xfs_ialloc.h"
40#include "xfs_alloc.h" 40#include "xfs_alloc.h"
41#include "xfs_error.h" 41#include "xfs_error.h"
42#include "xfs_trace.h"
42 43
43 44
44STATIC struct xfs_btree_cur * 45STATIC struct xfs_btree_cur *
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 8fe6f6b78a4a..e953b6cfb2a8 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -47,6 +47,7 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_rw.h" 48#include "xfs_rw.h"
49#include "xfs_vnodeops.h" 49#include "xfs_vnodeops.h"
50#include "xfs_trace.h"
50 51
51/* 52/*
52 * xfs_attr.c 53 * xfs_attr.c
@@ -89,10 +90,6 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
89 90
90#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */ 91#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
91 92
92#if defined(XFS_ATTR_TRACE)
93ktrace_t *xfs_attr_trace_buf;
94#endif
95
96STATIC int 93STATIC int
97xfs_attr_name_to_xname( 94xfs_attr_name_to_xname(
98 struct xfs_name *xname, 95 struct xfs_name *xname,
@@ -640,7 +637,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
640 return EIO; 637 return EIO;
641 638
642 xfs_ilock(dp, XFS_ILOCK_SHARED); 639 xfs_ilock(dp, XFS_ILOCK_SHARED);
643 xfs_attr_trace_l_c("syscall start", context);
644 640
645 /* 641 /*
646 * Decide on what work routines to call based on the inode size. 642 * Decide on what work routines to call based on the inode size.
@@ -656,7 +652,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
656 } 652 }
657 653
658 xfs_iunlock(dp, XFS_ILOCK_SHARED); 654 xfs_iunlock(dp, XFS_ILOCK_SHARED);
659 xfs_attr_trace_l_c("syscall end", context);
660 655
661 return error; 656 return error;
662} 657}
@@ -702,7 +697,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
702 context->count * sizeof(alist->al_offset[0]); 697 context->count * sizeof(alist->al_offset[0]);
703 context->firstu -= ATTR_ENTSIZE(namelen); 698 context->firstu -= ATTR_ENTSIZE(namelen);
704 if (context->firstu < arraytop) { 699 if (context->firstu < arraytop) {
705 xfs_attr_trace_l_c("buffer full", context); 700 trace_xfs_attr_list_full(context);
706 alist->al_more = 1; 701 alist->al_more = 1;
707 context->seen_enough = 1; 702 context->seen_enough = 1;
708 return 1; 703 return 1;
@@ -714,7 +709,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
714 aep->a_name[namelen] = 0; 709 aep->a_name[namelen] = 0;
715 alist->al_offset[context->count++] = context->firstu; 710 alist->al_offset[context->count++] = context->firstu;
716 alist->al_count = context->count; 711 alist->al_count = context->count;
717 xfs_attr_trace_l_c("add", context); 712 trace_xfs_attr_list_add(context);
718 return 0; 713 return 0;
719} 714}
720 715
@@ -1853,7 +1848,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1853 node = bp->data; 1848 node = bp->data;
1854 switch (be16_to_cpu(node->hdr.info.magic)) { 1849 switch (be16_to_cpu(node->hdr.info.magic)) {
1855 case XFS_DA_NODE_MAGIC: 1850 case XFS_DA_NODE_MAGIC:
1856 xfs_attr_trace_l_cn("wrong blk", context, node); 1851 trace_xfs_attr_list_wrong_blk(context);
1857 xfs_da_brelse(NULL, bp); 1852 xfs_da_brelse(NULL, bp);
1858 bp = NULL; 1853 bp = NULL;
1859 break; 1854 break;
@@ -1861,20 +1856,18 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1861 leaf = bp->data; 1856 leaf = bp->data;
1862 if (cursor->hashval > be32_to_cpu(leaf->entries[ 1857 if (cursor->hashval > be32_to_cpu(leaf->entries[
1863 be16_to_cpu(leaf->hdr.count)-1].hashval)) { 1858 be16_to_cpu(leaf->hdr.count)-1].hashval)) {
1864 xfs_attr_trace_l_cl("wrong blk", 1859 trace_xfs_attr_list_wrong_blk(context);
1865 context, leaf);
1866 xfs_da_brelse(NULL, bp); 1860 xfs_da_brelse(NULL, bp);
1867 bp = NULL; 1861 bp = NULL;
1868 } else if (cursor->hashval <= 1862 } else if (cursor->hashval <=
1869 be32_to_cpu(leaf->entries[0].hashval)) { 1863 be32_to_cpu(leaf->entries[0].hashval)) {
1870 xfs_attr_trace_l_cl("maybe wrong blk", 1864 trace_xfs_attr_list_wrong_blk(context);
1871 context, leaf);
1872 xfs_da_brelse(NULL, bp); 1865 xfs_da_brelse(NULL, bp);
1873 bp = NULL; 1866 bp = NULL;
1874 } 1867 }
1875 break; 1868 break;
1876 default: 1869 default:
1877 xfs_attr_trace_l_c("wrong blk - ??", context); 1870 trace_xfs_attr_list_wrong_blk(context);
1878 xfs_da_brelse(NULL, bp); 1871 xfs_da_brelse(NULL, bp);
1879 bp = NULL; 1872 bp = NULL;
1880 } 1873 }
@@ -1919,8 +1912,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1919 if (cursor->hashval 1912 if (cursor->hashval
1920 <= be32_to_cpu(btree->hashval)) { 1913 <= be32_to_cpu(btree->hashval)) {
1921 cursor->blkno = be32_to_cpu(btree->before); 1914 cursor->blkno = be32_to_cpu(btree->before);
1922 xfs_attr_trace_l_cb("descending", 1915 trace_xfs_attr_list_node_descend(context,
1923 context, btree); 1916 btree);
1924 break; 1917 break;
1925 } 1918 }
1926 } 1919 }
@@ -2270,85 +2263,3 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
2270 } 2263 }
2271 return(0); 2264 return(0);
2272} 2265}
2273
2274#if defined(XFS_ATTR_TRACE)
2275/*
2276 * Add a trace buffer entry for an attr_list context structure.
2277 */
2278void
2279xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
2280{
2281 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context,
2282 (__psunsigned_t)NULL,
2283 (__psunsigned_t)NULL,
2284 (__psunsigned_t)NULL);
2285}
2286
2287/*
2288 * Add a trace buffer entry for a context structure and a Btree node.
2289 */
2290void
2291xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
2292 struct xfs_da_intnode *node)
2293{
2294 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context,
2295 (__psunsigned_t)be16_to_cpu(node->hdr.count),
2296 (__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
2297 (__psunsigned_t)be32_to_cpu(node->btree[
2298 be16_to_cpu(node->hdr.count)-1].hashval));
2299}
2300
2301/*
2302 * Add a trace buffer entry for a context structure and a Btree element.
2303 */
2304void
2305xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
2306 struct xfs_da_node_entry *btree)
2307{
2308 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context,
2309 (__psunsigned_t)be32_to_cpu(btree->hashval),
2310 (__psunsigned_t)be32_to_cpu(btree->before),
2311 (__psunsigned_t)NULL);
2312}
2313
2314/*
2315 * Add a trace buffer entry for a context structure and a leaf block.
2316 */
2317void
2318xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
2319 struct xfs_attr_leafblock *leaf)
2320{
2321 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context,
2322 (__psunsigned_t)be16_to_cpu(leaf->hdr.count),
2323 (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
2324 (__psunsigned_t)be32_to_cpu(leaf->entries[
2325 be16_to_cpu(leaf->hdr.count)-1].hashval));
2326}
2327
2328/*
2329 * Add a trace buffer entry for the arguments given to the routine,
2330 * generic form.
2331 */
2332void
2333xfs_attr_trace_enter(int type, char *where,
2334 struct xfs_attr_list_context *context,
2335 __psunsigned_t a13, __psunsigned_t a14,
2336 __psunsigned_t a15)
2337{
2338 ASSERT(xfs_attr_trace_buf);
2339 ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
2340 (void *)((__psunsigned_t)where),
2341 (void *)((__psunsigned_t)context->dp),
2342 (void *)((__psunsigned_t)context->cursor->hashval),
2343 (void *)((__psunsigned_t)context->cursor->blkno),
2344 (void *)((__psunsigned_t)context->cursor->offset),
2345 (void *)((__psunsigned_t)context->alist),
2346 (void *)((__psunsigned_t)context->bufsize),
2347 (void *)((__psunsigned_t)context->count),
2348 (void *)((__psunsigned_t)context->firstu),
2349 NULL,
2350 (void *)((__psunsigned_t)context->dupcnt),
2351 (void *)((__psunsigned_t)context->flags),
2352 (void *)a13, (void *)a14, (void *)a15);
2353}
2354#endif /* XFS_ATTR_TRACE */
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 12f0be3a73d4..59b410ce69a1 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -48,6 +48,16 @@ struct xfs_attr_list_context;
48#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ 48#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
49#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ 49#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
50 50
51#define XFS_ATTR_FLAGS \
52 { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
53 { ATTR_ROOT, "ROOT" }, \
54 { ATTR_TRUST, "TRUST" }, \
55 { ATTR_SECURE, "SECURE" }, \
56 { ATTR_CREATE, "CREATE" }, \
57 { ATTR_REPLACE, "REPLACE" }, \
58 { ATTR_KERNOTIME, "KERNOTIME" }, \
59 { ATTR_KERNOVAL, "KERNOVAL" }
60
51/* 61/*
52 * The maximum size (into the kernel or returned from the kernel) of an 62 * The maximum size (into the kernel or returned from the kernel) of an
53 * attribute value or the buffer used for an attr_list() call. Larger 63 * attribute value or the buffer used for an attr_list() call. Larger
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 0b687351293f..baf41b5af756 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -42,6 +42,7 @@
42#include "xfs_attr.h" 42#include "xfs_attr.h"
43#include "xfs_attr_leaf.h" 43#include "xfs_attr_leaf.h"
44#include "xfs_error.h" 44#include "xfs_error.h"
45#include "xfs_trace.h"
45 46
46/* 47/*
47 * xfs_attr_leaf.c 48 * xfs_attr_leaf.c
@@ -594,7 +595,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
594 cursor = context->cursor; 595 cursor = context->cursor;
595 ASSERT(cursor != NULL); 596 ASSERT(cursor != NULL);
596 597
597 xfs_attr_trace_l_c("sf start", context); 598 trace_xfs_attr_list_sf(context);
598 599
599 /* 600 /*
600 * If the buffer is large enough and the cursor is at the start, 601 * If the buffer is large enough and the cursor is at the start,
@@ -627,7 +628,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
627 return error; 628 return error;
628 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 629 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
629 } 630 }
630 xfs_attr_trace_l_c("sf big-gulp", context); 631 trace_xfs_attr_list_sf_all(context);
631 return(0); 632 return(0);
632 } 633 }
633 634
@@ -653,7 +654,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
653 XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", 654 XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
654 XFS_ERRLEVEL_LOW, 655 XFS_ERRLEVEL_LOW,
655 context->dp->i_mount, sfe); 656 context->dp->i_mount, sfe);
656 xfs_attr_trace_l_c("sf corrupted", context);
657 kmem_free(sbuf); 657 kmem_free(sbuf);
658 return XFS_ERROR(EFSCORRUPTED); 658 return XFS_ERROR(EFSCORRUPTED);
659 } 659 }
@@ -693,7 +693,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
693 } 693 }
694 if (i == nsbuf) { 694 if (i == nsbuf) {
695 kmem_free(sbuf); 695 kmem_free(sbuf);
696 xfs_attr_trace_l_c("blk end", context);
697 return(0); 696 return(0);
698 } 697 }
699 698
@@ -719,7 +718,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
719 } 718 }
720 719
721 kmem_free(sbuf); 720 kmem_free(sbuf);
722 xfs_attr_trace_l_c("sf E-O-F", context);
723 return(0); 721 return(0);
724} 722}
725 723
@@ -2323,7 +2321,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2323 cursor = context->cursor; 2321 cursor = context->cursor;
2324 cursor->initted = 1; 2322 cursor->initted = 1;
2325 2323
2326 xfs_attr_trace_l_cl("blk start", context, leaf); 2324 trace_xfs_attr_list_leaf(context);
2327 2325
2328 /* 2326 /*
2329 * Re-find our place in the leaf block if this is a new syscall. 2327 * Re-find our place in the leaf block if this is a new syscall.
@@ -2344,7 +2342,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2344 } 2342 }
2345 } 2343 }
2346 if (i == be16_to_cpu(leaf->hdr.count)) { 2344 if (i == be16_to_cpu(leaf->hdr.count)) {
2347 xfs_attr_trace_l_c("not found", context); 2345 trace_xfs_attr_list_notfound(context);
2348 return(0); 2346 return(0);
2349 } 2347 }
2350 } else { 2348 } else {
@@ -2419,7 +2417,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2419 break; 2417 break;
2420 cursor->offset++; 2418 cursor->offset++;
2421 } 2419 }
2422 xfs_attr_trace_l_cl("blk end", context, leaf); 2420 trace_xfs_attr_list_leaf_end(context);
2423 return(retval); 2421 return(retval);
2424} 2422}
2425 2423
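
Note on the xfs_attr_leaf.c hunks above: the old compile-time xfs_attr_trace_l_c()/xfs_attr_trace_l_cl() ktrace calls are replaced by trace_xfs_attr_list_sf(), trace_xfs_attr_list_leaf() and friends. Each trace_foo() call corresponds to a tracepoint defined with the kernel's event-tracing macros in the new fs/xfs/xfs_trace.h (added elsewhere in this series), so the events are always built in and can be enabled at run time through /sys/kernel/debug/tracing/events/xfs/. A rough sketch of what such a definition looks like -- the field choices and format string here are illustrative, not the actual xfs_trace.h contents:

	TRACE_EVENT(xfs_attr_list_sf,
		TP_PROTO(struct xfs_attr_list_context *ctx),
		TP_ARGS(ctx),
		TP_STRUCT__entry(
			__field(int, bufsize)
			__field(int, count)
			__field(int, flags)
		),
		TP_fast_assign(
			__entry->bufsize = ctx->bufsize;
			__entry->count = ctx->count;
			__entry->flags = ctx->flags;
		),
		TP_printk("bufsize %d count %d flags %s",
			  __entry->bufsize, __entry->count,
			  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS))
	);
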
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index ea22839caed2..76ab7b0cbb3a 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -25,8 +25,6 @@
25 * to fit into the literal area of the inode. 25 * to fit into the literal area of the inode.
26 */ 26 */
27 27
28struct xfs_inode;
29
30/* 28/*
31 * Entries are packed toward the top as tight as possible. 29 * Entries are packed toward the top as tight as possible.
32 */ 30 */
@@ -69,42 +67,4 @@ typedef struct xfs_attr_sf_sort {
69 (be16_to_cpu(((xfs_attr_shortform_t *) \ 67 (be16_to_cpu(((xfs_attr_shortform_t *) \
70 ((dp)->i_afp->if_u1.if_data))->hdr.totsize)) 68 ((dp)->i_afp->if_u1.if_data))->hdr.totsize))
71 69
72#if defined(XFS_ATTR_TRACE)
73/*
74 * Kernel tracing support for attribute lists
75 */
76struct xfs_attr_list_context;
77struct xfs_da_intnode;
78struct xfs_da_node_entry;
79struct xfs_attr_leafblock;
80
81#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */
82extern ktrace_t *xfs_attr_trace_buf;
83
84/*
85 * Trace record types.
86 */
87#define XFS_ATTR_KTRACE_L_C 1 /* context */
88#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */
89#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */
90#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */
91
92void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context);
93void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
94 struct xfs_da_intnode *node);
95void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
96 struct xfs_da_node_entry *btree);
97void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
98 struct xfs_attr_leafblock *leaf);
99void xfs_attr_trace_enter(int type, char *where,
100 struct xfs_attr_list_context *context,
101 __psunsigned_t a13, __psunsigned_t a14,
102 __psunsigned_t a15);
103#else
104#define xfs_attr_trace_l_c(w,c)
105#define xfs_attr_trace_l_cn(w,c,n)
106#define xfs_attr_trace_l_cb(w,c,b)
107#define xfs_attr_trace_l_cl(w,c,l)
108#endif /* XFS_ATTR_TRACE */
109
110#endif /* __XFS_ATTR_SF_H__ */ 70#endif /* __XFS_ATTR_SF_H__ */
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 8971fb09d387..98251cdc52aa 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -54,6 +54,7 @@
54#include "xfs_buf_item.h" 54#include "xfs_buf_item.h"
55#include "xfs_filestream.h" 55#include "xfs_filestream.h"
56#include "xfs_vnodeops.h" 56#include "xfs_vnodeops.h"
57#include "xfs_trace.h"
57 58
58 59
59#ifdef DEBUG 60#ifdef DEBUG
@@ -272,71 +273,6 @@ xfs_bmap_isaeof(
272 int whichfork, /* data or attribute fork */ 273 int whichfork, /* data or attribute fork */
273 char *aeof); /* return value */ 274 char *aeof); /* return value */
274 275
275#ifdef XFS_BMAP_TRACE
276/*
277 * Add bmap trace entry prior to a call to xfs_iext_remove.
278 */
279STATIC void
280xfs_bmap_trace_delete(
281 const char *fname, /* function name */
282 char *desc, /* operation description */
283 xfs_inode_t *ip, /* incore inode pointer */
284 xfs_extnum_t idx, /* index of entry(entries) deleted */
285 xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
286 int whichfork); /* data or attr fork */
287
288/*
289 * Add bmap trace entry prior to a call to xfs_iext_insert, or
290 * reading in the extents list from the disk (in the btree).
291 */
292STATIC void
293xfs_bmap_trace_insert(
294 const char *fname, /* function name */
295 char *desc, /* operation description */
296 xfs_inode_t *ip, /* incore inode pointer */
297 xfs_extnum_t idx, /* index of entry(entries) inserted */
298 xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
299 xfs_bmbt_irec_t *r1, /* inserted record 1 */
300 xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
301 int whichfork); /* data or attr fork */
302
303/*
304 * Add bmap trace entry after updating an extent record in place.
305 */
306STATIC void
307xfs_bmap_trace_post_update(
308 const char *fname, /* function name */
309 char *desc, /* operation description */
310 xfs_inode_t *ip, /* incore inode pointer */
311 xfs_extnum_t idx, /* index of entry updated */
312 int whichfork); /* data or attr fork */
313
314/*
315 * Add bmap trace entry prior to updating an extent record in place.
316 */
317STATIC void
318xfs_bmap_trace_pre_update(
319 const char *fname, /* function name */
320 char *desc, /* operation description */
321 xfs_inode_t *ip, /* incore inode pointer */
322 xfs_extnum_t idx, /* index of entry to be updated */
323 int whichfork); /* data or attr fork */
324
325#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \
326 xfs_bmap_trace_delete(__func__,d,ip,i,c,w)
327#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \
328 xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w)
329#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \
330 xfs_bmap_trace_post_update(__func__,d,ip,i,w)
331#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \
332 xfs_bmap_trace_pre_update(__func__,d,ip,i,w)
333#else
334#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
335#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
336#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
337#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
338#endif /* XFS_BMAP_TRACE */
339
340/* 276/*
341 * Compute the worst-case number of indirect blocks that will be used 277 * Compute the worst-case number of indirect blocks that will be used
342 * for ip's delayed extent of length "len". 278 * for ip's delayed extent of length "len".
@@ -363,18 +299,6 @@ xfs_bmap_validate_ret(
363#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) 299#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
364#endif /* DEBUG */ 300#endif /* DEBUG */
365 301
366#if defined(XFS_RW_TRACE)
367STATIC void
368xfs_bunmap_trace(
369 xfs_inode_t *ip,
370 xfs_fileoff_t bno,
371 xfs_filblks_t len,
372 int flags,
373 inst_t *ra);
374#else
375#define xfs_bunmap_trace(ip, bno, len, flags, ra)
376#endif /* XFS_RW_TRACE */
377
378STATIC int 302STATIC int
379xfs_bmap_count_tree( 303xfs_bmap_count_tree(
380 xfs_mount_t *mp, 304 xfs_mount_t *mp,
@@ -590,9 +514,9 @@ xfs_bmap_add_extent(
590 * already extents in the list. 514 * already extents in the list.
591 */ 515 */
592 if (nextents == 0) { 516 if (nextents == 0) {
593 XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL, 517 xfs_iext_insert(ip, 0, 1, new,
594 whichfork); 518 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
595 xfs_iext_insert(ifp, 0, 1, new); 519
596 ASSERT(cur == NULL); 520 ASSERT(cur == NULL);
597 ifp->if_lastex = 0; 521 ifp->if_lastex = 0;
598 if (!isnullstartblock(new->br_startblock)) { 522 if (!isnullstartblock(new->br_startblock)) {
@@ -759,26 +683,10 @@ xfs_bmap_add_extent_delay_real(
759 xfs_filblks_t temp=0; /* value for dnew calculations */ 683 xfs_filblks_t temp=0; /* value for dnew calculations */
760 xfs_filblks_t temp2=0;/* value for dnew calculations */ 684 xfs_filblks_t temp2=0;/* value for dnew calculations */
761 int tmp_rval; /* partial logging flags */ 685 int tmp_rval; /* partial logging flags */
762 enum { /* bit number definitions for state */
763 LEFT_CONTIG, RIGHT_CONTIG,
764 LEFT_FILLING, RIGHT_FILLING,
765 LEFT_DELAY, RIGHT_DELAY,
766 LEFT_VALID, RIGHT_VALID
767 };
768 686
769#define LEFT r[0] 687#define LEFT r[0]
770#define RIGHT r[1] 688#define RIGHT r[1]
771#define PREV r[2] 689#define PREV r[2]
772#define MASK(b) (1 << (b))
773#define MASK2(a,b) (MASK(a) | MASK(b))
774#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
775#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
776#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
777#define STATE_TEST(b) (state & MASK(b))
778#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
779 ((state &= ~MASK(b)), 0))
780#define SWITCH_STATE \
781 (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
782 690
783 /* 691 /*
784 * Set up a bunch of variables to make the tests simpler. 692 * Set up a bunch of variables to make the tests simpler.
@@ -790,69 +698,80 @@ xfs_bmap_add_extent_delay_real(
790 new_endoff = new->br_startoff + new->br_blockcount; 698 new_endoff = new->br_startoff + new->br_blockcount;
791 ASSERT(PREV.br_startoff <= new->br_startoff); 699 ASSERT(PREV.br_startoff <= new->br_startoff);
792 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 700 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
701
793 /* 702 /*
794 * Set flags determining what part of the previous delayed allocation 703 * Set flags determining what part of the previous delayed allocation
795 * extent is being replaced by a real allocation. 704 * extent is being replaced by a real allocation.
796 */ 705 */
797 STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); 706 if (PREV.br_startoff == new->br_startoff)
798 STATE_SET(RIGHT_FILLING, 707 state |= BMAP_LEFT_FILLING;
799 PREV.br_startoff + PREV.br_blockcount == new_endoff); 708 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
709 state |= BMAP_RIGHT_FILLING;
710
800 /* 711 /*
801 * Check and set flags if this segment has a left neighbor. 712 * Check and set flags if this segment has a left neighbor.
802 * Don't set contiguous if the combined extent would be too large. 713 * Don't set contiguous if the combined extent would be too large.
803 */ 714 */
804 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 715 if (idx > 0) {
716 state |= BMAP_LEFT_VALID;
805 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); 717 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
806 STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); 718
719 if (isnullstartblock(LEFT.br_startblock))
720 state |= BMAP_LEFT_DELAY;
807 } 721 }
808 STATE_SET(LEFT_CONTIG, 722
809 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && 723 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
810 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 724 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
811 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 725 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
812 LEFT.br_state == new->br_state && 726 LEFT.br_state == new->br_state &&
813 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); 727 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
728 state |= BMAP_LEFT_CONTIG;
729
814 /* 730 /*
815 * Check and set flags if this segment has a right neighbor. 731 * Check and set flags if this segment has a right neighbor.
816 * Don't set contiguous if the combined extent would be too large. 732 * Don't set contiguous if the combined extent would be too large.
817 * Also check for all-three-contiguous being too large. 733 * Also check for all-three-contiguous being too large.
818 */ 734 */
819 if (STATE_SET_TEST(RIGHT_VALID, 735 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
820 idx < 736 state |= BMAP_RIGHT_VALID;
821 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
822 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); 737 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
823 STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); 738
739 if (isnullstartblock(RIGHT.br_startblock))
740 state |= BMAP_RIGHT_DELAY;
824 } 741 }
825 STATE_SET(RIGHT_CONTIG, 742
826 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && 743 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
827 new_endoff == RIGHT.br_startoff && 744 new_endoff == RIGHT.br_startoff &&
828 new->br_startblock + new->br_blockcount == 745 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
829 RIGHT.br_startblock && 746 new->br_state == RIGHT.br_state &&
830 new->br_state == RIGHT.br_state && 747 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
831 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 748 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
832 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != 749 BMAP_RIGHT_FILLING)) !=
833 MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || 750 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
834 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 751 BMAP_RIGHT_FILLING) ||
835 <= MAXEXTLEN)); 752 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
753 <= MAXEXTLEN))
754 state |= BMAP_RIGHT_CONTIG;
755
836 error = 0; 756 error = 0;
837 /* 757 /*
838 * Switch out based on the FILLING and CONTIG state bits. 758 * Switch out based on the FILLING and CONTIG state bits.
839 */ 759 */
840 switch (SWITCH_STATE) { 760 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
841 761 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
842 case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 762 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
763 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
843 /* 764 /*
844 * Filling in all of a previously delayed allocation extent. 765 * Filling in all of a previously delayed allocation extent.
845 * The left and right neighbors are both contiguous with new. 766 * The left and right neighbors are both contiguous with new.
846 */ 767 */
847 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1, 768 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
848 XFS_DATA_FORK);
849 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 769 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
850 LEFT.br_blockcount + PREV.br_blockcount + 770 LEFT.br_blockcount + PREV.br_blockcount +
851 RIGHT.br_blockcount); 771 RIGHT.br_blockcount);
852 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1, 772 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
853 XFS_DATA_FORK); 773
854 XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); 774 xfs_iext_remove(ip, idx, 2, state);
855 xfs_iext_remove(ifp, idx, 2);
856 ip->i_df.if_lastex = idx - 1; 775 ip->i_df.if_lastex = idx - 1;
857 ip->i_d.di_nextents--; 776 ip->i_d.di_nextents--;
858 if (cur == NULL) 777 if (cur == NULL)
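
Note on the hunk above (and the similar conversions in the add_extent helpers that follow): the per-function enum plus the MASK()/MASK2()/STATE_SET()/SWITCH_STATE macro layer is dropped in favour of plain bit flags (BMAP_LEFT_CONTIG, BMAP_RIGHT_FILLING, ...) that are ORed into 'state' and tested directly, with the big switch keyed on the ORed flag combination; the flag definitions themselves are added elsewhere in this series, not in these hunks. The string-tagged XFS_BMAP_TRACE_PRE_UPDATE/POST_UPDATE macros likewise become trace_xfs_bmap_pre_update()/trace_xfs_bmap_post_update() tracepoints that take the state word and the caller's instruction pointer (_THIS_IP_). A small, self-contained sketch of the state-bit/switch pattern (flag values invented for the example):

	#include <stdio.h>

	#define BMAP_LEFT_CONTIG	(1 << 0)
	#define BMAP_RIGHT_CONTIG	(1 << 1)

	/* Decide how a new extent is merged, the way the rewritten code does:
	 * accumulate facts as bits in 'state', then switch on the combination. */
	static void classify(int left_contig, int right_contig)
	{
		int state = 0;

		if (left_contig)
			state |= BMAP_LEFT_CONTIG;
		if (right_contig)
			state |= BMAP_RIGHT_CONTIG;

		switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
		case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
			puts("merge with both neighbours");
			break;
		case BMAP_LEFT_CONTIG:
			puts("merge with left neighbour");
			break;
		case BMAP_RIGHT_CONTIG:
			puts("merge with right neighbour");
			break;
		default:
			puts("insert as a new extent");
			break;
		}
	}

	int main(void)
	{
		classify(1, 0);		/* merge with left neighbour */
		classify(1, 1);		/* merge with both neighbours */
		return 0;
	}
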
@@ -885,20 +804,18 @@ xfs_bmap_add_extent_delay_real(
885 RIGHT.br_blockcount; 804 RIGHT.br_blockcount;
886 break; 805 break;
887 806
888 case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): 807 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
889 /* 808 /*
890 * Filling in all of a previously delayed allocation extent. 809 * Filling in all of a previously delayed allocation extent.
891 * The left neighbor is contiguous, the right is not. 810 * The left neighbor is contiguous, the right is not.
892 */ 811 */
893 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1, 812 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
894 XFS_DATA_FORK);
895 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 813 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
896 LEFT.br_blockcount + PREV.br_blockcount); 814 LEFT.br_blockcount + PREV.br_blockcount);
897 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1, 815 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
898 XFS_DATA_FORK); 816
899 ip->i_df.if_lastex = idx - 1; 817 ip->i_df.if_lastex = idx - 1;
900 XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); 818 xfs_iext_remove(ip, idx, 1, state);
901 xfs_iext_remove(ifp, idx, 1);
902 if (cur == NULL) 819 if (cur == NULL)
903 rval = XFS_ILOG_DEXT; 820 rval = XFS_ILOG_DEXT;
904 else { 821 else {
@@ -921,19 +838,19 @@ xfs_bmap_add_extent_delay_real(
921 PREV.br_blockcount; 838 PREV.br_blockcount;
922 break; 839 break;
923 840
924 case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): 841 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
925 /* 842 /*
926 * Filling in all of a previously delayed allocation extent. 843 * Filling in all of a previously delayed allocation extent.
927 * The right neighbor is contiguous, the left is not. 844 * The right neighbor is contiguous, the left is not.
928 */ 845 */
929 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK); 846 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
930 xfs_bmbt_set_startblock(ep, new->br_startblock); 847 xfs_bmbt_set_startblock(ep, new->br_startblock);
931 xfs_bmbt_set_blockcount(ep, 848 xfs_bmbt_set_blockcount(ep,
932 PREV.br_blockcount + RIGHT.br_blockcount); 849 PREV.br_blockcount + RIGHT.br_blockcount);
933 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK); 850 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
851
934 ip->i_df.if_lastex = idx; 852 ip->i_df.if_lastex = idx;
935 XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); 853 xfs_iext_remove(ip, idx + 1, 1, state);
936 xfs_iext_remove(ifp, idx + 1, 1);
937 if (cur == NULL) 854 if (cur == NULL)
938 rval = XFS_ILOG_DEXT; 855 rval = XFS_ILOG_DEXT;
939 else { 856 else {
@@ -956,15 +873,16 @@ xfs_bmap_add_extent_delay_real(
956 RIGHT.br_blockcount; 873 RIGHT.br_blockcount;
957 break; 874 break;
958 875
959 case MASK2(LEFT_FILLING, RIGHT_FILLING): 876 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
960 /* 877 /*
961 * Filling in all of a previously delayed allocation extent. 878 * Filling in all of a previously delayed allocation extent.
962 * Neither the left nor right neighbors are contiguous with 879 * Neither the left nor right neighbors are contiguous with
963 * the new one. 880 * the new one.
964 */ 881 */
965 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK); 882 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
966 xfs_bmbt_set_startblock(ep, new->br_startblock); 883 xfs_bmbt_set_startblock(ep, new->br_startblock);
967 XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK); 884 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
885
968 ip->i_df.if_lastex = idx; 886 ip->i_df.if_lastex = idx;
969 ip->i_d.di_nextents++; 887 ip->i_d.di_nextents++;
970 if (cur == NULL) 888 if (cur == NULL)
@@ -987,19 +905,20 @@ xfs_bmap_add_extent_delay_real(
987 temp2 = new->br_blockcount; 905 temp2 = new->br_blockcount;
988 break; 906 break;
989 907
990 case MASK2(LEFT_FILLING, LEFT_CONTIG): 908 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
991 /* 909 /*
992 * Filling in the first part of a previous delayed allocation. 910 * Filling in the first part of a previous delayed allocation.
993 * The left neighbor is contiguous. 911 * The left neighbor is contiguous.
994 */ 912 */
995 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK); 913 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
996 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 914 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
997 LEFT.br_blockcount + new->br_blockcount); 915 LEFT.br_blockcount + new->br_blockcount);
998 xfs_bmbt_set_startoff(ep, 916 xfs_bmbt_set_startoff(ep,
999 PREV.br_startoff + new->br_blockcount); 917 PREV.br_startoff + new->br_blockcount);
1000 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK); 918 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
919
1001 temp = PREV.br_blockcount - new->br_blockcount; 920 temp = PREV.br_blockcount - new->br_blockcount;
1002 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); 921 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1003 xfs_bmbt_set_blockcount(ep, temp); 922 xfs_bmbt_set_blockcount(ep, temp);
1004 ip->i_df.if_lastex = idx - 1; 923 ip->i_df.if_lastex = idx - 1;
1005 if (cur == NULL) 924 if (cur == NULL)
@@ -1021,7 +940,7 @@ xfs_bmap_add_extent_delay_real(
1021 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 940 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1022 startblockval(PREV.br_startblock)); 941 startblockval(PREV.br_startblock));
1023 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 942 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1024 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); 943 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1025 *dnew = temp; 944 *dnew = temp;
1026 /* DELTA: The boundary between two in-core extents moved. */ 945 /* DELTA: The boundary between two in-core extents moved. */
1027 temp = LEFT.br_startoff; 946 temp = LEFT.br_startoff;
@@ -1029,18 +948,16 @@ xfs_bmap_add_extent_delay_real(
1029 PREV.br_blockcount; 948 PREV.br_blockcount;
1030 break; 949 break;
1031 950
1032 case MASK(LEFT_FILLING): 951 case BMAP_LEFT_FILLING:
1033 /* 952 /*
1034 * Filling in the first part of a previous delayed allocation. 953 * Filling in the first part of a previous delayed allocation.
1035 * The left neighbor is not contiguous. 954 * The left neighbor is not contiguous.
1036 */ 955 */
1037 XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK); 956 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1038 xfs_bmbt_set_startoff(ep, new_endoff); 957 xfs_bmbt_set_startoff(ep, new_endoff);
1039 temp = PREV.br_blockcount - new->br_blockcount; 958 temp = PREV.br_blockcount - new->br_blockcount;
1040 xfs_bmbt_set_blockcount(ep, temp); 959 xfs_bmbt_set_blockcount(ep, temp);
1041 XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, 960 xfs_iext_insert(ip, idx, 1, new, state);
1042 XFS_DATA_FORK);
1043 xfs_iext_insert(ifp, idx, 1, new);
1044 ip->i_df.if_lastex = idx; 961 ip->i_df.if_lastex = idx;
1045 ip->i_d.di_nextents++; 962 ip->i_d.di_nextents++;
1046 if (cur == NULL) 963 if (cur == NULL)
@@ -1071,27 +988,27 @@ xfs_bmap_add_extent_delay_real(
1071 (cur ? cur->bc_private.b.allocated : 0)); 988 (cur ? cur->bc_private.b.allocated : 0));
1072 ep = xfs_iext_get_ext(ifp, idx + 1); 989 ep = xfs_iext_get_ext(ifp, idx + 1);
1073 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 990 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1074 XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK); 991 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1075 *dnew = temp; 992 *dnew = temp;
1076 /* DELTA: One in-core extent is split in two. */ 993 /* DELTA: One in-core extent is split in two. */
1077 temp = PREV.br_startoff; 994 temp = PREV.br_startoff;
1078 temp2 = PREV.br_blockcount; 995 temp2 = PREV.br_blockcount;
1079 break; 996 break;
1080 997
1081 case MASK2(RIGHT_FILLING, RIGHT_CONTIG): 998 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1082 /* 999 /*
1083 * Filling in the last part of a previous delayed allocation. 1000 * Filling in the last part of a previous delayed allocation.
1084 * The right neighbor is contiguous with the new allocation. 1001 * The right neighbor is contiguous with the new allocation.
1085 */ 1002 */
1086 temp = PREV.br_blockcount - new->br_blockcount; 1003 temp = PREV.br_blockcount - new->br_blockcount;
1087 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); 1004 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1088 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK); 1005 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1089 xfs_bmbt_set_blockcount(ep, temp); 1006 xfs_bmbt_set_blockcount(ep, temp);
1090 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), 1007 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1091 new->br_startoff, new->br_startblock, 1008 new->br_startoff, new->br_startblock,
1092 new->br_blockcount + RIGHT.br_blockcount, 1009 new->br_blockcount + RIGHT.br_blockcount,
1093 RIGHT.br_state); 1010 RIGHT.br_state);
1094 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK); 1011 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1095 ip->i_df.if_lastex = idx + 1; 1012 ip->i_df.if_lastex = idx + 1;
1096 if (cur == NULL) 1013 if (cur == NULL)
1097 rval = XFS_ILOG_DEXT; 1014 rval = XFS_ILOG_DEXT;
@@ -1112,7 +1029,7 @@ xfs_bmap_add_extent_delay_real(
1112 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 1029 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1113 startblockval(PREV.br_startblock)); 1030 startblockval(PREV.br_startblock));
1114 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1031 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1115 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); 1032 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1116 *dnew = temp; 1033 *dnew = temp;
1117 /* DELTA: The boundary between two in-core extents moved. */ 1034 /* DELTA: The boundary between two in-core extents moved. */
1118 temp = PREV.br_startoff; 1035 temp = PREV.br_startoff;
@@ -1120,17 +1037,15 @@ xfs_bmap_add_extent_delay_real(
1120 RIGHT.br_blockcount; 1037 RIGHT.br_blockcount;
1121 break; 1038 break;
1122 1039
1123 case MASK(RIGHT_FILLING): 1040 case BMAP_RIGHT_FILLING:
1124 /* 1041 /*
1125 * Filling in the last part of a previous delayed allocation. 1042 * Filling in the last part of a previous delayed allocation.
1126 * The right neighbor is not contiguous. 1043 * The right neighbor is not contiguous.
1127 */ 1044 */
1128 temp = PREV.br_blockcount - new->br_blockcount; 1045 temp = PREV.br_blockcount - new->br_blockcount;
1129 XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1046 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1130 xfs_bmbt_set_blockcount(ep, temp); 1047 xfs_bmbt_set_blockcount(ep, temp);
1131 XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, 1048 xfs_iext_insert(ip, idx + 1, 1, new, state);
1132 XFS_DATA_FORK);
1133 xfs_iext_insert(ifp, idx + 1, 1, new);
1134 ip->i_df.if_lastex = idx + 1; 1049 ip->i_df.if_lastex = idx + 1;
1135 ip->i_d.di_nextents++; 1050 ip->i_d.di_nextents++;
1136 if (cur == NULL) 1051 if (cur == NULL)
@@ -1161,7 +1076,7 @@ xfs_bmap_add_extent_delay_real(
1161 (cur ? cur->bc_private.b.allocated : 0)); 1076 (cur ? cur->bc_private.b.allocated : 0));
1162 ep = xfs_iext_get_ext(ifp, idx); 1077 ep = xfs_iext_get_ext(ifp, idx);
1163 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1078 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1164 XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1079 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1165 *dnew = temp; 1080 *dnew = temp;
1166 /* DELTA: One in-core extent is split in two. */ 1081 /* DELTA: One in-core extent is split in two. */
1167 temp = PREV.br_startoff; 1082 temp = PREV.br_startoff;
@@ -1175,7 +1090,7 @@ xfs_bmap_add_extent_delay_real(
1175 * This case is avoided almost all the time. 1090 * This case is avoided almost all the time.
1176 */ 1091 */
1177 temp = new->br_startoff - PREV.br_startoff; 1092 temp = new->br_startoff - PREV.br_startoff;
1178 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK); 1093 trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
1179 xfs_bmbt_set_blockcount(ep, temp); 1094 xfs_bmbt_set_blockcount(ep, temp);
1180 r[0] = *new; 1095 r[0] = *new;
1181 r[1].br_state = PREV.br_state; 1096 r[1].br_state = PREV.br_state;
@@ -1183,9 +1098,7 @@ xfs_bmap_add_extent_delay_real(
1183 r[1].br_startoff = new_endoff; 1098 r[1].br_startoff = new_endoff;
1184 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; 1099 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1185 r[1].br_blockcount = temp2; 1100 r[1].br_blockcount = temp2;
1186 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1], 1101 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1187 XFS_DATA_FORK);
1188 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1189 ip->i_df.if_lastex = idx + 1; 1102 ip->i_df.if_lastex = idx + 1;
1190 ip->i_d.di_nextents++; 1103 ip->i_d.di_nextents++;
1191 if (cur == NULL) 1104 if (cur == NULL)
@@ -1242,24 +1155,24 @@ xfs_bmap_add_extent_delay_real(
1242 } 1155 }
1243 ep = xfs_iext_get_ext(ifp, idx); 1156 ep = xfs_iext_get_ext(ifp, idx);
1244 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1157 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1245 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); 1158 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1246 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); 1159 trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_);
1247 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), 1160 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
1248 nullstartblock((int)temp2)); 1161 nullstartblock((int)temp2));
1249 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); 1162 trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
1250 *dnew = temp + temp2; 1163 *dnew = temp + temp2;
1251 /* DELTA: One in-core extent is split in three. */ 1164 /* DELTA: One in-core extent is split in three. */
1252 temp = PREV.br_startoff; 1165 temp = PREV.br_startoff;
1253 temp2 = PREV.br_blockcount; 1166 temp2 = PREV.br_blockcount;
1254 break; 1167 break;
1255 1168
1256 case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1169 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1257 case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1170 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1258 case MASK2(LEFT_FILLING, RIGHT_CONTIG): 1171 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1259 case MASK2(RIGHT_FILLING, LEFT_CONTIG): 1172 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1260 case MASK2(LEFT_CONTIG, RIGHT_CONTIG): 1173 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1261 case MASK(LEFT_CONTIG): 1174 case BMAP_LEFT_CONTIG:
1262 case MASK(RIGHT_CONTIG): 1175 case BMAP_RIGHT_CONTIG:
1263 /* 1176 /*
1264 * These cases are all impossible. 1177 * These cases are all impossible.
1265 */ 1178 */
@@ -1279,14 +1192,6 @@ done:
1279#undef LEFT 1192#undef LEFT
1280#undef RIGHT 1193#undef RIGHT
1281#undef PREV 1194#undef PREV
1282#undef MASK
1283#undef MASK2
1284#undef MASK3
1285#undef MASK4
1286#undef STATE_SET
1287#undef STATE_TEST
1288#undef STATE_SET_TEST
1289#undef SWITCH_STATE
1290} 1195}
1291 1196
1292/* 1197/*
@@ -1316,27 +1221,10 @@ xfs_bmap_add_extent_unwritten_real(
1316 int state = 0;/* state bits, accessed thru macros */ 1221 int state = 0;/* state bits, accessed thru macros */
1317 xfs_filblks_t temp=0; 1222 xfs_filblks_t temp=0;
1318 xfs_filblks_t temp2=0; 1223 xfs_filblks_t temp2=0;
1319 enum { /* bit number definitions for state */
1320 LEFT_CONTIG, RIGHT_CONTIG,
1321 LEFT_FILLING, RIGHT_FILLING,
1322 LEFT_DELAY, RIGHT_DELAY,
1323 LEFT_VALID, RIGHT_VALID
1324 };
1325 1224
1326#define LEFT r[0] 1225#define LEFT r[0]
1327#define RIGHT r[1] 1226#define RIGHT r[1]
1328#define PREV r[2] 1227#define PREV r[2]
1329#define MASK(b) (1 << (b))
1330#define MASK2(a,b) (MASK(a) | MASK(b))
1331#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
1332#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
1333#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1334#define STATE_TEST(b) (state & MASK(b))
1335#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
1336 ((state &= ~MASK(b)), 0))
1337#define SWITCH_STATE \
1338 (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
1339
1340 /* 1228 /*
1341 * Set up a bunch of variables to make the tests simpler. 1229 * Set up a bunch of variables to make the tests simpler.
1342 */ 1230 */
@@ -1352,68 +1240,78 @@ xfs_bmap_add_extent_unwritten_real(
1352 new_endoff = new->br_startoff + new->br_blockcount; 1240 new_endoff = new->br_startoff + new->br_blockcount;
1353 ASSERT(PREV.br_startoff <= new->br_startoff); 1241 ASSERT(PREV.br_startoff <= new->br_startoff);
1354 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 1242 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1243
1355 /* 1244 /*
1356 * Set flags determining what part of the previous oldext allocation 1245 * Set flags determining what part of the previous oldext allocation
1357 * extent is being replaced by a newext allocation. 1246 * extent is being replaced by a newext allocation.
1358 */ 1247 */
1359 STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); 1248 if (PREV.br_startoff == new->br_startoff)
1360 STATE_SET(RIGHT_FILLING, 1249 state |= BMAP_LEFT_FILLING;
1361 PREV.br_startoff + PREV.br_blockcount == new_endoff); 1250 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1251 state |= BMAP_RIGHT_FILLING;
1252
1362 /* 1253 /*
1363 * Check and set flags if this segment has a left neighbor. 1254 * Check and set flags if this segment has a left neighbor.
1364 * Don't set contiguous if the combined extent would be too large. 1255 * Don't set contiguous if the combined extent would be too large.
1365 */ 1256 */
1366 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1257 if (idx > 0) {
1258 state |= BMAP_LEFT_VALID;
1367 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); 1259 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1368 STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); 1260
1261 if (isnullstartblock(LEFT.br_startblock))
1262 state |= BMAP_LEFT_DELAY;
1369 } 1263 }
1370 STATE_SET(LEFT_CONTIG, 1264
1371 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && 1265 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1372 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 1266 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1373 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 1267 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1374 LEFT.br_state == newext && 1268 LEFT.br_state == newext &&
1375 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); 1269 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1270 state |= BMAP_LEFT_CONTIG;
1271
1376 /* 1272 /*
1377 * Check and set flags if this segment has a right neighbor. 1273 * Check and set flags if this segment has a right neighbor.
1378 * Don't set contiguous if the combined extent would be too large. 1274 * Don't set contiguous if the combined extent would be too large.
1379 * Also check for all-three-contiguous being too large. 1275 * Also check for all-three-contiguous being too large.
1380 */ 1276 */
1381 if (STATE_SET_TEST(RIGHT_VALID, 1277 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1382 idx < 1278 state |= BMAP_RIGHT_VALID;
1383 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
1384 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); 1279 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1385 STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); 1280 if (isnullstartblock(RIGHT.br_startblock))
1281 state |= BMAP_RIGHT_DELAY;
1386 } 1282 }
1387 STATE_SET(RIGHT_CONTIG, 1283
1388 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && 1284 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1389 new_endoff == RIGHT.br_startoff && 1285 new_endoff == RIGHT.br_startoff &&
1390 new->br_startblock + new->br_blockcount == 1286 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1391 RIGHT.br_startblock && 1287 newext == RIGHT.br_state &&
1392 newext == RIGHT.br_state && 1288 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1393 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 1289 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1394 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != 1290 BMAP_RIGHT_FILLING)) !=
1395 MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || 1291 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1396 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 1292 BMAP_RIGHT_FILLING) ||
1397 <= MAXEXTLEN)); 1293 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1294 <= MAXEXTLEN))
1295 state |= BMAP_RIGHT_CONTIG;
1296
1398 /* 1297 /*
1399 * Switch out based on the FILLING and CONTIG state bits. 1298 * Switch out based on the FILLING and CONTIG state bits.
1400 */ 1299 */
1401 switch (SWITCH_STATE) { 1300 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1402 1301 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1403 case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1302 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1303 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1404 /* 1304 /*
1405 * Setting all of a previous oldext extent to newext. 1305 * Setting all of a previous oldext extent to newext.
1406 * The left and right neighbors are both contiguous with new. 1306 * The left and right neighbors are both contiguous with new.
1407 */ 1307 */
1408 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1, 1308 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1409 XFS_DATA_FORK);
1410 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1309 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1411 LEFT.br_blockcount + PREV.br_blockcount + 1310 LEFT.br_blockcount + PREV.br_blockcount +
1412 RIGHT.br_blockcount); 1311 RIGHT.br_blockcount);
1413 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1, 1312 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1414 XFS_DATA_FORK); 1313
1415 XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); 1314 xfs_iext_remove(ip, idx, 2, state);
1416 xfs_iext_remove(ifp, idx, 2);
1417 ip->i_df.if_lastex = idx - 1; 1315 ip->i_df.if_lastex = idx - 1;
1418 ip->i_d.di_nextents -= 2; 1316 ip->i_d.di_nextents -= 2;
1419 if (cur == NULL) 1317 if (cur == NULL)
@@ -1450,20 +1348,18 @@ xfs_bmap_add_extent_unwritten_real(
1450 RIGHT.br_blockcount; 1348 RIGHT.br_blockcount;
1451 break; 1349 break;
1452 1350
1453 case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): 1351 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1454 /* 1352 /*
1455 * Setting all of a previous oldext extent to newext. 1353 * Setting all of a previous oldext extent to newext.
1456 * The left neighbor is contiguous, the right is not. 1354 * The left neighbor is contiguous, the right is not.
1457 */ 1355 */
1458 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1, 1356 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1459 XFS_DATA_FORK);
1460 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1357 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1461 LEFT.br_blockcount + PREV.br_blockcount); 1358 LEFT.br_blockcount + PREV.br_blockcount);
1462 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1, 1359 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1463 XFS_DATA_FORK); 1360
1464 ip->i_df.if_lastex = idx - 1; 1361 ip->i_df.if_lastex = idx - 1;
1465 XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); 1362 xfs_iext_remove(ip, idx, 1, state);
1466 xfs_iext_remove(ifp, idx, 1);
1467 ip->i_d.di_nextents--; 1363 ip->i_d.di_nextents--;
1468 if (cur == NULL) 1364 if (cur == NULL)
1469 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1365 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1492,21 +1388,18 @@ xfs_bmap_add_extent_unwritten_real(
1492 PREV.br_blockcount; 1388 PREV.br_blockcount;
1493 break; 1389 break;
1494 1390
1495 case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): 1391 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1496 /* 1392 /*
1497 * Setting all of a previous oldext extent to newext. 1393 * Setting all of a previous oldext extent to newext.
1498 * The right neighbor is contiguous, the left is not. 1394 * The right neighbor is contiguous, the left is not.
1499 */ 1395 */
1500 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, 1396 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1501 XFS_DATA_FORK);
1502 xfs_bmbt_set_blockcount(ep, 1397 xfs_bmbt_set_blockcount(ep,
1503 PREV.br_blockcount + RIGHT.br_blockcount); 1398 PREV.br_blockcount + RIGHT.br_blockcount);
1504 xfs_bmbt_set_state(ep, newext); 1399 xfs_bmbt_set_state(ep, newext);
1505 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, 1400 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1506 XFS_DATA_FORK);
1507 ip->i_df.if_lastex = idx; 1401 ip->i_df.if_lastex = idx;
1508 XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); 1402 xfs_iext_remove(ip, idx + 1, 1, state);
1509 xfs_iext_remove(ifp, idx + 1, 1);
1510 ip->i_d.di_nextents--; 1403 ip->i_d.di_nextents--;
1511 if (cur == NULL) 1404 if (cur == NULL)
1512 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1405 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1535,17 +1428,16 @@ xfs_bmap_add_extent_unwritten_real(
1535 RIGHT.br_blockcount; 1428 RIGHT.br_blockcount;
1536 break; 1429 break;
1537 1430
1538 case MASK2(LEFT_FILLING, RIGHT_FILLING): 1431 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1539 /* 1432 /*
1540 * Setting all of a previous oldext extent to newext. 1433 * Setting all of a previous oldext extent to newext.
1541 * Neither the left nor right neighbors are contiguous with 1434 * Neither the left nor right neighbors are contiguous with
1542 * the new one. 1435 * the new one.
1543 */ 1436 */
1544 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, 1437 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1545 XFS_DATA_FORK);
1546 xfs_bmbt_set_state(ep, newext); 1438 xfs_bmbt_set_state(ep, newext);
1547 XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, 1439 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1548 XFS_DATA_FORK); 1440
1549 ip->i_df.if_lastex = idx; 1441 ip->i_df.if_lastex = idx;
1550 if (cur == NULL) 1442 if (cur == NULL)
1551 rval = XFS_ILOG_DEXT; 1443 rval = XFS_ILOG_DEXT;
@@ -1566,27 +1458,25 @@ xfs_bmap_add_extent_unwritten_real(
1566 temp2 = new->br_blockcount; 1458 temp2 = new->br_blockcount;
1567 break; 1459 break;
1568 1460
1569 case MASK2(LEFT_FILLING, LEFT_CONTIG): 1461 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1570 /* 1462 /*
1571 * Setting the first part of a previous oldext extent to newext. 1463 * Setting the first part of a previous oldext extent to newext.
1572 * The left neighbor is contiguous. 1464 * The left neighbor is contiguous.
1573 */ 1465 */
1574 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, 1466 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1575 XFS_DATA_FORK);
1576 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1467 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1577 LEFT.br_blockcount + new->br_blockcount); 1468 LEFT.br_blockcount + new->br_blockcount);
1578 xfs_bmbt_set_startoff(ep, 1469 xfs_bmbt_set_startoff(ep,
1579 PREV.br_startoff + new->br_blockcount); 1470 PREV.br_startoff + new->br_blockcount);
1580 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, 1471 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1581 XFS_DATA_FORK); 1472
1582 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, 1473 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1583 XFS_DATA_FORK);
1584 xfs_bmbt_set_startblock(ep, 1474 xfs_bmbt_set_startblock(ep,
1585 new->br_startblock + new->br_blockcount); 1475 new->br_startblock + new->br_blockcount);
1586 xfs_bmbt_set_blockcount(ep, 1476 xfs_bmbt_set_blockcount(ep,
1587 PREV.br_blockcount - new->br_blockcount); 1477 PREV.br_blockcount - new->br_blockcount);
1588 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, 1478 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1589 XFS_DATA_FORK); 1479
1590 ip->i_df.if_lastex = idx - 1; 1480 ip->i_df.if_lastex = idx - 1;
1591 if (cur == NULL) 1481 if (cur == NULL)
1592 rval = XFS_ILOG_DEXT; 1482 rval = XFS_ILOG_DEXT;
@@ -1617,22 +1507,21 @@ xfs_bmap_add_extent_unwritten_real(
1617 PREV.br_blockcount; 1507 PREV.br_blockcount;
1618 break; 1508 break;
1619 1509
1620 case MASK(LEFT_FILLING): 1510 case BMAP_LEFT_FILLING:
1621 /* 1511 /*
1622 * Setting the first part of a previous oldext extent to newext. 1512 * Setting the first part of a previous oldext extent to newext.
1623 * The left neighbor is not contiguous. 1513 * The left neighbor is not contiguous.
1624 */ 1514 */
1625 XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK); 1515 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1626 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 1516 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1627 xfs_bmbt_set_startoff(ep, new_endoff); 1517 xfs_bmbt_set_startoff(ep, new_endoff);
1628 xfs_bmbt_set_blockcount(ep, 1518 xfs_bmbt_set_blockcount(ep,
1629 PREV.br_blockcount - new->br_blockcount); 1519 PREV.br_blockcount - new->br_blockcount);
1630 xfs_bmbt_set_startblock(ep, 1520 xfs_bmbt_set_startblock(ep,
1631 new->br_startblock + new->br_blockcount); 1521 new->br_startblock + new->br_blockcount);
1632 XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK); 1522 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1633 XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, 1523
1634 XFS_DATA_FORK); 1524 xfs_iext_insert(ip, idx, 1, new, state);
1635 xfs_iext_insert(ifp, idx, 1, new);
1636 ip->i_df.if_lastex = idx; 1525 ip->i_df.if_lastex = idx;
1637 ip->i_d.di_nextents++; 1526 ip->i_d.di_nextents++;
1638 if (cur == NULL) 1527 if (cur == NULL)
@@ -1660,24 +1549,21 @@ xfs_bmap_add_extent_unwritten_real(
1660 temp2 = PREV.br_blockcount; 1549 temp2 = PREV.br_blockcount;
1661 break; 1550 break;
1662 1551
1663 case MASK2(RIGHT_FILLING, RIGHT_CONTIG): 1552 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1664 /* 1553 /*
1665 * Setting the last part of a previous oldext extent to newext. 1554 * Setting the last part of a previous oldext extent to newext.
1666 * The right neighbor is contiguous with the new allocation. 1555 * The right neighbor is contiguous with the new allocation.
1667 */ 1556 */
1668 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, 1557 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1669 XFS_DATA_FORK); 1558 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1670 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1,
1671 XFS_DATA_FORK);
1672 xfs_bmbt_set_blockcount(ep, 1559 xfs_bmbt_set_blockcount(ep,
1673 PREV.br_blockcount - new->br_blockcount); 1560 PREV.br_blockcount - new->br_blockcount);
1674 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, 1561 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1675 XFS_DATA_FORK);
1676 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), 1562 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1677 new->br_startoff, new->br_startblock, 1563 new->br_startoff, new->br_startblock,
1678 new->br_blockcount + RIGHT.br_blockcount, newext); 1564 new->br_blockcount + RIGHT.br_blockcount, newext);
1679 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, 1565 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1680 XFS_DATA_FORK); 1566
1681 ip->i_df.if_lastex = idx + 1; 1567 ip->i_df.if_lastex = idx + 1;
1682 if (cur == NULL) 1568 if (cur == NULL)
1683 rval = XFS_ILOG_DEXT; 1569 rval = XFS_ILOG_DEXT;
@@ -1707,18 +1593,17 @@ xfs_bmap_add_extent_unwritten_real(
1707 RIGHT.br_blockcount; 1593 RIGHT.br_blockcount;
1708 break; 1594 break;
1709 1595
1710 case MASK(RIGHT_FILLING): 1596 case BMAP_RIGHT_FILLING:
1711 /* 1597 /*
1712 * Setting the last part of a previous oldext extent to newext. 1598 * Setting the last part of a previous oldext extent to newext.
1713 * The right neighbor is not contiguous. 1599 * The right neighbor is not contiguous.
1714 */ 1600 */
1715 XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1601 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1716 xfs_bmbt_set_blockcount(ep, 1602 xfs_bmbt_set_blockcount(ep,
1717 PREV.br_blockcount - new->br_blockcount); 1603 PREV.br_blockcount - new->br_blockcount);
1718 XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1604 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1719 XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, 1605
1720 XFS_DATA_FORK); 1606 xfs_iext_insert(ip, idx + 1, 1, new, state);
1721 xfs_iext_insert(ifp, idx + 1, 1, new);
1722 ip->i_df.if_lastex = idx + 1; 1607 ip->i_df.if_lastex = idx + 1;
1723 ip->i_d.di_nextents++; 1608 ip->i_d.di_nextents++;
1724 if (cur == NULL) 1609 if (cur == NULL)
@@ -1756,19 +1641,18 @@ xfs_bmap_add_extent_unwritten_real(
1756 * newext. Contiguity is impossible here. 1641 * newext. Contiguity is impossible here.
1757 * One extent becomes three extents. 1642 * One extent becomes three extents.
1758 */ 1643 */
1759 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK); 1644 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1760 xfs_bmbt_set_blockcount(ep, 1645 xfs_bmbt_set_blockcount(ep,
1761 new->br_startoff - PREV.br_startoff); 1646 new->br_startoff - PREV.br_startoff);
1762 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); 1647 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1648
1763 r[0] = *new; 1649 r[0] = *new;
1764 r[1].br_startoff = new_endoff; 1650 r[1].br_startoff = new_endoff;
1765 r[1].br_blockcount = 1651 r[1].br_blockcount =
1766 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1652 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1767 r[1].br_startblock = new->br_startblock + new->br_blockcount; 1653 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1768 r[1].br_state = oldext; 1654 r[1].br_state = oldext;
1769 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1], 1655 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1770 XFS_DATA_FORK);
1771 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1772 ip->i_df.if_lastex = idx + 1; 1656 ip->i_df.if_lastex = idx + 1;
1773 ip->i_d.di_nextents += 2; 1657 ip->i_d.di_nextents += 2;
1774 if (cur == NULL) 1658 if (cur == NULL)
@@ -1813,13 +1697,13 @@ xfs_bmap_add_extent_unwritten_real(
1813 temp2 = PREV.br_blockcount; 1697 temp2 = PREV.br_blockcount;
1814 break; 1698 break;
1815 1699
1816 case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1700 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1817 case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1701 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1818 case MASK2(LEFT_FILLING, RIGHT_CONTIG): 1702 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1819 case MASK2(RIGHT_FILLING, LEFT_CONTIG): 1703 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1820 case MASK2(LEFT_CONTIG, RIGHT_CONTIG): 1704 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1821 case MASK(LEFT_CONTIG): 1705 case BMAP_LEFT_CONTIG:
1822 case MASK(RIGHT_CONTIG): 1706 case BMAP_RIGHT_CONTIG:
1823 /* 1707 /*
1824 * These cases are all impossible. 1708 * These cases are all impossible.
1825 */ 1709 */
@@ -1839,14 +1723,6 @@ done:
1839#undef LEFT 1723#undef LEFT
1840#undef RIGHT 1724#undef RIGHT
1841#undef PREV 1725#undef PREV
1842#undef MASK
1843#undef MASK2
1844#undef MASK3
1845#undef MASK4
1846#undef STATE_SET
1847#undef STATE_TEST
1848#undef STATE_SET_TEST
1849#undef SWITCH_STATE
1850} 1726}
1851 1727
1852/* 1728/*
@@ -1872,62 +1748,57 @@ xfs_bmap_add_extent_hole_delay(
1872 int state; /* state bits, accessed thru macros */ 1748 int state; /* state bits, accessed thru macros */
1873 xfs_filblks_t temp=0; /* temp for indirect calculations */ 1749 xfs_filblks_t temp=0; /* temp for indirect calculations */
1874 xfs_filblks_t temp2=0; 1750 xfs_filblks_t temp2=0;
1875 enum { /* bit number definitions for state */
1876 LEFT_CONTIG, RIGHT_CONTIG,
1877 LEFT_DELAY, RIGHT_DELAY,
1878 LEFT_VALID, RIGHT_VALID
1879 };
1880
1881#define MASK(b) (1 << (b))
1882#define MASK2(a,b) (MASK(a) | MASK(b))
1883#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1884#define STATE_TEST(b) (state & MASK(b))
1885#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
1886 ((state &= ~MASK(b)), 0))
1887#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
1888 1751
1889 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1752 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1890 ep = xfs_iext_get_ext(ifp, idx); 1753 ep = xfs_iext_get_ext(ifp, idx);
1891 state = 0; 1754 state = 0;
1892 ASSERT(isnullstartblock(new->br_startblock)); 1755 ASSERT(isnullstartblock(new->br_startblock));
1756
1893 /* 1757 /*
1894 * Check and set flags if this segment has a left neighbor 1758 * Check and set flags if this segment has a left neighbor
1895 */ 1759 */
1896 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1760 if (idx > 0) {
1761 state |= BMAP_LEFT_VALID;
1897 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); 1762 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1898 STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); 1763
1764 if (isnullstartblock(left.br_startblock))
1765 state |= BMAP_LEFT_DELAY;
1899 } 1766 }
1767
1900 /* 1768 /*
1901 * Check and set flags if the current (right) segment exists. 1769 * Check and set flags if the current (right) segment exists.
1902 * If it doesn't exist, we're converting the hole at end-of-file. 1770 * If it doesn't exist, we're converting the hole at end-of-file.
1903 */ 1771 */
1904 if (STATE_SET_TEST(RIGHT_VALID, 1772 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1905 idx < 1773 state |= BMAP_RIGHT_VALID;
1906 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1907 xfs_bmbt_get_all(ep, &right); 1774 xfs_bmbt_get_all(ep, &right);
1908 STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); 1775
1776 if (isnullstartblock(right.br_startblock))
1777 state |= BMAP_RIGHT_DELAY;
1909 } 1778 }
1779
1910 /* 1780 /*
1911 * Set contiguity flags on the left and right neighbors. 1781 * Set contiguity flags on the left and right neighbors.
1912 * Don't let extents get too large, even if the pieces are contiguous. 1782 * Don't let extents get too large, even if the pieces are contiguous.
1913 */ 1783 */
1914 STATE_SET(LEFT_CONTIG, 1784 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
1915 STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) && 1785 left.br_startoff + left.br_blockcount == new->br_startoff &&
1916 left.br_startoff + left.br_blockcount == new->br_startoff && 1786 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1917 left.br_blockcount + new->br_blockcount <= MAXEXTLEN); 1787 state |= BMAP_LEFT_CONTIG;
1918 STATE_SET(RIGHT_CONTIG, 1788
1919 STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) && 1789 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
1920 new->br_startoff + new->br_blockcount == right.br_startoff && 1790 new->br_startoff + new->br_blockcount == right.br_startoff &&
1921 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 1791 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1922 (!STATE_TEST(LEFT_CONTIG) || 1792 (!(state & BMAP_LEFT_CONTIG) ||
1923 (left.br_blockcount + new->br_blockcount + 1793 (left.br_blockcount + new->br_blockcount +
1924 right.br_blockcount <= MAXEXTLEN))); 1794 right.br_blockcount <= MAXEXTLEN)))
1795 state |= BMAP_RIGHT_CONTIG;
1796
1925 /* 1797 /*
1926 * Switch out based on the contiguity flags. 1798 * Switch out based on the contiguity flags.
1927 */ 1799 */
1928 switch (SWITCH_STATE) { 1800 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1929 1801 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1930 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1931 /* 1802 /*
1932 * New allocation is contiguous with delayed allocations 1803 * New allocation is contiguous with delayed allocations
1933 * on the left and on the right. 1804 * on the left and on the right.
@@ -1935,8 +1806,8 @@ xfs_bmap_add_extent_hole_delay(
1935 */ 1806 */
1936 temp = left.br_blockcount + new->br_blockcount + 1807 temp = left.br_blockcount + new->br_blockcount +
1937 right.br_blockcount; 1808 right.br_blockcount;
1938 XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, 1809
1939 XFS_DATA_FORK); 1810 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1940 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1811 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1941 oldlen = startblockval(left.br_startblock) + 1812 oldlen = startblockval(left.br_startblock) +
1942 startblockval(new->br_startblock) + 1813 startblockval(new->br_startblock) +
@@ -1944,53 +1815,52 @@ xfs_bmap_add_extent_hole_delay(
1944 newlen = xfs_bmap_worst_indlen(ip, temp); 1815 newlen = xfs_bmap_worst_indlen(ip, temp);
1945 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1816 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1946 nullstartblock((int)newlen)); 1817 nullstartblock((int)newlen));
1947 XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, 1818 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1948 XFS_DATA_FORK); 1819
1949 XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK); 1820 xfs_iext_remove(ip, idx, 1, state);
1950 xfs_iext_remove(ifp, idx, 1);
1951 ip->i_df.if_lastex = idx - 1; 1821 ip->i_df.if_lastex = idx - 1;
1952 /* DELTA: Two in-core extents were replaced by one. */ 1822 /* DELTA: Two in-core extents were replaced by one. */
1953 temp2 = temp; 1823 temp2 = temp;
1954 temp = left.br_startoff; 1824 temp = left.br_startoff;
1955 break; 1825 break;
1956 1826
1957 case MASK(LEFT_CONTIG): 1827 case BMAP_LEFT_CONTIG:
1958 /* 1828 /*
1959 * New allocation is contiguous with a delayed allocation 1829 * New allocation is contiguous with a delayed allocation
1960 * on the left. 1830 * on the left.
1961 * Merge the new allocation with the left neighbor. 1831 * Merge the new allocation with the left neighbor.
1962 */ 1832 */
1963 temp = left.br_blockcount + new->br_blockcount; 1833 temp = left.br_blockcount + new->br_blockcount;
1964 XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, 1834 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1965 XFS_DATA_FORK);
1966 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1835 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1967 oldlen = startblockval(left.br_startblock) + 1836 oldlen = startblockval(left.br_startblock) +
1968 startblockval(new->br_startblock); 1837 startblockval(new->br_startblock);
1969 newlen = xfs_bmap_worst_indlen(ip, temp); 1838 newlen = xfs_bmap_worst_indlen(ip, temp);
1970 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1839 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1971 nullstartblock((int)newlen)); 1840 nullstartblock((int)newlen));
1972 XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, 1841 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1973 XFS_DATA_FORK); 1842
1974 ip->i_df.if_lastex = idx - 1; 1843 ip->i_df.if_lastex = idx - 1;
1975 /* DELTA: One in-core extent grew into a hole. */ 1844 /* DELTA: One in-core extent grew into a hole. */
1976 temp2 = temp; 1845 temp2 = temp;
1977 temp = left.br_startoff; 1846 temp = left.br_startoff;
1978 break; 1847 break;
1979 1848
1980 case MASK(RIGHT_CONTIG): 1849 case BMAP_RIGHT_CONTIG:
1981 /* 1850 /*
1982 * New allocation is contiguous with a delayed allocation 1851 * New allocation is contiguous with a delayed allocation
1983 * on the right. 1852 * on the right.
1984 * Merge the new allocation with the right neighbor. 1853 * Merge the new allocation with the right neighbor.
1985 */ 1854 */
1986 XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK); 1855 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1987 temp = new->br_blockcount + right.br_blockcount; 1856 temp = new->br_blockcount + right.br_blockcount;
1988 oldlen = startblockval(new->br_startblock) + 1857 oldlen = startblockval(new->br_startblock) +
1989 startblockval(right.br_startblock); 1858 startblockval(right.br_startblock);
1990 newlen = xfs_bmap_worst_indlen(ip, temp); 1859 newlen = xfs_bmap_worst_indlen(ip, temp);
1991 xfs_bmbt_set_allf(ep, new->br_startoff, 1860 xfs_bmbt_set_allf(ep, new->br_startoff,
1992 nullstartblock((int)newlen), temp, right.br_state); 1861 nullstartblock((int)newlen), temp, right.br_state);
1993 XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK); 1862 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1863
1994 ip->i_df.if_lastex = idx; 1864 ip->i_df.if_lastex = idx;
1995 /* DELTA: One in-core extent grew into a hole. */ 1865 /* DELTA: One in-core extent grew into a hole. */
1996 temp2 = temp; 1866 temp2 = temp;
@@ -2004,9 +1874,7 @@ xfs_bmap_add_extent_hole_delay(
2004 * Insert a new entry. 1874 * Insert a new entry.
2005 */ 1875 */
2006 oldlen = newlen = 0; 1876 oldlen = newlen = 0;
2007 XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, 1877 xfs_iext_insert(ip, idx, 1, new, state);
2008 XFS_DATA_FORK);
2009 xfs_iext_insert(ifp, idx, 1, new);
2010 ip->i_df.if_lastex = idx; 1878 ip->i_df.if_lastex = idx;
2011 /* DELTA: A new in-core extent was added in a hole. */ 1879 /* DELTA: A new in-core extent was added in a hole. */
2012 temp2 = new->br_blockcount; 1880 temp2 = new->br_blockcount;
@@ -2030,12 +1898,6 @@ xfs_bmap_add_extent_hole_delay(
2030 } 1898 }
2031 *logflagsp = 0; 1899 *logflagsp = 0;
2032 return 0; 1900 return 0;
2033#undef MASK
2034#undef MASK2
2035#undef STATE_SET
2036#undef STATE_TEST
2037#undef STATE_SET_TEST
2038#undef SWITCH_STATE
2039} 1901}
2040 1902
2041/* 1903/*
@@ -2062,83 +1924,75 @@ xfs_bmap_add_extent_hole_real(
2062 int state; /* state bits, accessed thru macros */ 1924 int state; /* state bits, accessed thru macros */
2063 xfs_filblks_t temp=0; 1925 xfs_filblks_t temp=0;
2064 xfs_filblks_t temp2=0; 1926 xfs_filblks_t temp2=0;
2065 enum { /* bit number definitions for state */
2066 LEFT_CONTIG, RIGHT_CONTIG,
2067 LEFT_DELAY, RIGHT_DELAY,
2068 LEFT_VALID, RIGHT_VALID
2069 };
2070
2071#define MASK(b) (1 << (b))
2072#define MASK2(a,b) (MASK(a) | MASK(b))
2073#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
2074#define STATE_TEST(b) (state & MASK(b))
2075#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
2076 ((state &= ~MASK(b)), 0))
2077#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
2078 1927
2079 ifp = XFS_IFORK_PTR(ip, whichfork); 1928 ifp = XFS_IFORK_PTR(ip, whichfork);
2080 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 1929 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
2081 ep = xfs_iext_get_ext(ifp, idx); 1930 ep = xfs_iext_get_ext(ifp, idx);
2082 state = 0; 1931 state = 0;
1932
1933 if (whichfork == XFS_ATTR_FORK)
1934 state |= BMAP_ATTRFORK;
1935
2083 /* 1936 /*
2084 * Check and set flags if this segment has a left neighbor. 1937 * Check and set flags if this segment has a left neighbor.
2085 */ 1938 */
2086 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1939 if (idx > 0) {
1940 state |= BMAP_LEFT_VALID;
2087 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); 1941 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
2088 STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); 1942 if (isnullstartblock(left.br_startblock))
1943 state |= BMAP_LEFT_DELAY;
2089 } 1944 }
1945
2090 /* 1946 /*
2091 * Check and set flags if this segment has a current value. 1947 * Check and set flags if this segment has a current value.
2092 * Not true if we're inserting into the "hole" at eof. 1948 * Not true if we're inserting into the "hole" at eof.
2093 */ 1949 */
2094 if (STATE_SET_TEST(RIGHT_VALID, 1950 if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2095 idx < 1951 state |= BMAP_RIGHT_VALID;
2096 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
2097 xfs_bmbt_get_all(ep, &right); 1952 xfs_bmbt_get_all(ep, &right);
2098 STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); 1953 if (isnullstartblock(right.br_startblock))
1954 state |= BMAP_RIGHT_DELAY;
2099 } 1955 }
1956
2100 /* 1957 /*
2101 * We're inserting a real allocation between "left" and "right". 1958 * We're inserting a real allocation between "left" and "right".
2102 * Set the contiguity flags. Don't let extents get too large. 1959 * Set the contiguity flags. Don't let extents get too large.
2103 */ 1960 */
2104 STATE_SET(LEFT_CONTIG, 1961 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2105 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && 1962 left.br_startoff + left.br_blockcount == new->br_startoff &&
2106 left.br_startoff + left.br_blockcount == new->br_startoff && 1963 left.br_startblock + left.br_blockcount == new->br_startblock &&
2107 left.br_startblock + left.br_blockcount == new->br_startblock && 1964 left.br_state == new->br_state &&
2108 left.br_state == new->br_state && 1965 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2109 left.br_blockcount + new->br_blockcount <= MAXEXTLEN); 1966 state |= BMAP_LEFT_CONTIG;
2110 STATE_SET(RIGHT_CONTIG, 1967
2111 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && 1968 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2112 new->br_startoff + new->br_blockcount == right.br_startoff && 1969 new->br_startoff + new->br_blockcount == right.br_startoff &&
2113 new->br_startblock + new->br_blockcount == 1970 new->br_startblock + new->br_blockcount == right.br_startblock &&
2114 right.br_startblock && 1971 new->br_state == right.br_state &&
2115 new->br_state == right.br_state && 1972 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2116 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 1973 (!(state & BMAP_LEFT_CONTIG) ||
2117 (!STATE_TEST(LEFT_CONTIG) || 1974 left.br_blockcount + new->br_blockcount +
2118 left.br_blockcount + new->br_blockcount + 1975 right.br_blockcount <= MAXEXTLEN))
2119 right.br_blockcount <= MAXEXTLEN)); 1976 state |= BMAP_RIGHT_CONTIG;
2120 1977
2121 error = 0; 1978 error = 0;
2122 /* 1979 /*
2123 * Select which case we're in here, and implement it. 1980 * Select which case we're in here, and implement it.
2124 */ 1981 */
2125 switch (SWITCH_STATE) { 1982 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2126 1983 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2127 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
2128 /* 1984 /*
2129 * New allocation is contiguous with real allocations on the 1985 * New allocation is contiguous with real allocations on the
2130 * left and on the right. 1986 * left and on the right.
2131 * Merge all three into a single extent record. 1987 * Merge all three into a single extent record.
2132 */ 1988 */
2133 XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, 1989 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
2134 whichfork);
2135 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1990 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2136 left.br_blockcount + new->br_blockcount + 1991 left.br_blockcount + new->br_blockcount +
2137 right.br_blockcount); 1992 right.br_blockcount);
2138 XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, 1993 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
2139 whichfork); 1994
2140 XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork); 1995 xfs_iext_remove(ip, idx, 1, state);
2141 xfs_iext_remove(ifp, idx, 1);
2142 ifp->if_lastex = idx - 1; 1996 ifp->if_lastex = idx - 1;
2143 XFS_IFORK_NEXT_SET(ip, whichfork, 1997 XFS_IFORK_NEXT_SET(ip, whichfork,
2144 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 1998 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -2173,16 +2027,17 @@ xfs_bmap_add_extent_hole_real(
2173 right.br_blockcount; 2027 right.br_blockcount;
2174 break; 2028 break;
2175 2029
2176 case MASK(LEFT_CONTIG): 2030 case BMAP_LEFT_CONTIG:
2177 /* 2031 /*
2178 * New allocation is contiguous with a real allocation 2032 * New allocation is contiguous with a real allocation
2179 * on the left. 2033 * on the left.
2180 * Merge the new allocation with the left neighbor. 2034 * Merge the new allocation with the left neighbor.
2181 */ 2035 */
2182 XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork); 2036 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
2183 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 2037 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2184 left.br_blockcount + new->br_blockcount); 2038 left.br_blockcount + new->br_blockcount);
2185 XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork); 2039 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
2040
2186 ifp->if_lastex = idx - 1; 2041 ifp->if_lastex = idx - 1;
2187 if (cur == NULL) { 2042 if (cur == NULL) {
2188 rval = xfs_ilog_fext(whichfork); 2043 rval = xfs_ilog_fext(whichfork);
@@ -2207,17 +2062,18 @@ xfs_bmap_add_extent_hole_real(
2207 new->br_blockcount; 2062 new->br_blockcount;
2208 break; 2063 break;
2209 2064
2210 case MASK(RIGHT_CONTIG): 2065 case BMAP_RIGHT_CONTIG:
2211 /* 2066 /*
2212 * New allocation is contiguous with a real allocation 2067 * New allocation is contiguous with a real allocation
2213 * on the right. 2068 * on the right.
2214 * Merge the new allocation with the right neighbor. 2069 * Merge the new allocation with the right neighbor.
2215 */ 2070 */
2216 XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork); 2071 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
2217 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, 2072 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2218 new->br_blockcount + right.br_blockcount, 2073 new->br_blockcount + right.br_blockcount,
2219 right.br_state); 2074 right.br_state);
2220 XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork); 2075 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
2076
2221 ifp->if_lastex = idx; 2077 ifp->if_lastex = idx;
2222 if (cur == NULL) { 2078 if (cur == NULL) {
2223 rval = xfs_ilog_fext(whichfork); 2079 rval = xfs_ilog_fext(whichfork);
@@ -2248,8 +2104,7 @@ xfs_bmap_add_extent_hole_real(
2248 * real allocation. 2104 * real allocation.
2249 * Insert a new entry. 2105 * Insert a new entry.
2250 */ 2106 */
2251 XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork); 2107 xfs_iext_insert(ip, idx, 1, new, state);
2252 xfs_iext_insert(ifp, idx, 1, new);
2253 ifp->if_lastex = idx; 2108 ifp->if_lastex = idx;
2254 XFS_IFORK_NEXT_SET(ip, whichfork, 2109 XFS_IFORK_NEXT_SET(ip, whichfork,
2255 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2110 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
@@ -2283,12 +2138,6 @@ xfs_bmap_add_extent_hole_real(
2283done: 2138done:
2284 *logflagsp = rval; 2139 *logflagsp = rval;
2285 return error; 2140 return error;
2286#undef MASK
2287#undef MASK2
2288#undef STATE_SET
2289#undef STATE_TEST
2290#undef STATE_SET_TEST
2291#undef SWITCH_STATE
2292} 2141}
2293 2142
2294/* 2143/*
@@ -3115,8 +2964,13 @@ xfs_bmap_del_extent(
3115 uint qfield; /* quota field to update */ 2964 uint qfield; /* quota field to update */
3116 xfs_filblks_t temp; /* for indirect length calculations */ 2965 xfs_filblks_t temp; /* for indirect length calculations */
3117 xfs_filblks_t temp2; /* for indirect length calculations */ 2966 xfs_filblks_t temp2; /* for indirect length calculations */
2967 int state = 0;
3118 2968
3119 XFS_STATS_INC(xs_del_exlist); 2969 XFS_STATS_INC(xs_del_exlist);
2970
2971 if (whichfork == XFS_ATTR_FORK)
2972 state |= BMAP_ATTRFORK;
2973
3120 mp = ip->i_mount; 2974 mp = ip->i_mount;
3121 ifp = XFS_IFORK_PTR(ip, whichfork); 2975 ifp = XFS_IFORK_PTR(ip, whichfork);
3122 ASSERT((idx >= 0) && (idx < ifp->if_bytes / 2976 ASSERT((idx >= 0) && (idx < ifp->if_bytes /
@@ -3196,8 +3050,8 @@ xfs_bmap_del_extent(
3196 /* 3050 /*
3197 * Matches the whole extent. Delete the entry. 3051 * Matches the whole extent. Delete the entry.
3198 */ 3052 */
3199 XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork); 3053 xfs_iext_remove(ip, idx, 1,
3200 xfs_iext_remove(ifp, idx, 1); 3054 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
3201 ifp->if_lastex = idx; 3055 ifp->if_lastex = idx;
3202 if (delay) 3056 if (delay)
3203 break; 3057 break;
@@ -3217,7 +3071,7 @@ xfs_bmap_del_extent(
3217 /* 3071 /*
3218 * Deleting the first part of the extent. 3072 * Deleting the first part of the extent.
3219 */ 3073 */
3220 XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork); 3074 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3221 xfs_bmbt_set_startoff(ep, del_endoff); 3075 xfs_bmbt_set_startoff(ep, del_endoff);
3222 temp = got.br_blockcount - del->br_blockcount; 3076 temp = got.br_blockcount - del->br_blockcount;
3223 xfs_bmbt_set_blockcount(ep, temp); 3077 xfs_bmbt_set_blockcount(ep, temp);
@@ -3226,13 +3080,12 @@ xfs_bmap_del_extent(
3226 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3080 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3227 da_old); 3081 da_old);
3228 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 3082 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3229 XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, 3083 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3230 whichfork);
3231 da_new = temp; 3084 da_new = temp;
3232 break; 3085 break;
3233 } 3086 }
3234 xfs_bmbt_set_startblock(ep, del_endblock); 3087 xfs_bmbt_set_startblock(ep, del_endblock);
3235 XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); 3088 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3236 if (!cur) { 3089 if (!cur) {
3237 flags |= xfs_ilog_fext(whichfork); 3090 flags |= xfs_ilog_fext(whichfork);
3238 break; 3091 break;
@@ -3248,19 +3101,18 @@ xfs_bmap_del_extent(
3248 * Deleting the last part of the extent. 3101 * Deleting the last part of the extent.
3249 */ 3102 */
3250 temp = got.br_blockcount - del->br_blockcount; 3103 temp = got.br_blockcount - del->br_blockcount;
3251 XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork); 3104 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3252 xfs_bmbt_set_blockcount(ep, temp); 3105 xfs_bmbt_set_blockcount(ep, temp);
3253 ifp->if_lastex = idx; 3106 ifp->if_lastex = idx;
3254 if (delay) { 3107 if (delay) {
3255 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3108 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3256 da_old); 3109 da_old);
3257 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 3110 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3258 XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, 3111 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3259 whichfork);
3260 da_new = temp; 3112 da_new = temp;
3261 break; 3113 break;
3262 } 3114 }
3263 XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); 3115 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3264 if (!cur) { 3116 if (!cur) {
3265 flags |= xfs_ilog_fext(whichfork); 3117 flags |= xfs_ilog_fext(whichfork);
3266 break; 3118 break;
@@ -3277,7 +3129,7 @@ xfs_bmap_del_extent(
3277 * Deleting the middle of the extent. 3129 * Deleting the middle of the extent.
3278 */ 3130 */
3279 temp = del->br_startoff - got.br_startoff; 3131 temp = del->br_startoff - got.br_startoff;
3280 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork); 3132 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3281 xfs_bmbt_set_blockcount(ep, temp); 3133 xfs_bmbt_set_blockcount(ep, temp);
3282 new.br_startoff = del_endoff; 3134 new.br_startoff = del_endoff;
3283 temp2 = got_endoff - del_endoff; 3135 temp2 = got_endoff - del_endoff;
@@ -3364,10 +3216,8 @@ xfs_bmap_del_extent(
3364 } 3216 }
3365 } 3217 }
3366 } 3218 }
3367 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork); 3219 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3368 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL, 3220 xfs_iext_insert(ip, idx + 1, 1, &new, state);
3369 whichfork);
3370 xfs_iext_insert(ifp, idx + 1, 1, &new);
3371 ifp->if_lastex = idx + 1; 3221 ifp->if_lastex = idx + 1;
3372 break; 3222 break;
3373 } 3223 }
@@ -3687,7 +3537,9 @@ xfs_bmap_local_to_extents(
3687 xfs_iext_add(ifp, 0, 1); 3537 xfs_iext_add(ifp, 0, 1);
3688 ep = xfs_iext_get_ext(ifp, 0); 3538 ep = xfs_iext_get_ext(ifp, 0);
3689 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); 3539 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3690 XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork); 3540 trace_xfs_bmap_post_update(ip, 0,
3541 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
3542 _THIS_IP_);
3691 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 3543 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3692 ip->i_d.di_nblocks = 1; 3544 ip->i_d.di_nblocks = 1;
3693 xfs_trans_mod_dquot_byino(tp, ip, 3545 xfs_trans_mod_dquot_byino(tp, ip,
@@ -3800,158 +3652,6 @@ xfs_bmap_search_extents(
3800 return ep; 3652 return ep;
3801} 3653}
3802 3654
3803
3804#ifdef XFS_BMAP_TRACE
3805ktrace_t *xfs_bmap_trace_buf;
3806
3807/*
3808 * Add a bmap trace buffer entry. Base routine for the others.
3809 */
3810STATIC void
3811xfs_bmap_trace_addentry(
3812 int opcode, /* operation */
3813 const char *fname, /* function name */
3814 char *desc, /* operation description */
3815 xfs_inode_t *ip, /* incore inode pointer */
3816 xfs_extnum_t idx, /* index of entry(ies) */
3817 xfs_extnum_t cnt, /* count of entries, 1 or 2 */
3818 xfs_bmbt_rec_host_t *r1, /* first record */
3819 xfs_bmbt_rec_host_t *r2, /* second record or null */
3820 int whichfork) /* data or attr fork */
3821{
3822 xfs_bmbt_rec_host_t tr2;
3823
3824 ASSERT(cnt == 1 || cnt == 2);
3825 ASSERT(r1 != NULL);
3826 if (cnt == 1) {
3827 ASSERT(r2 == NULL);
3828 r2 = &tr2;
3829 memset(&tr2, 0, sizeof(tr2));
3830 } else
3831 ASSERT(r2 != NULL);
3832 ktrace_enter(xfs_bmap_trace_buf,
3833 (void *)(__psint_t)(opcode | (whichfork << 16)),
3834 (void *)fname, (void *)desc, (void *)ip,
3835 (void *)(__psint_t)idx,
3836 (void *)(__psint_t)cnt,
3837 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3838 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3839 (void *)(__psunsigned_t)(r1->l0 >> 32),
3840 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3841 (void *)(__psunsigned_t)(r1->l1 >> 32),
3842 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3843 (void *)(__psunsigned_t)(r2->l0 >> 32),
3844 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3845 (void *)(__psunsigned_t)(r2->l1 >> 32),
3846 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3847 );
3848 ASSERT(ip->i_xtrace);
3849 ktrace_enter(ip->i_xtrace,
3850 (void *)(__psint_t)(opcode | (whichfork << 16)),
3851 (void *)fname, (void *)desc, (void *)ip,
3852 (void *)(__psint_t)idx,
3853 (void *)(__psint_t)cnt,
3854 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3855 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3856 (void *)(__psunsigned_t)(r1->l0 >> 32),
3857 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3858 (void *)(__psunsigned_t)(r1->l1 >> 32),
3859 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3860 (void *)(__psunsigned_t)(r2->l0 >> 32),
3861 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3862 (void *)(__psunsigned_t)(r2->l1 >> 32),
3863 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3864 );
3865}
3866
3867/*
3868 * Add bmap trace entry prior to a call to xfs_iext_remove.
3869 */
3870STATIC void
3871xfs_bmap_trace_delete(
3872 const char *fname, /* function name */
3873 char *desc, /* operation description */
3874 xfs_inode_t *ip, /* incore inode pointer */
3875 xfs_extnum_t idx, /* index of entry(entries) deleted */
3876 xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
3877 int whichfork) /* data or attr fork */
3878{
3879 xfs_ifork_t *ifp; /* inode fork pointer */
3880
3881 ifp = XFS_IFORK_PTR(ip, whichfork);
3882 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
3883 cnt, xfs_iext_get_ext(ifp, idx),
3884 cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
3885 whichfork);
3886}
3887
3888/*
3889 * Add bmap trace entry prior to a call to xfs_iext_insert, or
3890 * reading in the extents list from the disk (in the btree).
3891 */
3892STATIC void
3893xfs_bmap_trace_insert(
3894 const char *fname, /* function name */
3895 char *desc, /* operation description */
3896 xfs_inode_t *ip, /* incore inode pointer */
3897 xfs_extnum_t idx, /* index of entry(entries) inserted */
3898 xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
3899 xfs_bmbt_irec_t *r1, /* inserted record 1 */
3900 xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
3901 int whichfork) /* data or attr fork */
3902{
3903 xfs_bmbt_rec_host_t tr1; /* compressed record 1 */
3904 xfs_bmbt_rec_host_t tr2; /* compressed record 2 if needed */
3905
3906 xfs_bmbt_set_all(&tr1, r1);
3907 if (cnt == 2) {
3908 ASSERT(r2 != NULL);
3909 xfs_bmbt_set_all(&tr2, r2);
3910 } else {
3911 ASSERT(cnt == 1);
3912 ASSERT(r2 == NULL);
3913 }
3914 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
3915 cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
3916}
3917
3918/*
3919 * Add bmap trace entry after updating an extent record in place.
3920 */
3921STATIC void
3922xfs_bmap_trace_post_update(
3923 const char *fname, /* function name */
3924 char *desc, /* operation description */
3925 xfs_inode_t *ip, /* incore inode pointer */
3926 xfs_extnum_t idx, /* index of entry updated */
3927 int whichfork) /* data or attr fork */
3928{
3929 xfs_ifork_t *ifp; /* inode fork pointer */
3930
3931 ifp = XFS_IFORK_PTR(ip, whichfork);
3932 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
3933 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3934}
3935
3936/*
3937 * Add bmap trace entry prior to updating an extent record in place.
3938 */
3939STATIC void
3940xfs_bmap_trace_pre_update(
3941 const char *fname, /* function name */
3942 char *desc, /* operation description */
3943 xfs_inode_t *ip, /* incore inode pointer */
3944 xfs_extnum_t idx, /* index of entry to be updated */
3945 int whichfork) /* data or attr fork */
3946{
3947 xfs_ifork_t *ifp; /* inode fork pointer */
3948
3949 ifp = XFS_IFORK_PTR(ip, whichfork);
3950 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
3951 xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3952}
3953#endif /* XFS_BMAP_TRACE */
3954
3955/* 3655/*
3956 * Compute the worst-case number of indirect blocks that will be used 3656 * Compute the worst-case number of indirect blocks that will be used
3957 * for ip's delayed extent of length "len". 3657 * for ip's delayed extent of length "len".
@@ -3983,37 +3683,6 @@ xfs_bmap_worst_indlen(
3983 return rval; 3683 return rval;
3984} 3684}
3985 3685
3986#if defined(XFS_RW_TRACE)
3987STATIC void
3988xfs_bunmap_trace(
3989 xfs_inode_t *ip,
3990 xfs_fileoff_t bno,
3991 xfs_filblks_t len,
3992 int flags,
3993 inst_t *ra)
3994{
3995 if (ip->i_rwtrace == NULL)
3996 return;
3997 ktrace_enter(ip->i_rwtrace,
3998 (void *)(__psint_t)XFS_BUNMAP,
3999 (void *)ip,
4000 (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
4001 (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
4002 (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
4003 (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
4004 (void *)(__psint_t)len,
4005 (void *)(__psint_t)flags,
4006 (void *)(unsigned long)current_cpu(),
4007 (void *)ra,
4008 (void *)0,
4009 (void *)0,
4010 (void *)0,
4011 (void *)0,
4012 (void *)0,
4013 (void *)0);
4014}
4015#endif
4016
4017/* 3686/*
4018 * Convert inode from non-attributed to attributed. 3687 * Convert inode from non-attributed to attributed.
4019 * Must not be in a transaction, ip must not be locked. 3688 * Must not be in a transaction, ip must not be locked.
@@ -4702,34 +4371,30 @@ error0:
4702 return XFS_ERROR(EFSCORRUPTED); 4371 return XFS_ERROR(EFSCORRUPTED);
4703} 4372}
4704 4373
4705#ifdef XFS_BMAP_TRACE 4374#ifdef DEBUG
4706/* 4375/*
4707 * Add bmap trace insert entries for all the contents of the extent records. 4376 * Add bmap trace insert entries for all the contents of the extent records.
4708 */ 4377 */
4709void 4378void
4710xfs_bmap_trace_exlist( 4379xfs_bmap_trace_exlist(
4711 const char *fname, /* function name */
4712 xfs_inode_t *ip, /* incore inode pointer */ 4380 xfs_inode_t *ip, /* incore inode pointer */
4713 xfs_extnum_t cnt, /* count of entries in the list */ 4381 xfs_extnum_t cnt, /* count of entries in the list */
4714 int whichfork) /* data or attr fork */ 4382 int whichfork, /* data or attr fork */
4383 unsigned long caller_ip)
4715{ 4384{
4716 xfs_bmbt_rec_host_t *ep; /* current extent record */
4717 xfs_extnum_t idx; /* extent record index */ 4385 xfs_extnum_t idx; /* extent record index */
4718 xfs_ifork_t *ifp; /* inode fork pointer */ 4386 xfs_ifork_t *ifp; /* inode fork pointer */
4719 xfs_bmbt_irec_t s; /* file extent record */ 4387 int state = 0;
4388
4389 if (whichfork == XFS_ATTR_FORK)
4390 state |= BMAP_ATTRFORK;
4720 4391
4721 ifp = XFS_IFORK_PTR(ip, whichfork); 4392 ifp = XFS_IFORK_PTR(ip, whichfork);
4722 ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); 4393 ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4723 for (idx = 0; idx < cnt; idx++) { 4394 for (idx = 0; idx < cnt; idx++)
4724 ep = xfs_iext_get_ext(ifp, idx); 4395 trace_xfs_extlist(ip, idx, whichfork, caller_ip);
4725 xfs_bmbt_get_all(ep, &s);
4726 XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL,
4727 whichfork);
4728 }
4729} 4396}
4730#endif
4731 4397
4732#ifdef DEBUG
4733/* 4398/*
4734 * Validate that the bmbt_irecs being returned from bmapi are valid 4399 * Validate that the bmbt_irecs being returned from bmapi are valid
4735 * given the callers original parameters. Specifically check the 4400 * given the callers original parameters. Specifically check the
@@ -5478,7 +5143,8 @@ xfs_bunmapi(
5478 int rsvd; /* OK to allocate reserved blocks */ 5143 int rsvd; /* OK to allocate reserved blocks */
5479 xfs_fsblock_t sum; 5144 xfs_fsblock_t sum;
5480 5145
5481 xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address); 5146 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5147
5482 whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 5148 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5483 XFS_ATTR_FORK : XFS_DATA_FORK; 5149 XFS_ATTR_FORK : XFS_DATA_FORK;
5484 ifp = XFS_IFORK_PTR(ip, whichfork); 5150 ifp = XFS_IFORK_PTR(ip, whichfork);
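The xfs_bmap.c hunks above replace each function's private MASK/STATE_SET/STATE_TEST/SWITCH_STATE macro block with a single int state word built from the shared BMAP_* bits, so every contiguity decision becomes a plain bitwise test followed by a switch on (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)). The following is a minimal, self-contained C sketch of that pattern; the extent fields, flag values and MAXLEN limit are stand-ins for illustration, not the kernel's types:

#include <stdio.h>

/* Stand-in flag bits mirroring the BMAP_* values introduced by the patch. */
#define BMAP_LEFT_CONTIG   (1 << 0)
#define BMAP_RIGHT_CONTIG  (1 << 1)
#define BMAP_LEFT_VALID    (1 << 6)
#define BMAP_RIGHT_VALID   (1 << 7)

/* Hypothetical extent record, just enough fields for the contiguity test. */
struct ext {
	unsigned long	startoff;
	unsigned long	blockcount;
};

#define MAXLEN	1000	/* stand-in for MAXEXTLEN */

static void classify(const struct ext *left, const struct ext *right,
		     const struct ext *new, int state)
{
	/*
	 * Set contiguity flags the way the patched code does: test the
	 * already-accumulated state bits, then OR in the new ones.
	 */
	if ((state & BMAP_LEFT_VALID) &&
	    left->startoff + left->blockcount == new->startoff &&
	    left->blockcount + new->blockcount <= MAXLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) &&
	    new->startoff + new->blockcount == right->startoff &&
	    new->blockcount + right->blockcount <= MAXLEN)
		state |= BMAP_RIGHT_CONTIG;

	/* The switch now reads directly off the flag word. */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		puts("merge with both neighbours");
		break;
	case BMAP_LEFT_CONTIG:
		puts("merge with left neighbour");
		break;
	case BMAP_RIGHT_CONTIG:
		puts("merge with right neighbour");
		break;
	case 0:
		puts("insert a new record");
		break;
	}
}

int main(void)
{
	struct ext left = { 0, 10 }, new = { 10, 5 }, right = { 15, 20 };

	classify(&left, &right, &new, BMAP_LEFT_VALID | BMAP_RIGHT_VALID);
	return 0;
}

Keeping the flag word explicit also lets the same value be handed to the new trace_xfs_bmap_pre_update/post_update and xfs_iext_insert/remove call sites above, which is why the converted functions all build state before doing any extent manipulation.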
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 56f62d2edc35..419dafb9d87d 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -95,6 +95,21 @@ typedef struct xfs_bmap_free
95 /* need write cache flushing and no */ 95 /* need write cache flushing and no */
96 /* additional allocation alignments */ 96 /* additional allocation alignments */
97 97
98#define XFS_BMAPI_FLAGS \
99 { XFS_BMAPI_WRITE, "WRITE" }, \
100 { XFS_BMAPI_DELAY, "DELAY" }, \
101 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
102 { XFS_BMAPI_METADATA, "METADATA" }, \
103 { XFS_BMAPI_EXACT, "EXACT" }, \
104 { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
105 { XFS_BMAPI_ASYNC, "ASYNC" }, \
106 { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
107 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
108 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
109 { XFS_BMAPI_CONTIG, "CONTIG" }, \
110 { XFS_BMAPI_CONVERT, "CONVERT" }
111
112
98static inline int xfs_bmapi_aflag(int w) 113static inline int xfs_bmapi_aflag(int w)
99{ 114{
100 return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0); 115 return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
@@ -135,36 +150,43 @@ typedef struct xfs_bmalloca {
135 char conv; /* overwriting unwritten extents */ 150 char conv; /* overwriting unwritten extents */
136} xfs_bmalloca_t; 151} xfs_bmalloca_t;
137 152
138#if defined(__KERNEL__) && defined(XFS_BMAP_TRACE)
139/* 153/*
140 * Trace operations for bmap extent tracing 154 * Flags for xfs_bmap_add_extent*.
141 */ 155 */
142#define XFS_BMAP_KTRACE_DELETE 1 156#define BMAP_LEFT_CONTIG (1 << 0)
143#define XFS_BMAP_KTRACE_INSERT 2 157#define BMAP_RIGHT_CONTIG (1 << 1)
144#define XFS_BMAP_KTRACE_PRE_UP 3 158#define BMAP_LEFT_FILLING (1 << 2)
145#define XFS_BMAP_KTRACE_POST_UP 4 159#define BMAP_RIGHT_FILLING (1 << 3)
146 160#define BMAP_LEFT_DELAY (1 << 4)
147#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */ 161#define BMAP_RIGHT_DELAY (1 << 5)
148#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */ 162#define BMAP_LEFT_VALID (1 << 6)
149extern ktrace_t *xfs_bmap_trace_buf; 163#define BMAP_RIGHT_VALID (1 << 7)
164#define BMAP_ATTRFORK (1 << 8)
165
166#define XFS_BMAP_EXT_FLAGS \
167 { BMAP_LEFT_CONTIG, "LC" }, \
168 { BMAP_RIGHT_CONTIG, "RC" }, \
169 { BMAP_LEFT_FILLING, "LF" }, \
170 { BMAP_RIGHT_FILLING, "RF" }, \
171 { BMAP_ATTRFORK, "ATTR" }
150 172
151/* 173/*
152 * Add bmap trace insert entries for all the contents of the extent list. 174 * Add bmap trace insert entries for all the contents of the extent list.
175 *
176 * Quite excessive tracing. Only do this for debug builds.
153 */ 177 */
178#if defined(__KERNEL) && defined(DEBUG)
154void 179void
155xfs_bmap_trace_exlist( 180xfs_bmap_trace_exlist(
156 const char *fname, /* function name */
157 struct xfs_inode *ip, /* incore inode pointer */ 181 struct xfs_inode *ip, /* incore inode pointer */
158 xfs_extnum_t cnt, /* count of entries in list */ 182 xfs_extnum_t cnt, /* count of entries in list */
159 int whichfork); /* data or attr fork */ 183 int whichfork,
184 unsigned long caller_ip); /* data or attr fork */
160#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \ 185#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
161 xfs_bmap_trace_exlist(__func__,ip,c,w) 186 xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
162 187#else
163#else /* __KERNEL__ && XFS_BMAP_TRACE */
164
165#define XFS_BMAP_TRACE_EXLIST(ip,c,w) 188#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
166 189#endif
167#endif /* __KERNEL__ && XFS_BMAP_TRACE */
168 190
169/* 191/*
170 * Convert inode from non-attributed to attributed. 192 * Convert inode from non-attributed to attributed.
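The { flag, "name" } pair tables added to xfs_bmap.h here (XFS_BMAPI_FLAGS and XFS_BMAP_EXT_FLAGS) give the new trace events a way to print a flag word symbolically, e.g. "LC|RC", instead of a raw number. Below is a small stand-alone C sketch of that decoding idea; the table contents and the decode_flags() helper are illustrative stand-ins, not the kernel's trace formatting code:

#include <stdio.h>
#include <string.h>

/* Flag/name pairs in the same spirit as XFS_BMAP_EXT_FLAGS. */
struct flag_name {
	unsigned int	flag;
	const char	*name;
};

static const struct flag_name ext_flags[] = {
	{ 1 << 0, "LC" },	/* left contiguous  */
	{ 1 << 1, "RC" },	/* right contiguous */
	{ 1 << 8, "ATTR" },	/* attribute fork   */
};

/*
 * Decode a flag word into "A|B" form; a stand-in for what the trace
 * output formatting does with these tables.
 */
static void decode_flags(unsigned int state, char *buf, size_t len)
{
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(ext_flags) / sizeof(ext_flags[0]); i++) {
		if (!(state & ext_flags[i].flag))
			continue;
		if (buf[0] != '\0')
			strncat(buf, "|", len - strlen(buf) - 1);
		strncat(buf, ext_flags[i].name, len - strlen(buf) - 1);
	}
	if (buf[0] == '\0')
		strncat(buf, "none", len - strlen(buf) - 1);
}

int main(void)
{
	char buf[64];

	decode_flags((1 << 0) | (1 << 1), buf, sizeof(buf));
	printf("state = %s\n", buf);	/* prints "state = LC|RC" */
	return 0;
}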
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 6f5ccede63f9..38751d5fac6f 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -768,12 +768,6 @@ xfs_bmbt_trace_enter(
768 (void *)a0, (void *)a1, (void *)a2, (void *)a3, 768 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
769 (void *)a4, (void *)a5, (void *)a6, (void *)a7, 769 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
770 (void *)a8, (void *)a9, (void *)a10); 770 (void *)a8, (void *)a9, (void *)a10);
771 ktrace_enter(ip->i_btrace,
772 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
773 (void *)func, (void *)s, (void *)ip, (void *)cur,
774 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
775 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
776 (void *)a8, (void *)a9, (void *)a10);
777} 771}
778 772
779STATIC void 773STATIC void
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 52b5f14d0c32..36a0992dd669 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -39,6 +39,7 @@
39#include "xfs_btree_trace.h" 39#include "xfs_btree_trace.h"
40#include "xfs_ialloc.h" 40#include "xfs_ialloc.h"
41#include "xfs_error.h" 41#include "xfs_error.h"
42#include "xfs_trace.h"
42 43
43/* 44/*
44 * Cursor allocation zone. 45 * Cursor allocation zone.
@@ -81,7 +82,7 @@ xfs_btree_check_lblock(
81 XFS_ERRTAG_BTREE_CHECK_LBLOCK, 82 XFS_ERRTAG_BTREE_CHECK_LBLOCK,
82 XFS_RANDOM_BTREE_CHECK_LBLOCK))) { 83 XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
83 if (bp) 84 if (bp)
84 xfs_buftrace("LBTREE ERROR", bp); 85 trace_xfs_btree_corrupt(bp, _RET_IP_);
85 XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, 86 XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
86 mp); 87 mp);
87 return XFS_ERROR(EFSCORRUPTED); 88 return XFS_ERROR(EFSCORRUPTED);
@@ -119,7 +120,7 @@ xfs_btree_check_sblock(
119 XFS_ERRTAG_BTREE_CHECK_SBLOCK, 120 XFS_ERRTAG_BTREE_CHECK_SBLOCK,
120 XFS_RANDOM_BTREE_CHECK_SBLOCK))) { 121 XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
121 if (bp) 122 if (bp)
122 xfs_buftrace("SBTREE ERROR", bp); 123 trace_xfs_btree_corrupt(bp, _RET_IP_);
123 XFS_CORRUPTION_ERROR("xfs_btree_check_sblock", 124 XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
124 XFS_ERRLEVEL_LOW, cur->bc_mp, block); 125 XFS_ERRLEVEL_LOW, cur->bc_mp, block);
125 return XFS_ERROR(EFSCORRUPTED); 126 return XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h
index b3f5eb3c3c6c..2d8a309873ea 100644
--- a/fs/xfs/xfs_btree_trace.h
+++ b/fs/xfs/xfs_btree_trace.h
@@ -58,8 +58,6 @@ void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *,
58 struct xfs_buf *, int, int); 58 struct xfs_buf *, int, int);
59void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *, 59void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *,
60 struct xfs_buf *, int, int, int); 60 struct xfs_buf *, int, int, int);
61void xfs_btree_trace_argfffi(const char *, struct xfs_btree_cur *,
62 xfs_dfiloff_t, xfs_dfsbno_t, xfs_dfilblks_t, int, int);
63void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int); 61void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int);
64void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int, 62void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int,
65 union xfs_btree_ptr, union xfs_btree_key *, int); 63 union xfs_btree_ptr, union xfs_btree_key *, int);
@@ -71,24 +69,10 @@ void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *,
71 union xfs_btree_rec *, int); 69 union xfs_btree_rec *, int);
72void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int); 70void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int);
73 71
74
75#define XFS_ALLOCBT_TRACE_SIZE 4096 /* size of global trace buffer */
76extern ktrace_t *xfs_allocbt_trace_buf;
77
78#define XFS_INOBT_TRACE_SIZE 4096 /* size of global trace buffer */
79extern ktrace_t *xfs_inobt_trace_buf;
80
81#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */
82#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */
83extern ktrace_t *xfs_bmbt_trace_buf;
84
85
86#define XFS_BTREE_TRACE_ARGBI(c, b, i) \ 72#define XFS_BTREE_TRACE_ARGBI(c, b, i) \
87 xfs_btree_trace_argbi(__func__, c, b, i, __LINE__) 73 xfs_btree_trace_argbi(__func__, c, b, i, __LINE__)
88#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \ 74#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \
89 xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__) 75 xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__)
90#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) \
91 xfs_btree_trace_argfffi(__func__, c, o, b, i, j, __LINE__)
92#define XFS_BTREE_TRACE_ARGI(c, i) \ 76#define XFS_BTREE_TRACE_ARGI(c, i) \
93 xfs_btree_trace_argi(__func__, c, i, __LINE__) 77 xfs_btree_trace_argi(__func__, c, i, __LINE__)
94#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \ 78#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \
@@ -104,7 +88,6 @@ extern ktrace_t *xfs_bmbt_trace_buf;
104#else 88#else
105#define XFS_BTREE_TRACE_ARGBI(c, b, i) 89#define XFS_BTREE_TRACE_ARGBI(c, b, i)
106#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) 90#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
107#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j)
108#define XFS_BTREE_TRACE_ARGI(c, i) 91#define XFS_BTREE_TRACE_ARGI(c, i)
109#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) 92#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
110#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) 93#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 92af4098c7e8..a30f7e9eb2b9 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -29,6 +29,7 @@
29#include "xfs_buf_item.h" 29#include "xfs_buf_item.h"
30#include "xfs_trans_priv.h" 30#include "xfs_trans_priv.h"
31#include "xfs_error.h" 31#include "xfs_error.h"
32#include "xfs_trace.h"
32 33
33 34
34kmem_zone_t *xfs_buf_item_zone; 35kmem_zone_t *xfs_buf_item_zone;
@@ -164,7 +165,7 @@ xfs_buf_item_size(
164 * is the buf log format structure with the 165 * is the buf log format structure with the
165 * cancel flag in it. 166 * cancel flag in it.
166 */ 167 */
167 xfs_buf_item_trace("SIZE STALE", bip); 168 trace_xfs_buf_item_size_stale(bip);
168 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 169 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
169 return 1; 170 return 1;
170 } 171 }
@@ -206,7 +207,7 @@ xfs_buf_item_size(
206 } 207 }
207 } 208 }
208 209
209 xfs_buf_item_trace("SIZE NORM", bip); 210 trace_xfs_buf_item_size(bip);
210 return nvecs; 211 return nvecs;
211} 212}
212 213
@@ -259,7 +260,7 @@ xfs_buf_item_format(
259 * is the buf log format structure with the 260 * is the buf log format structure with the
260 * cancel flag in it. 261 * cancel flag in it.
261 */ 262 */
262 xfs_buf_item_trace("FORMAT STALE", bip); 263 trace_xfs_buf_item_format_stale(bip);
263 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 264 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
264 bip->bli_format.blf_size = nvecs; 265 bip->bli_format.blf_size = nvecs;
265 return; 266 return;
@@ -335,7 +336,7 @@ xfs_buf_item_format(
335 /* 336 /*
336 * Check to make sure everything is consistent. 337 * Check to make sure everything is consistent.
337 */ 338 */
338 xfs_buf_item_trace("FORMAT NORM", bip); 339 trace_xfs_buf_item_format(bip);
339 xfs_buf_item_log_check(bip); 340 xfs_buf_item_log_check(bip);
340} 341}
341 342
@@ -355,8 +356,7 @@ xfs_buf_item_pin(
355 ASSERT(atomic_read(&bip->bli_refcount) > 0); 356 ASSERT(atomic_read(&bip->bli_refcount) > 0);
356 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || 357 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
357 (bip->bli_flags & XFS_BLI_STALE)); 358 (bip->bli_flags & XFS_BLI_STALE));
358 xfs_buf_item_trace("PIN", bip); 359 trace_xfs_buf_item_pin(bip);
359 xfs_buftrace("XFS_PIN", bp);
360 xfs_bpin(bp); 360 xfs_bpin(bp);
361} 361}
362 362
@@ -383,8 +383,7 @@ xfs_buf_item_unpin(
383 ASSERT(bp != NULL); 383 ASSERT(bp != NULL);
384 ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); 384 ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
385 ASSERT(atomic_read(&bip->bli_refcount) > 0); 385 ASSERT(atomic_read(&bip->bli_refcount) > 0);
386 xfs_buf_item_trace("UNPIN", bip); 386 trace_xfs_buf_item_unpin(bip);
387 xfs_buftrace("XFS_UNPIN", bp);
388 387
389 freed = atomic_dec_and_test(&bip->bli_refcount); 388 freed = atomic_dec_and_test(&bip->bli_refcount);
390 ailp = bip->bli_item.li_ailp; 389 ailp = bip->bli_item.li_ailp;
@@ -395,8 +394,8 @@ xfs_buf_item_unpin(
395 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); 394 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
396 ASSERT(XFS_BUF_ISSTALE(bp)); 395 ASSERT(XFS_BUF_ISSTALE(bp));
397 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 396 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
398 xfs_buf_item_trace("UNPIN STALE", bip); 397 trace_xfs_buf_item_unpin_stale(bip);
399 xfs_buftrace("XFS_UNPIN STALE", bp); 398
400 /* 399 /*
401 * If we get called here because of an IO error, we may 400 * If we get called here because of an IO error, we may
402 * or may not have the item on the AIL. xfs_trans_ail_delete() 401 * or may not have the item on the AIL. xfs_trans_ail_delete()
@@ -440,8 +439,8 @@ xfs_buf_item_unpin_remove(
440 if ((atomic_read(&bip->bli_refcount) == 1) && 439 if ((atomic_read(&bip->bli_refcount) == 1) &&
441 (bip->bli_flags & XFS_BLI_STALE)) { 440 (bip->bli_flags & XFS_BLI_STALE)) {
442 ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0); 441 ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
443 xfs_buf_item_trace("UNPIN REMOVE", bip); 442 trace_xfs_buf_item_unpin_stale(bip);
444 xfs_buftrace("XFS_UNPIN_REMOVE", bp); 443
445 /* 444 /*
446 * yes -- clear the xaction descriptor in-use flag 445 * yes -- clear the xaction descriptor in-use flag
447 * and free the chunk if required. We can safely 446 * and free the chunk if required. We can safely
@@ -495,7 +494,7 @@ xfs_buf_item_trylock(
495 XFS_BUF_HOLD(bp); 494 XFS_BUF_HOLD(bp);
496 495
497 ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); 496 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
498 xfs_buf_item_trace("TRYLOCK SUCCESS", bip); 497 trace_xfs_buf_item_trylock(bip);
499 return XFS_ITEM_SUCCESS; 498 return XFS_ITEM_SUCCESS;
500} 499}
501 500
@@ -524,7 +523,6 @@ xfs_buf_item_unlock(
524 uint hold; 523 uint hold;
525 524
526 bp = bip->bli_buf; 525 bp = bip->bli_buf;
527 xfs_buftrace("XFS_UNLOCK", bp);
528 526
529 /* 527 /*
530 * Clear the buffer's association with this transaction. 528 * Clear the buffer's association with this transaction.
@@ -547,7 +545,7 @@ xfs_buf_item_unlock(
547 */ 545 */
548 if (bip->bli_flags & XFS_BLI_STALE) { 546 if (bip->bli_flags & XFS_BLI_STALE) {
549 bip->bli_flags &= ~XFS_BLI_LOGGED; 547 bip->bli_flags &= ~XFS_BLI_LOGGED;
550 xfs_buf_item_trace("UNLOCK STALE", bip); 548 trace_xfs_buf_item_unlock_stale(bip);
551 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 549 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
552 if (!aborted) 550 if (!aborted)
553 return; 551 return;
@@ -574,7 +572,7 @@ xfs_buf_item_unlock(
574 * release the buffer at the end of this routine. 572 * release the buffer at the end of this routine.
575 */ 573 */
576 hold = bip->bli_flags & XFS_BLI_HOLD; 574 hold = bip->bli_flags & XFS_BLI_HOLD;
577 xfs_buf_item_trace("UNLOCK", bip); 575 trace_xfs_buf_item_unlock(bip);
578 576
579 /* 577 /*
580 * If the buf item isn't tracking any data, free it. 578 * If the buf item isn't tracking any data, free it.
@@ -618,7 +616,8 @@ xfs_buf_item_committed(
618 xfs_buf_log_item_t *bip, 616 xfs_buf_log_item_t *bip,
619 xfs_lsn_t lsn) 617 xfs_lsn_t lsn)
620{ 618{
621 xfs_buf_item_trace("COMMITTED", bip); 619 trace_xfs_buf_item_committed(bip);
620
622 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && 621 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
623 (bip->bli_item.li_lsn != 0)) { 622 (bip->bli_item.li_lsn != 0)) {
624 return bip->bli_item.li_lsn; 623 return bip->bli_item.li_lsn;
@@ -640,7 +639,7 @@ xfs_buf_item_push(
640 xfs_buf_t *bp; 639 xfs_buf_t *bp;
641 640
642 ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); 641 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
643 xfs_buf_item_trace("PUSH", bip); 642 trace_xfs_buf_item_push(bip);
644 643
645 bp = bip->bli_buf; 644 bp = bip->bli_buf;
646 645
@@ -738,9 +737,6 @@ xfs_buf_item_init(
738 bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp); 737 bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
739 bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); 738 bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
740 bip->bli_format.blf_map_size = map_size; 739 bip->bli_format.blf_map_size = map_size;
741#ifdef XFS_BLI_TRACE
742 bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS);
743#endif
744 740
745#ifdef XFS_TRANS_DEBUG 741#ifdef XFS_TRANS_DEBUG
746 /* 742 /*
@@ -878,9 +874,6 @@ xfs_buf_item_free(
878 kmem_free(bip->bli_logged); 874 kmem_free(bip->bli_logged);
879#endif /* XFS_TRANS_DEBUG */ 875#endif /* XFS_TRANS_DEBUG */
880 876
881#ifdef XFS_BLI_TRACE
882 ktrace_free(bip->bli_trace);
883#endif
884 kmem_zone_free(xfs_buf_item_zone, bip); 877 kmem_zone_free(xfs_buf_item_zone, bip);
885} 878}
886 879
@@ -897,7 +890,8 @@ xfs_buf_item_relse(
897{ 890{
898 xfs_buf_log_item_t *bip; 891 xfs_buf_log_item_t *bip;
899 892
900 xfs_buftrace("XFS_RELSE", bp); 893 trace_xfs_buf_item_relse(bp, _RET_IP_);
894
901 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); 895 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
902 XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); 896 XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
903 if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && 897 if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
@@ -994,7 +988,7 @@ xfs_buf_iodone_callbacks(
994 if (XFS_FORCED_SHUTDOWN(mp)) { 988 if (XFS_FORCED_SHUTDOWN(mp)) {
995 ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); 989 ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
996 XFS_BUF_SUPER_STALE(bp); 990 XFS_BUF_SUPER_STALE(bp);
997 xfs_buftrace("BUF_IODONE_CB", bp); 991 trace_xfs_buf_item_iodone(bp, _RET_IP_);
998 xfs_buf_do_callbacks(bp, lip); 992 xfs_buf_do_callbacks(bp, lip);
999 XFS_BUF_SET_FSPRIVATE(bp, NULL); 993 XFS_BUF_SET_FSPRIVATE(bp, NULL);
1000 XFS_BUF_CLR_IODONE_FUNC(bp); 994 XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1030,7 +1024,7 @@ xfs_buf_iodone_callbacks(
1030 XFS_BUF_SET_START(bp); 1024 XFS_BUF_SET_START(bp);
1031 } 1025 }
1032 ASSERT(XFS_BUF_IODONE_FUNC(bp)); 1026 ASSERT(XFS_BUF_IODONE_FUNC(bp));
1033 xfs_buftrace("BUF_IODONE ASYNC", bp); 1027 trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
1034 xfs_buf_relse(bp); 1028 xfs_buf_relse(bp);
1035 } else { 1029 } else {
1036 /* 1030 /*
@@ -1053,9 +1047,7 @@ xfs_buf_iodone_callbacks(
1053 } 1047 }
1054 return; 1048 return;
1055 } 1049 }
1056#ifdef XFSERRORDEBUG 1050
1057 xfs_buftrace("XFS BUFCB NOERR", bp);
1058#endif
1059 xfs_buf_do_callbacks(bp, lip); 1051 xfs_buf_do_callbacks(bp, lip);
1060 XFS_BUF_SET_FSPRIVATE(bp, NULL); 1052 XFS_BUF_SET_FSPRIVATE(bp, NULL);
1061 XFS_BUF_CLR_IODONE_FUNC(bp); 1053 XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1081,7 +1073,9 @@ xfs_buf_error_relse(
1081 XFS_BUF_DONE(bp); 1073 XFS_BUF_DONE(bp);
1082 XFS_BUF_UNDELAYWRITE(bp); 1074 XFS_BUF_UNDELAYWRITE(bp);
1083 XFS_BUF_ERROR(bp,0); 1075 XFS_BUF_ERROR(bp,0);
1084 xfs_buftrace("BUF_ERROR_RELSE", bp); 1076
1077 trace_xfs_buf_error_relse(bp, _RET_IP_);
1078
1085 if (! XFS_FORCED_SHUTDOWN(mp)) 1079 if (! XFS_FORCED_SHUTDOWN(mp))
1086 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); 1080 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1087 /* 1081 /*
@@ -1128,34 +1122,3 @@ xfs_buf_iodone(
1128 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); 1122 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
1129 xfs_buf_item_free(bip); 1123 xfs_buf_item_free(bip);
1130} 1124}
1131
1132#if defined(XFS_BLI_TRACE)
1133void
1134xfs_buf_item_trace(
1135 char *id,
1136 xfs_buf_log_item_t *bip)
1137{
1138 xfs_buf_t *bp;
1139 ASSERT(bip->bli_trace != NULL);
1140
1141 bp = bip->bli_buf;
1142 ktrace_enter(bip->bli_trace,
1143 (void *)id,
1144 (void *)bip->bli_buf,
1145 (void *)((unsigned long)bip->bli_flags),
1146 (void *)((unsigned long)bip->bli_recur),
1147 (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
1148 (void *)((unsigned long)
1149 (0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
1150 (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
1151 (void *)((unsigned long)XFS_BUF_COUNT(bp)),
1152 (void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
1153 XFS_BUF_FSPRIVATE(bp, void *),
1154 XFS_BUF_FSPRIVATE2(bp, void *),
1155 (void *)(unsigned long)XFS_BUF_ISPINNED(bp),
1156 (void *)XFS_BUF_IODONE_FUNC(bp),
1157 (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
1158 (void *)bip->bli_item.li_desc,
1159 (void *)((unsigned long)bip->bli_item.li_flags));
1160}
1161#endif /* XFS_BLI_TRACE */
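Several of the converted call sites in xfs_buf_item.c pass _RET_IP_ so the emitted event records which caller reached the buffer-item code, replacing the old fixed description strings such as "XFS_RELSE" or "BUF_ERROR_RELSE". A rough userspace sketch of that caller-address idea, using GCC's __builtin_return_address(0) and a made-up trace_event() printf in place of a real tracepoint:

#include <stdio.h>

/*
 * Stand-in for a tracepoint: print the event name and the caller
 * address that was handed in.
 */
static void trace_event(const char *name, void *caller_ip)
{
	printf("%s: caller %p\n", name, caller_ip);
}

/*
 * A helper that, like the patched buffer-item code, captures its own
 * return address and attaches it to the event it emits.
 */
static __attribute__((noinline)) void release_buffer(void)
{
	trace_event("buf_item_relse", __builtin_return_address(0));
}

int main(void)
{
	release_buffer();	/* the event records main()'s call site */
	return 0;
}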
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 5a41c348bb1c..217f34af00cb 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -70,22 +70,21 @@ typedef struct xfs_buf_log_format_t {
70#define XFS_BLI_INODE_ALLOC_BUF 0x10 70#define XFS_BLI_INODE_ALLOC_BUF 0x10
71#define XFS_BLI_STALE_INODE 0x20 71#define XFS_BLI_STALE_INODE 0x20
72 72
73#define XFS_BLI_FLAGS \
74 { XFS_BLI_HOLD, "HOLD" }, \
75 { XFS_BLI_DIRTY, "DIRTY" }, \
76 { XFS_BLI_STALE, "STALE" }, \
77 { XFS_BLI_LOGGED, "LOGGED" }, \
78 { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \
79 { XFS_BLI_STALE_INODE, "STALE_INODE" }
80
73 81
74#ifdef __KERNEL__ 82#ifdef __KERNEL__
75 83
76struct xfs_buf; 84struct xfs_buf;
77struct ktrace;
78struct xfs_mount; 85struct xfs_mount;
79struct xfs_buf_log_item; 86struct xfs_buf_log_item;
80 87
81#if defined(XFS_BLI_TRACE)
82#define XFS_BLI_TRACE_SIZE 32
83
84void xfs_buf_item_trace(char *, struct xfs_buf_log_item *);
85#else
86#define xfs_buf_item_trace(id, bip)
87#endif
88
89/* 88/*
90 * This is the in core log item structure used to track information 89 * This is the in core log item structure used to track information
91 * needed to log buffers. It tracks how many times the lock has been 90 * needed to log buffers. It tracks how many times the lock has been
@@ -97,9 +96,6 @@ typedef struct xfs_buf_log_item {
97 unsigned int bli_flags; /* misc flags */ 96 unsigned int bli_flags; /* misc flags */
98 unsigned int bli_recur; /* lock recursion count */ 97 unsigned int bli_recur; /* lock recursion count */
99 atomic_t bli_refcount; /* cnt of tp refs */ 98 atomic_t bli_refcount; /* cnt of tp refs */
100#ifdef XFS_BLI_TRACE
101 struct ktrace *bli_trace; /* event trace buf */
102#endif
103#ifdef XFS_TRANS_DEBUG 99#ifdef XFS_TRANS_DEBUG
104 char *bli_orig; /* original buffer copy */ 100 char *bli_orig; /* original buffer copy */
105 char *bli_logged; /* bytes logged (bitmap) */ 101 char *bli_logged; /* bytes logged (bitmap) */
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 2847bbc1c534..c0c8869115b1 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -46,6 +46,7 @@
46#include "xfs_dir2_block.h" 46#include "xfs_dir2_block.h"
47#include "xfs_dir2_node.h" 47#include "xfs_dir2_node.h"
48#include "xfs_error.h" 48#include "xfs_error.h"
49#include "xfs_trace.h"
49 50
50/* 51/*
51 * xfs_da_btree.c 52 * xfs_da_btree.c
@@ -2107,7 +2108,7 @@ xfs_da_do_buf(
2107 (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), 2108 (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
2108 mp, XFS_ERRTAG_DA_READ_BUF, 2109 mp, XFS_ERRTAG_DA_READ_BUF,
2109 XFS_RANDOM_DA_READ_BUF))) { 2110 XFS_RANDOM_DA_READ_BUF))) {
2110 xfs_buftrace("DA READ ERROR", rbp->bps[0]); 2111 trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
2111 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", 2112 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2112 XFS_ERRLEVEL_LOW, mp, info); 2113 XFS_ERRLEVEL_LOW, mp, info);
2113 error = XFS_ERROR(EFSCORRUPTED); 2114 error = XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 8c536167bf75..30cd08f56a3a 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -125,6 +125,13 @@ typedef struct xfs_da_args {
125#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */ 125#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
126#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */ 126#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
127 127
128#define XFS_DA_OP_FLAGS \
129 { XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
130 { XFS_DA_OP_RENAME, "RENAME" }, \
131 { XFS_DA_OP_ADDNAME, "ADDNAME" }, \
132 { XFS_DA_OP_OKNOENT, "OKNOENT" }, \
133 { XFS_DA_OP_CILOOKUP, "CILOOKUP" }
134
128/* 135/*
129 * Structure to describe buffer(s) for a block. 136 * Structure to describe buffer(s) for a block.
130 * This is needed in the directory version 2 format case, when 137 * This is needed in the directory version 2 format case, when
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index ab89a7e94a0f..d1483a4f71b8 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -43,6 +43,7 @@
43#include "xfs_error.h" 43#include "xfs_error.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_vnodeops.h" 45#include "xfs_vnodeops.h"
46#include "xfs_trace.h"
46 47
47/* 48/*
48 * Syssgi interface for swapext 49 * Syssgi interface for swapext
@@ -168,7 +169,6 @@ xfs_swap_extents(
168 } 169 }
169 170
170 if (VN_CACHED(VFS_I(tip)) != 0) { 171 if (VN_CACHED(VFS_I(tip)) != 0) {
171 xfs_inval_cached_trace(tip, 0, -1, 0, -1);
172 error = xfs_flushinval_pages(tip, 0, -1, 172 error = xfs_flushinval_pages(tip, 0, -1,
173 FI_REMAPF_LOCKED); 173 FI_REMAPF_LOCKED);
174 if (error) 174 if (error)
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index bb1d58eb3982..93634a7e90e9 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -40,9 +40,9 @@
40#include "xfs_dir2_leaf.h" 40#include "xfs_dir2_leaf.h"
41#include "xfs_dir2_block.h" 41#include "xfs_dir2_block.h"
42#include "xfs_dir2_node.h" 42#include "xfs_dir2_node.h"
43#include "xfs_dir2_trace.h"
44#include "xfs_error.h" 43#include "xfs_error.h"
45#include "xfs_vnodeops.h" 44#include "xfs_vnodeops.h"
45#include "xfs_trace.h"
46 46
47struct xfs_name xfs_name_dotdot = {"..", 2}; 47struct xfs_name xfs_name_dotdot = {"..", 2};
48 48
@@ -525,7 +525,8 @@ xfs_dir2_grow_inode(
525 xfs_trans_t *tp; 525 xfs_trans_t *tp;
526 xfs_drfsbno_t nblks; 526 xfs_drfsbno_t nblks;
527 527
528 xfs_dir2_trace_args_s("grow_inode", args, space); 528 trace_xfs_dir2_grow_inode(args, space);
529
529 dp = args->dp; 530 dp = args->dp;
530 tp = args->trans; 531 tp = args->trans;
531 mp = dp->i_mount; 532 mp = dp->i_mount;
@@ -703,7 +704,8 @@ xfs_dir2_shrink_inode(
703 xfs_mount_t *mp; 704 xfs_mount_t *mp;
704 xfs_trans_t *tp; 705 xfs_trans_t *tp;
705 706
706 xfs_dir2_trace_args_db("shrink_inode", args, db, bp); 707 trace_xfs_dir2_shrink_inode(args, db);
708
707 dp = args->dp; 709 dp = args->dp;
708 mp = dp->i_mount; 710 mp = dp->i_mount;
709 tp = args->trans; 711 tp = args->trans;
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index ab52e9e1c1ee..ddc4ecc7807f 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -36,8 +36,8 @@
36#include "xfs_dir2_data.h" 36#include "xfs_dir2_data.h"
37#include "xfs_dir2_leaf.h" 37#include "xfs_dir2_leaf.h"
38#include "xfs_dir2_block.h" 38#include "xfs_dir2_block.h"
39#include "xfs_dir2_trace.h"
40#include "xfs_error.h" 39#include "xfs_error.h"
40#include "xfs_trace.h"
41 41
42/* 42/*
43 * Local function prototypes. 43 * Local function prototypes.
@@ -94,7 +94,8 @@ xfs_dir2_block_addname(
94 __be16 *tagp; /* pointer to tag value */ 94 __be16 *tagp; /* pointer to tag value */
95 xfs_trans_t *tp; /* transaction structure */ 95 xfs_trans_t *tp; /* transaction structure */
96 96
97 xfs_dir2_trace_args("block_addname", args); 97 trace_xfs_dir2_block_addname(args);
98
98 dp = args->dp; 99 dp = args->dp;
99 tp = args->trans; 100 tp = args->trans;
100 mp = dp->i_mount; 101 mp = dp->i_mount;
@@ -590,7 +591,8 @@ xfs_dir2_block_lookup(
590 int error; /* error return value */ 591 int error; /* error return value */
591 xfs_mount_t *mp; /* filesystem mount point */ 592 xfs_mount_t *mp; /* filesystem mount point */
592 593
593 xfs_dir2_trace_args("block_lookup", args); 594 trace_xfs_dir2_block_lookup(args);
595
594 /* 596 /*
595 * Get the buffer, look up the entry. 597 * Get the buffer, look up the entry.
596 * If not found (ENOENT) then return, have no buffer. 598 * If not found (ENOENT) then return, have no buffer.
@@ -747,7 +749,8 @@ xfs_dir2_block_removename(
747 int size; /* shortform size */ 749 int size; /* shortform size */
748 xfs_trans_t *tp; /* transaction pointer */ 750 xfs_trans_t *tp; /* transaction pointer */
749 751
750 xfs_dir2_trace_args("block_removename", args); 752 trace_xfs_dir2_block_removename(args);
753
751 /* 754 /*
752 * Look up the entry in the block. Gets the buffer and entry index. 755 * Look up the entry in the block. Gets the buffer and entry index.
753 * It will always be there, the vnodeops level does a lookup first. 756 * It will always be there, the vnodeops level does a lookup first.
@@ -823,7 +826,8 @@ xfs_dir2_block_replace(
823 int error; /* error return value */ 826 int error; /* error return value */
824 xfs_mount_t *mp; /* filesystem mount point */ 827 xfs_mount_t *mp; /* filesystem mount point */
825 828
826 xfs_dir2_trace_args("block_replace", args); 829 trace_xfs_dir2_block_replace(args);
830
827 /* 831 /*
828 * Lookup the entry in the directory. Get buffer and entry index. 832 * Lookup the entry in the directory. Get buffer and entry index.
829 * This will always succeed since the caller has already done a lookup. 833 * This will always succeed since the caller has already done a lookup.
@@ -897,7 +901,8 @@ xfs_dir2_leaf_to_block(
897 int to; /* block/leaf to index */ 901 int to; /* block/leaf to index */
898 xfs_trans_t *tp; /* transaction pointer */ 902 xfs_trans_t *tp; /* transaction pointer */
899 903
900 xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp); 904 trace_xfs_dir2_leaf_to_block(args);
905
901 dp = args->dp; 906 dp = args->dp;
902 tp = args->trans; 907 tp = args->trans;
903 mp = dp->i_mount; 908 mp = dp->i_mount;
@@ -1044,7 +1049,8 @@ xfs_dir2_sf_to_block(
1044 xfs_trans_t *tp; /* transaction pointer */ 1049 xfs_trans_t *tp; /* transaction pointer */
1045 struct xfs_name name; 1050 struct xfs_name name;
1046 1051
1047 xfs_dir2_trace_args("sf_to_block", args); 1052 trace_xfs_dir2_sf_to_block(args);
1053
1048 dp = args->dp; 1054 dp = args->dp;
1049 tp = args->trans; 1055 tp = args->trans;
1050 mp = dp->i_mount; 1056 mp = dp->i_mount;
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 41ad537c49e9..29f484c11b3a 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -38,8 +38,8 @@
38#include "xfs_dir2_leaf.h" 38#include "xfs_dir2_leaf.h"
39#include "xfs_dir2_block.h" 39#include "xfs_dir2_block.h"
40#include "xfs_dir2_node.h" 40#include "xfs_dir2_node.h"
41#include "xfs_dir2_trace.h"
42#include "xfs_error.h" 41#include "xfs_error.h"
42#include "xfs_trace.h"
43 43
44/* 44/*
45 * Local function declarations. 45 * Local function declarations.
@@ -80,7 +80,8 @@ xfs_dir2_block_to_leaf(
80 int needscan; /* need to rescan bestfree */ 80 int needscan; /* need to rescan bestfree */
81 xfs_trans_t *tp; /* transaction pointer */ 81 xfs_trans_t *tp; /* transaction pointer */
82 82
83 xfs_dir2_trace_args_b("block_to_leaf", args, dbp); 83 trace_xfs_dir2_block_to_leaf(args);
84
84 dp = args->dp; 85 dp = args->dp;
85 mp = dp->i_mount; 86 mp = dp->i_mount;
86 tp = args->trans; 87 tp = args->trans;
@@ -188,7 +189,8 @@ xfs_dir2_leaf_addname(
188 xfs_trans_t *tp; /* transaction pointer */ 189 xfs_trans_t *tp; /* transaction pointer */
189 xfs_dir2_db_t use_block; /* data block number */ 190 xfs_dir2_db_t use_block; /* data block number */
190 191
191 xfs_dir2_trace_args("leaf_addname", args); 192 trace_xfs_dir2_leaf_addname(args);
193
192 dp = args->dp; 194 dp = args->dp;
193 tp = args->trans; 195 tp = args->trans;
194 mp = dp->i_mount; 196 mp = dp->i_mount;
@@ -1266,7 +1268,8 @@ xfs_dir2_leaf_lookup(
1266 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1268 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1267 xfs_trans_t *tp; /* transaction pointer */ 1269 xfs_trans_t *tp; /* transaction pointer */
1268 1270
1269 xfs_dir2_trace_args("leaf_lookup", args); 1271 trace_xfs_dir2_leaf_lookup(args);
1272
1270 /* 1273 /*
1271 * Look up name in the leaf block, returning both buffers and index. 1274 * Look up name in the leaf block, returning both buffers and index.
1272 */ 1275 */
@@ -1454,7 +1457,8 @@ xfs_dir2_leaf_removename(
1454 xfs_dir2_data_off_t oldbest; /* old value of best free */ 1457 xfs_dir2_data_off_t oldbest; /* old value of best free */
1455 xfs_trans_t *tp; /* transaction pointer */ 1458 xfs_trans_t *tp; /* transaction pointer */
1456 1459
1457 xfs_dir2_trace_args("leaf_removename", args); 1460 trace_xfs_dir2_leaf_removename(args);
1461
1458 /* 1462 /*
1459 * Lookup the leaf entry, get the leaf and data blocks read in. 1463 * Lookup the leaf entry, get the leaf and data blocks read in.
1460 */ 1464 */
@@ -1586,7 +1590,8 @@ xfs_dir2_leaf_replace(
1586 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1590 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1587 xfs_trans_t *tp; /* transaction pointer */ 1591 xfs_trans_t *tp; /* transaction pointer */
1588 1592
1589 xfs_dir2_trace_args("leaf_replace", args); 1593 trace_xfs_dir2_leaf_replace(args);
1594
1590 /* 1595 /*
1591 * Look up the entry. 1596 * Look up the entry.
1592 */ 1597 */
@@ -1766,7 +1771,9 @@ xfs_dir2_node_to_leaf(
1766 if (state->path.active > 1) 1771 if (state->path.active > 1)
1767 return 0; 1772 return 0;
1768 args = state->args; 1773 args = state->args;
1769 xfs_dir2_trace_args("node_to_leaf", args); 1774
1775 trace_xfs_dir2_node_to_leaf(args);
1776
1770 mp = state->mp; 1777 mp = state->mp;
1771 dp = args->dp; 1778 dp = args->dp;
1772 tp = args->trans; 1779 tp = args->trans;
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 5a81ccd1045b..ce6e355199b5 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -37,8 +37,8 @@
37#include "xfs_dir2_leaf.h" 37#include "xfs_dir2_leaf.h"
38#include "xfs_dir2_block.h" 38#include "xfs_dir2_block.h"
39#include "xfs_dir2_node.h" 39#include "xfs_dir2_node.h"
40#include "xfs_dir2_trace.h"
41#include "xfs_error.h" 40#include "xfs_error.h"
41#include "xfs_trace.h"
42 42
43/* 43/*
44 * Function declarations. 44 * Function declarations.
@@ -123,7 +123,8 @@ xfs_dir2_leaf_to_node(
123 __be16 *to; /* pointer to freespace entry */ 123 __be16 *to; /* pointer to freespace entry */
124 xfs_trans_t *tp; /* transaction pointer */ 124 xfs_trans_t *tp; /* transaction pointer */
125 125
126 xfs_dir2_trace_args_b("leaf_to_node", args, lbp); 126 trace_xfs_dir2_leaf_to_node(args);
127
127 dp = args->dp; 128 dp = args->dp;
128 mp = dp->i_mount; 129 mp = dp->i_mount;
129 tp = args->trans; 130 tp = args->trans;
@@ -196,7 +197,8 @@ xfs_dir2_leafn_add(
196 xfs_mount_t *mp; /* filesystem mount point */ 197 xfs_mount_t *mp; /* filesystem mount point */
197 xfs_trans_t *tp; /* transaction pointer */ 198 xfs_trans_t *tp; /* transaction pointer */
198 199
199 xfs_dir2_trace_args_sb("leafn_add", args, index, bp); 200 trace_xfs_dir2_leafn_add(args, index);
201
200 dp = args->dp; 202 dp = args->dp;
201 mp = dp->i_mount; 203 mp = dp->i_mount;
202 tp = args->trans; 204 tp = args->trans;
@@ -711,8 +713,8 @@ xfs_dir2_leafn_moveents(
711 int stale; /* count stale leaves copied */ 713 int stale; /* count stale leaves copied */
712 xfs_trans_t *tp; /* transaction pointer */ 714 xfs_trans_t *tp; /* transaction pointer */
713 715
714 xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d, 716 trace_xfs_dir2_leafn_moveents(args, start_s, start_d, count);
715 start_d, count); 717
716 /* 718 /*
717 * Silently return if nothing to do. 719 * Silently return if nothing to do.
718 */ 720 */
@@ -933,7 +935,8 @@ xfs_dir2_leafn_remove(
933 int needscan; /* need to rescan data frees */ 935 int needscan; /* need to rescan data frees */
934 xfs_trans_t *tp; /* transaction pointer */ 936 xfs_trans_t *tp; /* transaction pointer */
935 937
936 xfs_dir2_trace_args_sb("leafn_remove", args, index, bp); 938 trace_xfs_dir2_leafn_remove(args, index);
939
937 dp = args->dp; 940 dp = args->dp;
938 tp = args->trans; 941 tp = args->trans;
939 mp = dp->i_mount; 942 mp = dp->i_mount;
@@ -1363,7 +1366,8 @@ xfs_dir2_node_addname(
1363 int rval; /* sub-return value */ 1366 int rval; /* sub-return value */
1364 xfs_da_state_t *state; /* btree cursor */ 1367 xfs_da_state_t *state; /* btree cursor */
1365 1368
1366 xfs_dir2_trace_args("node_addname", args); 1369 trace_xfs_dir2_node_addname(args);
1370
1367 /* 1371 /*
1368 * Allocate and initialize the state (btree cursor). 1372 * Allocate and initialize the state (btree cursor).
1369 */ 1373 */
@@ -1822,7 +1826,8 @@ xfs_dir2_node_lookup(
1822 int rval; /* operation return value */ 1826 int rval; /* operation return value */
1823 xfs_da_state_t *state; /* btree cursor */ 1827 xfs_da_state_t *state; /* btree cursor */
1824 1828
1825 xfs_dir2_trace_args("node_lookup", args); 1829 trace_xfs_dir2_node_lookup(args);
1830
1826 /* 1831 /*
1827 * Allocate and initialize the btree cursor. 1832 * Allocate and initialize the btree cursor.
1828 */ 1833 */
@@ -1875,7 +1880,8 @@ xfs_dir2_node_removename(
1875 int rval; /* operation return value */ 1880 int rval; /* operation return value */
1876 xfs_da_state_t *state; /* btree cursor */ 1881 xfs_da_state_t *state; /* btree cursor */
1877 1882
1878 xfs_dir2_trace_args("node_removename", args); 1883 trace_xfs_dir2_node_removename(args);
1884
1879 /* 1885 /*
1880 * Allocate and initialize the btree cursor. 1886 * Allocate and initialize the btree cursor.
1881 */ 1887 */
@@ -1944,7 +1950,8 @@ xfs_dir2_node_replace(
1944 int rval; /* internal return value */ 1950 int rval; /* internal return value */
1945 xfs_da_state_t *state; /* btree cursor */ 1951 xfs_da_state_t *state; /* btree cursor */
1946 1952
1947 xfs_dir2_trace_args("node_replace", args); 1953 trace_xfs_dir2_node_replace(args);
1954
1948 /* 1955 /*
1949 * Allocate and initialize the btree cursor. 1956 * Allocate and initialize the btree cursor.
1950 */ 1957 */
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index e89734e84646..9d4f17a69676 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -37,7 +37,7 @@
37#include "xfs_dir2_data.h" 37#include "xfs_dir2_data.h"
38#include "xfs_dir2_leaf.h" 38#include "xfs_dir2_leaf.h"
39#include "xfs_dir2_block.h" 39#include "xfs_dir2_block.h"
40#include "xfs_dir2_trace.h" 40#include "xfs_trace.h"
41 41
42/* 42/*
43 * Prototypes for internal functions. 43 * Prototypes for internal functions.
@@ -169,7 +169,8 @@ xfs_dir2_block_to_sf(
169 xfs_dir2_sf_t *sfp; /* shortform structure */ 169 xfs_dir2_sf_t *sfp; /* shortform structure */
170 xfs_ino_t temp; 170 xfs_ino_t temp;
171 171
172 xfs_dir2_trace_args_sb("block_to_sf", args, size, bp); 172 trace_xfs_dir2_block_to_sf(args);
173
173 dp = args->dp; 174 dp = args->dp;
174 mp = dp->i_mount; 175 mp = dp->i_mount;
175 176
@@ -281,7 +282,8 @@ xfs_dir2_sf_addname(
281 xfs_dir2_sf_t *sfp; /* shortform structure */ 282 xfs_dir2_sf_t *sfp; /* shortform structure */
282 xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */ 283 xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */
283 284
284 xfs_dir2_trace_args("sf_addname", args); 285 trace_xfs_dir2_sf_addname(args);
286
285 ASSERT(xfs_dir2_sf_lookup(args) == ENOENT); 287 ASSERT(xfs_dir2_sf_lookup(args) == ENOENT);
286 dp = args->dp; 288 dp = args->dp;
287 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 289 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -654,7 +656,8 @@ xfs_dir2_sf_create(
654 xfs_dir2_sf_t *sfp; /* shortform structure */ 656 xfs_dir2_sf_t *sfp; /* shortform structure */
655 int size; /* directory size */ 657 int size; /* directory size */
656 658
657 xfs_dir2_trace_args_i("sf_create", args, pino); 659 trace_xfs_dir2_sf_create(args);
660
658 dp = args->dp; 661 dp = args->dp;
659 662
660 ASSERT(dp != NULL); 663 ASSERT(dp != NULL);
@@ -808,7 +811,8 @@ xfs_dir2_sf_lookup(
808 enum xfs_dacmp cmp; /* comparison result */ 811 enum xfs_dacmp cmp; /* comparison result */
809 xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */ 812 xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
810 813
811 xfs_dir2_trace_args("sf_lookup", args); 814 trace_xfs_dir2_sf_lookup(args);
815
812 xfs_dir2_sf_check(args); 816 xfs_dir2_sf_check(args);
813 dp = args->dp; 817 dp = args->dp;
814 818
@@ -891,7 +895,8 @@ xfs_dir2_sf_removename(
891 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ 895 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
892 xfs_dir2_sf_t *sfp; /* shortform structure */ 896 xfs_dir2_sf_t *sfp; /* shortform structure */
893 897
894 xfs_dir2_trace_args("sf_removename", args); 898 trace_xfs_dir2_sf_removename(args);
899
895 dp = args->dp; 900 dp = args->dp;
896 901
897 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 902 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -982,7 +987,8 @@ xfs_dir2_sf_replace(
982 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ 987 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
983 xfs_dir2_sf_t *sfp; /* shortform structure */ 988 xfs_dir2_sf_t *sfp; /* shortform structure */
984 989
985 xfs_dir2_trace_args("sf_replace", args); 990 trace_xfs_dir2_sf_replace(args);
991
986 dp = args->dp; 992 dp = args->dp;
987 993
988 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 994 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -1125,7 +1131,8 @@ xfs_dir2_sf_toino4(
1125 xfs_dir2_sf_entry_t *sfep; /* new sf entry */ 1131 xfs_dir2_sf_entry_t *sfep; /* new sf entry */
1126 xfs_dir2_sf_t *sfp; /* new sf directory */ 1132 xfs_dir2_sf_t *sfp; /* new sf directory */
1127 1133
1128 xfs_dir2_trace_args("sf_toino4", args); 1134 trace_xfs_dir2_sf_toino4(args);
1135
1129 dp = args->dp; 1136 dp = args->dp;
1130 1137
1131 /* 1138 /*
@@ -1202,7 +1209,8 @@ xfs_dir2_sf_toino8(
1202 xfs_dir2_sf_entry_t *sfep; /* new sf entry */ 1209 xfs_dir2_sf_entry_t *sfep; /* new sf entry */
1203 xfs_dir2_sf_t *sfp; /* new sf directory */ 1210 xfs_dir2_sf_t *sfp; /* new sf directory */
1204 1211
1205 xfs_dir2_trace_args("sf_toino8", args); 1212 trace_xfs_dir2_sf_toino8(args);
1213
1206 dp = args->dp; 1214 dp = args->dp;
1207 1215
1208 /* 1216 /*
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
deleted file mode 100644
index 6cc7c0c681ac..000000000000
--- a/fs/xfs/xfs_dir2_trace.c
+++ /dev/null
@@ -1,216 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_inum.h"
22#include "xfs_dir2.h"
23#include "xfs_da_btree.h"
24#include "xfs_bmap_btree.h"
25#include "xfs_dir2_sf.h"
26#include "xfs_attr_sf.h"
27#include "xfs_dinode.h"
28#include "xfs_inode.h"
29#include "xfs_dir2_trace.h"
30
31#ifdef XFS_DIR2_TRACE
32ktrace_t *xfs_dir2_trace_buf;
33
34/*
35 * Enter something in the trace buffers.
36 */
37static void
38xfs_dir2_trace_enter(
39 xfs_inode_t *dp,
40 int type,
41 char *where,
42 char *name,
43 int namelen,
44 void *a0,
45 void *a1,
46 void *a2,
47 void *a3,
48 void *a4,
49 void *a5,
50 void *a6,
51 void *a7)
52{
53 void *n[5];
54
55 ASSERT(xfs_dir2_trace_buf);
56 ASSERT(dp->i_dir_trace);
57 if (name)
58 memcpy(n, name, min((int)sizeof(n), namelen));
59 else
60 memset((char *)n, 0, sizeof(n));
61 ktrace_enter(xfs_dir2_trace_buf,
62 (void *)(long)type, (void *)where,
63 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
64 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
65 (void *)(long)namelen,
66 (void *)n[0], (void *)n[1], (void *)n[2],
67 (void *)n[3], (void *)n[4]);
68 ktrace_enter(dp->i_dir_trace,
69 (void *)(long)type, (void *)where,
70 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
71 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
72 (void *)(long)namelen,
73 (void *)n[0], (void *)n[1], (void *)n[2],
74 (void *)n[3], (void *)n[4]);
75}
76
77void
78xfs_dir2_trace_args(
79 char *where,
80 xfs_da_args_t *args)
81{
82 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where,
83 (char *)args->name, (int)args->namelen,
84 (void *)(unsigned long)args->hashval,
85 (void *)((unsigned long)(args->inumber >> 32)),
86 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
87 (void *)args->dp, (void *)args->trans,
88 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
89 NULL, NULL);
90}
91
92void
93xfs_dir2_trace_args_b(
94 char *where,
95 xfs_da_args_t *args,
96 xfs_dabuf_t *bp)
97{
98 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where,
99 (char *)args->name, (int)args->namelen,
100 (void *)(unsigned long)args->hashval,
101 (void *)((unsigned long)(args->inumber >> 32)),
102 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
103 (void *)args->dp, (void *)args->trans,
104 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
105 (void *)(bp ? bp->bps[0] : NULL), NULL);
106}
107
108void
109xfs_dir2_trace_args_bb(
110 char *where,
111 xfs_da_args_t *args,
112 xfs_dabuf_t *lbp,
113 xfs_dabuf_t *dbp)
114{
115 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where,
116 (char *)args->name, (int)args->namelen,
117 (void *)(unsigned long)args->hashval,
118 (void *)((unsigned long)(args->inumber >> 32)),
119 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
120 (void *)args->dp, (void *)args->trans,
121 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
122 (void *)(lbp ? lbp->bps[0] : NULL),
123 (void *)(dbp ? dbp->bps[0] : NULL));
124}
125
126void
127xfs_dir2_trace_args_bibii(
128 char *where,
129 xfs_da_args_t *args,
130 xfs_dabuf_t *bs,
131 int ss,
132 xfs_dabuf_t *bd,
133 int sd,
134 int c)
135{
136 xfs_buf_t *bpbs = bs ? bs->bps[0] : NULL;
137 xfs_buf_t *bpbd = bd ? bd->bps[0] : NULL;
138
139 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where,
140 (char *)args->name, (int)args->namelen,
141 (void *)args->dp, (void *)args->trans,
142 (void *)bpbs, (void *)(long)ss, (void *)bpbd, (void *)(long)sd,
143 (void *)(long)c, NULL);
144}
145
146void
147xfs_dir2_trace_args_db(
148 char *where,
149 xfs_da_args_t *args,
150 xfs_dir2_db_t db,
151 xfs_dabuf_t *bp)
152{
153 xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
154
155 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where,
156 (char *)args->name, (int)args->namelen,
157 (void *)(unsigned long)args->hashval,
158 (void *)((unsigned long)(args->inumber >> 32)),
159 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
160 (void *)args->dp, (void *)args->trans,
161 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
162 (void *)(long)db, (void *)dbp);
163}
164
165void
166xfs_dir2_trace_args_i(
167 char *where,
168 xfs_da_args_t *args,
169 xfs_ino_t i)
170{
171 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where,
172 (char *)args->name, (int)args->namelen,
173 (void *)(unsigned long)args->hashval,
174 (void *)((unsigned long)(args->inumber >> 32)),
175 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
176 (void *)args->dp, (void *)args->trans,
177 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
178 (void *)((unsigned long)(i >> 32)),
179 (void *)((unsigned long)(i & 0xFFFFFFFF)));
180}
181
182void
183xfs_dir2_trace_args_s(
184 char *where,
185 xfs_da_args_t *args,
186 int s)
187{
188 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where,
189 (char *)args->name, (int)args->namelen,
190 (void *)(unsigned long)args->hashval,
191 (void *)((unsigned long)(args->inumber >> 32)),
192 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
193 (void *)args->dp, (void *)args->trans,
194 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
195 (void *)(long)s, NULL);
196}
197
198void
199xfs_dir2_trace_args_sb(
200 char *where,
201 xfs_da_args_t *args,
202 int s,
203 xfs_dabuf_t *bp)
204{
205 xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
206
207 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where,
208 (char *)args->name, (int)args->namelen,
209 (void *)(unsigned long)args->hashval,
210 (void *)((unsigned long)(args->inumber >> 32)),
211 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
212 (void *)args->dp, (void *)args->trans,
213 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
214 (void *)(long)s, (void *)dbp);
215}
216#endif /* XFS_DIR2_TRACE */
diff --git a/fs/xfs/xfs_dir2_trace.h b/fs/xfs/xfs_dir2_trace.h
deleted file mode 100644
index ca3c754f4822..000000000000
--- a/fs/xfs/xfs_dir2_trace.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_DIR2_TRACE_H__
19#define __XFS_DIR2_TRACE_H__
20
21/*
22 * Tracing for xfs v2 directories.
23 */
24
25#if defined(XFS_DIR2_TRACE)
26
27struct ktrace;
28struct xfs_dabuf;
29struct xfs_da_args;
30
31#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */
32#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */
33extern struct ktrace *xfs_dir2_trace_buf;
34
35#define XFS_DIR2_KTRACE_ARGS 1 /* args only */
36#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */
37#define XFS_DIR2_KTRACE_ARGS_BB 3 /* args + 2 buffers */
38#define XFS_DIR2_KTRACE_ARGS_DB 4 /* args, db, buffer */
39#define XFS_DIR2_KTRACE_ARGS_I 5 /* args, inum */
40#define XFS_DIR2_KTRACE_ARGS_S 6 /* args, int */
41#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */
42#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */
43
44void xfs_dir2_trace_args(char *where, struct xfs_da_args *args);
45void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args,
46 struct xfs_dabuf *bp);
47void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args,
48 struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
49void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args,
50 struct xfs_dabuf *bs, int ss,
51 struct xfs_dabuf *bd, int sd, int c);
52void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args,
53 xfs_dir2_db_t db, struct xfs_dabuf *bp);
54void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i);
55void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s);
56void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s,
57 struct xfs_dabuf *bp);
58
59#else /* XFS_DIR2_TRACE */
60
61#define xfs_dir2_trace_args(where, args)
62#define xfs_dir2_trace_args_b(where, args, bp)
63#define xfs_dir2_trace_args_bb(where, args, lbp, dbp)
64#define xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c)
65#define xfs_dir2_trace_args_db(where, args, db, bp)
66#define xfs_dir2_trace_args_i(where, args, i)
67#define xfs_dir2_trace_args_s(where, args, s)
68#define xfs_dir2_trace_args_sb(where, args, s, bp)
69
70#endif /* XFS_DIR2_TRACE */
71
72#endif /* __XFS_DIR2_TRACE_H__ */
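
With xfs_dir2_trace.c and xfs_dir2_trace.h deleted above, the compile-time XFS_DIR2_TRACE switch that swapped the helpers for empty macros is gone as well; tracepoint declarations are always built and stay dormant until enabled from user space. The replacement header follows the usual kernel trace-header boilerplate; the outline below is a hedged sketch (the actual fs/xfs/xfs_trace.h does not appear in this section, so treat names and paths as illustrative):

/* Sketch of the generic trace-header skeleton; not part of this patch. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XFS_H

#include <linux/tracepoint.h>

struct xfs_da_args;
struct xfs_inode;

/* DECLARE_EVENT_CLASS()/DEFINE_EVENT() definitions for the trace_xfs_*()
 * calls introduced throughout this diff would go here. */

#endif /* _TRACE_XFS_H */

/* The header lives in fs/xfs/ rather than include/trace/events/, so it
 * points the event-generation machinery back at itself. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_trace
#include <trace/define_trace.h>

Exactly one compilation unit defines CREATE_TRACE_POINTS before including the header so the event bodies are emitted once; every other file only needs the trace_*() wrappers, which is what the added #include "xfs_trace.h" lines in the hunks above and below provide.
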
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index edf8bdf4141f..a631e1451abb 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -34,6 +34,7 @@
34#include "xfs_utils.h" 34#include "xfs_utils.h"
35#include "xfs_mru_cache.h" 35#include "xfs_mru_cache.h"
36#include "xfs_filestream.h" 36#include "xfs_filestream.h"
37#include "xfs_trace.h"
37 38
38#ifdef XFS_FILESTREAMS_TRACE 39#ifdef XFS_FILESTREAMS_TRACE
39 40
@@ -394,9 +395,7 @@ xfs_filestream_init(void)
394 item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item"); 395 item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item");
395 if (!item_zone) 396 if (!item_zone)
396 return -ENOMEM; 397 return -ENOMEM;
397#ifdef XFS_FILESTREAMS_TRACE 398
398 xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS);
399#endif
400 return 0; 399 return 0;
401} 400}
402 401
@@ -407,9 +406,6 @@ xfs_filestream_init(void)
407void 406void
408xfs_filestream_uninit(void) 407xfs_filestream_uninit(void)
409{ 408{
410#ifdef XFS_FILESTREAMS_TRACE
411 ktrace_free(xfs_filestreams_trace_buf);
412#endif
413 kmem_zone_destroy(item_zone); 409 kmem_zone_destroy(item_zone);
414} 410}
415 411
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 36079aa91344..a13919a6a364 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -45,6 +45,7 @@
45#include "xfs_rtalloc.h" 45#include "xfs_rtalloc.h"
46#include "xfs_rw.h" 46#include "xfs_rw.h"
47#include "xfs_filestream.h" 47#include "xfs_filestream.h"
48#include "xfs_trace.h"
48 49
49/* 50/*
50 * File system operations 51 * File system operations
@@ -347,6 +348,7 @@ xfs_growfs_data_private(
347 be32_add_cpu(&agf->agf_length, new); 348 be32_add_cpu(&agf->agf_length, new);
348 ASSERT(be32_to_cpu(agf->agf_length) == 349 ASSERT(be32_to_cpu(agf->agf_length) ==
349 be32_to_cpu(agi->agi_length)); 350 be32_to_cpu(agi->agi_length));
351
350 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); 352 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
351 /* 353 /*
352 * Free the new space. 354 * Free the new space.
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 073bb4a26b19..f5c904a10c11 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,7 +43,7 @@
43#include "xfs_inode_item.h" 43#include "xfs_inode_item.h"
44#include "xfs_bmap.h" 44#include "xfs_bmap.h"
45#include "xfs_btree_trace.h" 45#include "xfs_btree_trace.h"
46#include "xfs_dir2_trace.h" 46#include "xfs_trace.h"
47 47
48 48
49/* 49/*
@@ -90,28 +90,6 @@ xfs_inode_alloc(
90 ip->i_size = 0; 90 ip->i_size = 0;
91 ip->i_new_size = 0; 91 ip->i_new_size = 0;
92 92
93 /*
94 * Initialize inode's trace buffers.
95 */
96#ifdef XFS_INODE_TRACE
97 ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
98#endif
99#ifdef XFS_BMAP_TRACE
100 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
101#endif
102#ifdef XFS_BTREE_TRACE
103 ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
104#endif
105#ifdef XFS_RW_TRACE
106 ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
107#endif
108#ifdef XFS_ILOCK_TRACE
109 ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
110#endif
111#ifdef XFS_DIR2_TRACE
112 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
113#endif
114
115 /* prevent anyone from using this yet */ 93 /* prevent anyone from using this yet */
116 VFS_I(ip)->i_state = I_NEW|I_LOCK; 94 VFS_I(ip)->i_state = I_NEW|I_LOCK;
117 95
@@ -133,25 +111,6 @@ xfs_inode_free(
133 if (ip->i_afp) 111 if (ip->i_afp)
134 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 112 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
135 113
136#ifdef XFS_INODE_TRACE
137 ktrace_free(ip->i_trace);
138#endif
139#ifdef XFS_BMAP_TRACE
140 ktrace_free(ip->i_xtrace);
141#endif
142#ifdef XFS_BTREE_TRACE
143 ktrace_free(ip->i_btrace);
144#endif
145#ifdef XFS_RW_TRACE
146 ktrace_free(ip->i_rwtrace);
147#endif
148#ifdef XFS_ILOCK_TRACE
149 ktrace_free(ip->i_lock_trace);
150#endif
151#ifdef XFS_DIR2_TRACE
152 ktrace_free(ip->i_dir_trace);
153#endif
154
155 if (ip->i_itemp) { 114 if (ip->i_itemp) {
156 /* 115 /*
157 * Only if we are shutting down the fs will we see an 116 * Only if we are shutting down the fs will we see an
@@ -210,6 +169,7 @@ xfs_iget_cache_hit(
210 * instead of polling for it. 169 * instead of polling for it.
211 */ 170 */
212 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { 171 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
172 trace_xfs_iget_skip(ip);
213 XFS_STATS_INC(xs_ig_frecycle); 173 XFS_STATS_INC(xs_ig_frecycle);
214 error = EAGAIN; 174 error = EAGAIN;
215 goto out_error; 175 goto out_error;
@@ -228,7 +188,7 @@ xfs_iget_cache_hit(
228 * Need to carefully get it back into useable state. 188 * Need to carefully get it back into useable state.
229 */ 189 */
230 if (ip->i_flags & XFS_IRECLAIMABLE) { 190 if (ip->i_flags & XFS_IRECLAIMABLE) {
231 xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); 191 trace_xfs_iget_reclaim(ip);
232 192
233 /* 193 /*
234 * We need to set XFS_INEW atomically with clearing the 194 * We need to set XFS_INEW atomically with clearing the
@@ -254,6 +214,7 @@ xfs_iget_cache_hit(
254 ip->i_flags &= ~XFS_INEW; 214 ip->i_flags &= ~XFS_INEW;
255 ip->i_flags |= XFS_IRECLAIMABLE; 215 ip->i_flags |= XFS_IRECLAIMABLE;
256 __xfs_inode_set_reclaim_tag(pag, ip); 216 __xfs_inode_set_reclaim_tag(pag, ip);
217 trace_xfs_iget_reclaim(ip);
257 goto out_error; 218 goto out_error;
258 } 219 }
259 inode->i_state = I_LOCK|I_NEW; 220 inode->i_state = I_LOCK|I_NEW;
@@ -273,8 +234,9 @@ xfs_iget_cache_hit(
273 xfs_ilock(ip, lock_flags); 234 xfs_ilock(ip, lock_flags);
274 235
275 xfs_iflags_clear(ip, XFS_ISTALE); 236 xfs_iflags_clear(ip, XFS_ISTALE);
276 xfs_itrace_exit_tag(ip, "xfs_iget.found");
277 XFS_STATS_INC(xs_ig_found); 237 XFS_STATS_INC(xs_ig_found);
238
239 trace_xfs_iget_found(ip);
278 return 0; 240 return 0;
279 241
280out_error: 242out_error:
@@ -308,7 +270,7 @@ xfs_iget_cache_miss(
308 if (error) 270 if (error)
309 goto out_destroy; 271 goto out_destroy;
310 272
311 xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); 273 xfs_itrace_entry(ip);
312 274
313 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { 275 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
314 error = ENOENT; 276 error = ENOENT;
@@ -353,6 +315,8 @@ xfs_iget_cache_miss(
353 315
354 write_unlock(&pag->pag_ici_lock); 316 write_unlock(&pag->pag_ici_lock);
355 radix_tree_preload_end(); 317 radix_tree_preload_end();
318
319 trace_xfs_iget_alloc(ip);
356 *ipp = ip; 320 *ipp = ip;
357 return 0; 321 return 0;
358 322
@@ -639,7 +603,7 @@ xfs_ilock(
639 else if (lock_flags & XFS_ILOCK_SHARED) 603 else if (lock_flags & XFS_ILOCK_SHARED)
640 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); 604 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
641 605
642 xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address); 606 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
643} 607}
644 608
645/* 609/*
@@ -684,7 +648,7 @@ xfs_ilock_nowait(
684 if (!mrtryaccess(&ip->i_lock)) 648 if (!mrtryaccess(&ip->i_lock))
685 goto out_undo_iolock; 649 goto out_undo_iolock;
686 } 650 }
687 xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address); 651 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
688 return 1; 652 return 1;
689 653
690 out_undo_iolock: 654 out_undo_iolock:
@@ -746,7 +710,7 @@ xfs_iunlock(
746 xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp, 710 xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
747 (xfs_log_item_t*)(ip->i_itemp)); 711 (xfs_log_item_t*)(ip->i_itemp));
748 } 712 }
749 xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address); 713 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
750} 714}
751 715
752/* 716/*
@@ -765,6 +729,8 @@ xfs_ilock_demote(
765 mrdemote(&ip->i_lock); 729 mrdemote(&ip->i_lock);
766 if (lock_flags & XFS_IOLOCK_EXCL) 730 if (lock_flags & XFS_IOLOCK_EXCL)
767 mrdemote(&ip->i_iolock); 731 mrdemote(&ip->i_iolock);
732
733 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
768} 734}
769 735
770#ifdef DEBUG 736#ifdef DEBUG
@@ -795,52 +761,3 @@ xfs_isilocked(
795 return 1; 761 return 1;
796} 762}
797#endif 763#endif
798
799#ifdef XFS_INODE_TRACE
800
801#define KTRACE_ENTER(ip, vk, s, line, ra) \
802 ktrace_enter((ip)->i_trace, \
803/* 0 */ (void *)(__psint_t)(vk), \
804/* 1 */ (void *)(s), \
805/* 2 */ (void *)(__psint_t) line, \
806/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
807/* 4 */ (void *)(ra), \
808/* 5 */ NULL, \
809/* 6 */ (void *)(__psint_t)current_cpu(), \
810/* 7 */ (void *)(__psint_t)current_pid(), \
811/* 8 */ (void *)__return_address, \
812/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
813
814/*
815 * Vnode tracing code.
816 */
817void
818_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
819{
820 KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
821}
822
823void
824_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
825{
826 KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
827}
828
829void
830xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
831{
832 KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
833}
834
835void
836_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
837{
838 KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
839}
840
841void
842xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
843{
844 KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
845}
846#endif /* XFS_INODE_TRACE */
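
The xfs_iget.c changes above remove the six per-inode ktrace buffers that xfs_inode_alloc() used to allocate and xfs_inode_free() had to free, and convert the iget and ilock call sites to tracepoints that take only the inode (plus lock flags and _RET_IP_ for the locking events). When many events share one prototype like this, the class/instance split keeps the per-event cost to a single line. A hedged sketch of an inode-only class, assuming a minimal field set (the real definitions are in xfs_trace.h, outside this diff):

/* Hedged illustration; not part of this patch. */
DECLARE_EVENT_CLASS(xfs_inode_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
	),
	TP_printk("dev %d:%d ino 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino)
)

#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))
DEFINE_INODE_EVENT(xfs_iget_skip);
DEFINE_INODE_EVENT(xfs_iget_reclaim);
DEFINE_INODE_EVENT(xfs_iget_found);
DEFINE_INODE_EVENT(xfs_iget_alloc);

Because the record buffer is now the tracing core's global ring buffer, nothing has to be allocated or freed per inode, which is why the KM_NOFS ktrace_alloc()/ktrace_free() pairs simply disappear rather than being converted.
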
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b92a4fa2a0a1..ce278b3ae7fc 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -47,10 +47,10 @@
47#include "xfs_rw.h" 47#include "xfs_rw.h"
48#include "xfs_error.h" 48#include "xfs_error.h"
49#include "xfs_utils.h" 49#include "xfs_utils.h"
50#include "xfs_dir2_trace.h"
51#include "xfs_quota.h" 50#include "xfs_quota.h"
52#include "xfs_filestream.h" 51#include "xfs_filestream.h"
53#include "xfs_vnodeops.h" 52#include "xfs_vnodeops.h"
53#include "xfs_trace.h"
54 54
55kmem_zone_t *xfs_ifork_zone; 55kmem_zone_t *xfs_ifork_zone;
56kmem_zone_t *xfs_inode_zone; 56kmem_zone_t *xfs_inode_zone;
@@ -1291,42 +1291,6 @@ xfs_file_last_byte(
1291 return last_byte; 1291 return last_byte;
1292} 1292}
1293 1293
1294#if defined(XFS_RW_TRACE)
1295STATIC void
1296xfs_itrunc_trace(
1297 int tag,
1298 xfs_inode_t *ip,
1299 int flag,
1300 xfs_fsize_t new_size,
1301 xfs_off_t toss_start,
1302 xfs_off_t toss_finish)
1303{
1304 if (ip->i_rwtrace == NULL) {
1305 return;
1306 }
1307
1308 ktrace_enter(ip->i_rwtrace,
1309 (void*)((long)tag),
1310 (void*)ip,
1311 (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1312 (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1313 (void*)((long)flag),
1314 (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1315 (void*)(unsigned long)(new_size & 0xffffffff),
1316 (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1317 (void*)(unsigned long)(toss_start & 0xffffffff),
1318 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1319 (void*)(unsigned long)(toss_finish & 0xffffffff),
1320 (void*)(unsigned long)current_cpu(),
1321 (void*)(unsigned long)current_pid(),
1322 (void*)NULL,
1323 (void*)NULL,
1324 (void*)NULL);
1325}
1326#else
1327#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1328#endif
1329
1330/* 1294/*
1331 * Start the truncation of the file to new_size. The new size 1295 * Start the truncation of the file to new_size. The new size
1332 * must be smaller than the current size. This routine will 1296 * must be smaller than the current size. This routine will
@@ -1409,8 +1373,7 @@ xfs_itruncate_start(
1409 return 0; 1373 return 0;
1410 } 1374 }
1411 last_byte = xfs_file_last_byte(ip); 1375 last_byte = xfs_file_last_byte(ip);
1412 xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start, 1376 trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte);
1413 last_byte);
1414 if (last_byte > toss_start) { 1377 if (last_byte > toss_start) {
1415 if (flags & XFS_ITRUNC_DEFINITE) { 1378 if (flags & XFS_ITRUNC_DEFINITE) {
1416 xfs_tosspages(ip, toss_start, 1379 xfs_tosspages(ip, toss_start,
@@ -1514,7 +1477,8 @@ xfs_itruncate_finish(
1514 new_size = 0LL; 1477 new_size = 0LL;
1515 } 1478 }
1516 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 1479 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1517 xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0); 1480 trace_xfs_itruncate_finish_start(ip, new_size);
1481
1518 /* 1482 /*
1519 * The first thing we do is set the size to new_size permanently 1483 * The first thing we do is set the size to new_size permanently
1520 * on disk. This way we don't have to worry about anyone ever 1484 * on disk. This way we don't have to worry about anyone ever
@@ -1731,7 +1695,7 @@ xfs_itruncate_finish(
1731 ASSERT((new_size != 0) || 1695 ASSERT((new_size != 0) ||
1732 (fork == XFS_ATTR_FORK) || 1696 (fork == XFS_ATTR_FORK) ||
1733 (ip->i_d.di_nextents == 0)); 1697 (ip->i_d.di_nextents == 0));
1734 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); 1698 trace_xfs_itruncate_finish_end(ip, new_size);
1735 return 0; 1699 return 0;
1736} 1700}
1737 1701
@@ -3252,23 +3216,6 @@ corrupt_out:
3252 return XFS_ERROR(EFSCORRUPTED); 3216 return XFS_ERROR(EFSCORRUPTED);
3253} 3217}
3254 3218
3255
3256
3257#ifdef XFS_ILOCK_TRACE
3258void
3259xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3260{
3261 ktrace_enter(ip->i_lock_trace,
3262 (void *)ip,
3263 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3264 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3265 (void *)ra, /* caller of ilock */
3266 (void *)(unsigned long)current_cpu(),
3267 (void *)(unsigned long)current_pid(),
3268 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3269}
3270#endif
3271
3272/* 3219/*
3273 * Return a pointer to the extent record at file index idx. 3220 * Return a pointer to the extent record at file index idx.
3274 */ 3221 */
@@ -3300,13 +3247,17 @@ xfs_iext_get_ext(
3300 */ 3247 */
3301void 3248void
3302xfs_iext_insert( 3249xfs_iext_insert(
3303 xfs_ifork_t *ifp, /* inode fork pointer */ 3250 xfs_inode_t *ip, /* incore inode pointer */
3304 xfs_extnum_t idx, /* starting index of new items */ 3251 xfs_extnum_t idx, /* starting index of new items */
3305 xfs_extnum_t count, /* number of inserted items */ 3252 xfs_extnum_t count, /* number of inserted items */
3306 xfs_bmbt_irec_t *new) /* items to insert */ 3253 xfs_bmbt_irec_t *new, /* items to insert */
3254 int state) /* type of extent conversion */
3307{ 3255{
3256 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3308 xfs_extnum_t i; /* extent record index */ 3257 xfs_extnum_t i; /* extent record index */
3309 3258
3259 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
3260
3310 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3261 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3311 xfs_iext_add(ifp, idx, count); 3262 xfs_iext_add(ifp, idx, count);
3312 for (i = idx; i < idx + count; i++, new++) 3263 for (i = idx; i < idx + count; i++, new++)
@@ -3549,13 +3500,17 @@ xfs_iext_add_indirect_multi(
3549 */ 3500 */
3550void 3501void
3551xfs_iext_remove( 3502xfs_iext_remove(
3552 xfs_ifork_t *ifp, /* inode fork pointer */ 3503 xfs_inode_t *ip, /* incore inode pointer */
3553 xfs_extnum_t idx, /* index to begin removing exts */ 3504 xfs_extnum_t idx, /* index to begin removing exts */
3554 int ext_diff) /* number of extents to remove */ 3505 int ext_diff, /* number of extents to remove */
3506 int state) /* type of extent conversion */
3555{ 3507{
3508 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3556 xfs_extnum_t nextents; /* number of extents in file */ 3509 xfs_extnum_t nextents; /* number of extents in file */
3557 int new_size; /* size of extents after removal */ 3510 int new_size; /* size of extents after removal */
3558 3511
3512 trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
3513
3559 ASSERT(ext_diff > 0); 3514 ASSERT(ext_diff > 0);
3560 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3515 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3561 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 3516 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 41555de1d1db..ec1f28c4fc4f 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -213,7 +213,6 @@ typedef struct xfs_icdinode {
213 213
214struct bhv_desc; 214struct bhv_desc;
215struct cred; 215struct cred;
216struct ktrace;
217struct xfs_buf; 216struct xfs_buf;
218struct xfs_bmap_free; 217struct xfs_bmap_free;
219struct xfs_bmbt_irec; 218struct xfs_bmbt_irec;
@@ -222,13 +221,6 @@ struct xfs_mount;
222struct xfs_trans; 221struct xfs_trans;
223struct xfs_dquot; 222struct xfs_dquot;
224 223
225#if defined(XFS_ILOCK_TRACE)
226#define XFS_ILOCK_KTRACE_SIZE 32
227extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *);
228#else
229#define xfs_ilock_trace(i,n,f,ra)
230#endif
231
232typedef struct dm_attrs_s { 224typedef struct dm_attrs_s {
233 __uint32_t da_dmevmask; /* DMIG event mask */ 225 __uint32_t da_dmevmask; /* DMIG event mask */
234 __uint16_t da_dmstate; /* DMIG state info */ 226 __uint16_t da_dmstate; /* DMIG state info */
@@ -271,26 +263,6 @@ typedef struct xfs_inode {
271 263
272 /* VFS inode */ 264 /* VFS inode */
273 struct inode i_vnode; /* embedded VFS inode */ 265 struct inode i_vnode; /* embedded VFS inode */
274
275 /* Trace buffers per inode. */
276#ifdef XFS_INODE_TRACE
277 struct ktrace *i_trace; /* general inode trace */
278#endif
279#ifdef XFS_BMAP_TRACE
280 struct ktrace *i_xtrace; /* inode extent list trace */
281#endif
282#ifdef XFS_BTREE_TRACE
283 struct ktrace *i_btrace; /* inode bmap btree trace */
284#endif
285#ifdef XFS_RW_TRACE
286 struct ktrace *i_rwtrace; /* inode read/write trace */
287#endif
288#ifdef XFS_ILOCK_TRACE
289 struct ktrace *i_lock_trace; /* inode lock/unlock trace */
290#endif
291#ifdef XFS_DIR2_TRACE
292 struct ktrace *i_dir_trace; /* inode directory trace */
293#endif
294} xfs_inode_t; 266} xfs_inode_t;
295 267
296#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ 268#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \
@@ -406,6 +378,14 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
406#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \ 378#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
407 | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED) 379 | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
408 380
381#define XFS_LOCK_FLAGS \
382 { XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \
383 { XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \
384 { XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \
385 { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \
386 { XFS_IUNLOCK_NONOTIFY, "IUNLOCK_NONOTIFY" }
387
388
409/* 389/*
410 * Flags for lockdep annotations. 390 * Flags for lockdep annotations.
411 * 391 *
@@ -455,6 +435,10 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
455#define XFS_ITRUNC_DEFINITE 0x1 435#define XFS_ITRUNC_DEFINITE 0x1
456#define XFS_ITRUNC_MAYBE 0x2 436#define XFS_ITRUNC_MAYBE 0x2
457 437
438#define XFS_ITRUNC_FLAGS \
439 { XFS_ITRUNC_DEFINITE, "DEFINITE" }, \
440 { XFS_ITRUNC_MAYBE, "MAYBE" }
441
458/* 442/*
459 * For multiple groups support: if S_ISGID bit is set in the parent 443 * For multiple groups support: if S_ISGID bit is set in the parent
460 * directory, group of new file is set to that of the parent, and 444 * directory, group of new file is set to that of the parent, and
@@ -507,48 +491,16 @@ void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
507void xfs_synchronize_times(xfs_inode_t *); 491void xfs_synchronize_times(xfs_inode_t *);
508void xfs_mark_inode_dirty_sync(xfs_inode_t *); 492void xfs_mark_inode_dirty_sync(xfs_inode_t *);
509 493
510#if defined(XFS_INODE_TRACE)
511
512#define INODE_TRACE_SIZE 16 /* number of trace entries */
513#define INODE_KTRACE_ENTRY 1
514#define INODE_KTRACE_EXIT 2
515#define INODE_KTRACE_HOLD 3
516#define INODE_KTRACE_REF 4
517#define INODE_KTRACE_RELE 5
518
519extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *);
520extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *);
521extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *);
522extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *);
523extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *);
524#define xfs_itrace_entry(ip) \
525 _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address)
526#define xfs_itrace_exit(ip) \
527 _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address)
528#define xfs_itrace_exit_tag(ip, tag) \
529 _xfs_itrace_exit(ip, tag, (inst_t *)__return_address)
530#define xfs_itrace_ref(ip) \
531 _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address)
532
533#else
534#define xfs_itrace_entry(a)
535#define xfs_itrace_exit(a)
536#define xfs_itrace_exit_tag(a, b)
537#define xfs_itrace_hold(a, b, c, d)
538#define xfs_itrace_ref(a)
539#define xfs_itrace_rele(a, b, c, d)
540#endif
541
542#define IHOLD(ip) \ 494#define IHOLD(ip) \
543do { \ 495do { \
544 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ 496 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
545 atomic_inc(&(VFS_I(ip)->i_count)); \ 497 atomic_inc(&(VFS_I(ip)->i_count)); \
546 xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ 498 trace_xfs_ihold(ip, _THIS_IP_); \
547} while (0) 499} while (0)
548 500
549#define IRELE(ip) \ 501#define IRELE(ip) \
550do { \ 502do { \
551 xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ 503 trace_xfs_irele(ip, _THIS_IP_); \
552 iput(VFS_I(ip)); \ 504 iput(VFS_I(ip)); \
553} while (0) 505} while (0)
554 506
@@ -577,11 +529,11 @@ int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
577int xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int); 529int xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int);
578 530
579xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); 531xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
580void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t, 532void xfs_iext_insert(xfs_inode_t *, xfs_extnum_t, xfs_extnum_t,
581 xfs_bmbt_irec_t *); 533 xfs_bmbt_irec_t *, int);
582void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int); 534void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int);
583void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int); 535void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int);
584void xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int); 536void xfs_iext_remove(xfs_inode_t *, xfs_extnum_t, int, int);
585void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int); 537void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
586void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int); 538void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
587void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int); 539void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
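
The XFS_LOCK_FLAGS and XFS_ITRUNC_FLAGS tables added to xfs_inode.h above are lists of { value, "name" } pairs in exactly the form consumed by __print_flags() in a tracepoint's TP_printk(), so the lock events print symbolic flag names where the removed xfs_ilock_trace() stored a bare integer code (1 = lock, 3 = unlock). A hedged sketch of how such a table could be used, assuming a field layout that is not shown in this diff:

/* Hedged illustration; the real lock class is in xfs_trace.h. */
DECLARE_EVENT_CLASS(xfs_lock_class,
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
		 unsigned long caller_ip),
	TP_ARGS(ip, lock_flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, lock_flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_LOCK_EVENT(name) \
DEFINE_EVENT(xfs_lock_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);

The BMAPI_FLAGS table added to xfs_iomap.h below follows the same convention, and the trace_xfs_ihold()/trace_xfs_irele() calls in IHOLD/IRELE pass _THIS_IP_ so the reference tracking formerly done by xfs_itrace_hold()/xfs_itrace_rele() survives without per-inode trace buffers.
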
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9794b876d6ff..f38855d21ea5 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -41,6 +41,7 @@
41#include "xfs_ialloc.h" 41#include "xfs_ialloc.h"
42#include "xfs_rw.h" 42#include "xfs_rw.h"
43#include "xfs_error.h" 43#include "xfs_error.h"
44#include "xfs_trace.h"
44 45
45 46
46kmem_zone_t *xfs_ili_zone; /* inode log item zone */ 47kmem_zone_t *xfs_ili_zone; /* inode log item zone */
@@ -800,7 +801,9 @@ xfs_inode_item_pushbuf(
800 !completion_done(&ip->i_flush)); 801 !completion_done(&ip->i_flush));
801 iip->ili_pushbuf_flag = 0; 802 iip->ili_pushbuf_flag = 0;
802 xfs_iunlock(ip, XFS_ILOCK_SHARED); 803 xfs_iunlock(ip, XFS_ILOCK_SHARED);
803 xfs_buftrace("INODE ITEM PUSH", bp); 804
805 trace_xfs_inode_item_push(bp, _RET_IP_);
806
804 if (XFS_BUF_ISPINNED(bp)) { 807 if (XFS_BUF_ISPINNED(bp)) {
805 xfs_log_force(mp, (xfs_lsn_t)0, 808 xfs_log_force(mp, (xfs_lsn_t)0,
806 XFS_LOG_FORCE); 809 XFS_LOG_FORCE);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7294abce6ef2..0b65039951a0 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,72 +47,8 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_iomap.h" 49#include "xfs_iomap.h"
50#include "xfs_trace.h"
50 51
51#if defined(XFS_RW_TRACE)
52void
53xfs_iomap_enter_trace(
54 int tag,
55 xfs_inode_t *ip,
56 xfs_off_t offset,
57 ssize_t count)
58{
59 if (!ip->i_rwtrace)
60 return;
61
62 ktrace_enter(ip->i_rwtrace,
63 (void *)((unsigned long)tag),
64 (void *)ip,
65 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
66 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
67 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
68 (void *)((unsigned long)(offset & 0xffffffff)),
69 (void *)((unsigned long)count),
70 (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
71 (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
72 (void *)((unsigned long)current_pid()),
73 (void *)NULL,
74 (void *)NULL,
75 (void *)NULL,
76 (void *)NULL,
77 (void *)NULL,
78 (void *)NULL);
79}
80
81void
82xfs_iomap_map_trace(
83 int tag,
84 xfs_inode_t *ip,
85 xfs_off_t offset,
86 ssize_t count,
87 xfs_iomap_t *iomapp,
88 xfs_bmbt_irec_t *imapp,
89 int flags)
90{
91 if (!ip->i_rwtrace)
92 return;
93
94 ktrace_enter(ip->i_rwtrace,
95 (void *)((unsigned long)tag),
96 (void *)ip,
97 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
98 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
99 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
100 (void *)((unsigned long)(offset & 0xffffffff)),
101 (void *)((unsigned long)count),
102 (void *)((unsigned long)flags),
103 (void *)((unsigned long)((iomapp->iomap_offset >> 32) & 0xffffffff)),
104 (void *)((unsigned long)(iomapp->iomap_offset & 0xffffffff)),
105 (void *)((unsigned long)(iomapp->iomap_delta)),
106 (void *)((unsigned long)(iomapp->iomap_bsize)),
107 (void *)((unsigned long)(iomapp->iomap_bn)),
108 (void *)(__psint_t)(imapp->br_startoff),
109 (void *)((unsigned long)(imapp->br_blockcount)),
110 (void *)(__psint_t)(imapp->br_startblock));
111}
112#else
113#define xfs_iomap_enter_trace(tag, io, offset, count)
114#define xfs_iomap_map_trace(tag, io, offset, count, iomapp, imapp, flags)
115#endif
116 52
117#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ 53#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
118 << mp->m_writeio_log) 54 << mp->m_writeio_log)
@@ -187,21 +123,20 @@ xfs_iomap(
187 if (XFS_FORCED_SHUTDOWN(mp)) 123 if (XFS_FORCED_SHUTDOWN(mp))
188 return XFS_ERROR(EIO); 124 return XFS_ERROR(EIO);
189 125
126 trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
127
190 switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) { 128 switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
191 case BMAPI_READ: 129 case BMAPI_READ:
192 xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, ip, offset, count);
193 lockmode = xfs_ilock_map_shared(ip); 130 lockmode = xfs_ilock_map_shared(ip);
194 bmapi_flags = XFS_BMAPI_ENTIRE; 131 bmapi_flags = XFS_BMAPI_ENTIRE;
195 break; 132 break;
196 case BMAPI_WRITE: 133 case BMAPI_WRITE:
197 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count);
198 lockmode = XFS_ILOCK_EXCL; 134 lockmode = XFS_ILOCK_EXCL;
199 if (flags & BMAPI_IGNSTATE) 135 if (flags & BMAPI_IGNSTATE)
200 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; 136 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
201 xfs_ilock(ip, lockmode); 137 xfs_ilock(ip, lockmode);
202 break; 138 break;
203 case BMAPI_ALLOCATE: 139 case BMAPI_ALLOCATE:
204 xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count);
205 lockmode = XFS_ILOCK_SHARED; 140 lockmode = XFS_ILOCK_SHARED;
206 bmapi_flags = XFS_BMAPI_ENTIRE; 141 bmapi_flags = XFS_BMAPI_ENTIRE;
207 142
@@ -237,8 +172,7 @@ xfs_iomap(
237 if (nimaps && 172 if (nimaps &&
238 (imap.br_startblock != HOLESTARTBLOCK) && 173 (imap.br_startblock != HOLESTARTBLOCK) &&
239 (imap.br_startblock != DELAYSTARTBLOCK)) { 174 (imap.br_startblock != DELAYSTARTBLOCK)) {
240 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, 175 trace_xfs_iomap_found(ip, offset, count, flags, &imap);
241 offset, count, iomapp, &imap, flags);
242 break; 176 break;
243 } 177 }
244 178
@@ -250,8 +184,7 @@ xfs_iomap(
250 &imap, &nimaps); 184 &imap, &nimaps);
251 } 185 }
252 if (!error) { 186 if (!error) {
253 xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, ip, 187 trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
254 offset, count, iomapp, &imap, flags);
255 } 188 }
256 iomap_flags = IOMAP_NEW; 189 iomap_flags = IOMAP_NEW;
257 break; 190 break;
@@ -261,8 +194,7 @@ xfs_iomap(
261 lockmode = 0; 194 lockmode = 0;
262 195
263 if (nimaps && !isnullstartblock(imap.br_startblock)) { 196 if (nimaps && !isnullstartblock(imap.br_startblock)) {
264 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, 197 trace_xfs_iomap_found(ip, offset, count, flags, &imap);
265 offset, count, iomapp, &imap, flags);
266 break; 198 break;
267 } 199 }
268 200
@@ -623,8 +555,7 @@ retry:
623 * delalloc blocks and retry without EOF preallocation. 555 * delalloc blocks and retry without EOF preallocation.
624 */ 556 */
625 if (nimaps == 0) { 557 if (nimaps == 0) {
626 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, 558 trace_xfs_delalloc_enospc(ip, offset, count);
627 ip, offset, count);
628 if (flushed) 559 if (flushed)
629 return XFS_ERROR(ENOSPC); 560 return XFS_ERROR(ENOSPC);
630 561
@@ -837,7 +768,7 @@ xfs_iomap_write_unwritten(
837 int committed; 768 int committed;
838 int error; 769 int error;
839 770
840 xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count); 771 trace_xfs_unwritten_convert(ip, offset, count);
841 772
842 offset_fsb = XFS_B_TO_FSBT(mp, offset); 773 offset_fsb = XFS_B_TO_FSBT(mp, offset);
843 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 774 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index fdcf7b82747f..174f29990991 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -43,6 +43,14 @@ typedef enum {
43 BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */ 43 BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */
44} bmapi_flags_t; 44} bmapi_flags_t;
45 45
46#define BMAPI_FLAGS \
47 { BMAPI_READ, "READ" }, \
48 { BMAPI_WRITE, "WRITE" }, \
49 { BMAPI_ALLOCATE, "ALLOCATE" }, \
50 { BMAPI_IGNSTATE, "IGNSTATE" }, \
51 { BMAPI_DIRECT, "DIRECT" }, \
52 { BMAPI_MMAP, "MMAP" }, \
53 { BMAPI_TRYLOCK, "TRYLOCK" }
46 54
47/* 55/*
48 * xfs_iomap_t: File system I/O map 56 * xfs_iomap_t: File system I/O map
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9dbdff3ea484..4cb1792040e3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -40,6 +40,7 @@
40#include "xfs_dinode.h" 40#include "xfs_dinode.h"
41#include "xfs_inode.h" 41#include "xfs_inode.h"
42#include "xfs_rw.h" 42#include "xfs_rw.h"
43#include "xfs_trace.h"
43 44
44kmem_zone_t *xfs_log_ticket_zone; 45kmem_zone_t *xfs_log_ticket_zone;
45 46
@@ -122,85 +123,6 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
122 123
123STATIC int xlog_iclogs_empty(xlog_t *log); 124STATIC int xlog_iclogs_empty(xlog_t *log);
124 125
125#if defined(XFS_LOG_TRACE)
126
127#define XLOG_TRACE_LOGGRANT_SIZE 2048
128#define XLOG_TRACE_ICLOG_SIZE 256
129
130void
131xlog_trace_loggrant_alloc(xlog_t *log)
132{
133 log->l_grant_trace = ktrace_alloc(XLOG_TRACE_LOGGRANT_SIZE, KM_NOFS);
134}
135
136void
137xlog_trace_loggrant_dealloc(xlog_t *log)
138{
139 ktrace_free(log->l_grant_trace);
140}
141
142void
143xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
144{
145 unsigned long cnts;
146
147 /* ticket counts are 1 byte each */
148 cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8;
149
150 ktrace_enter(log->l_grant_trace,
151 (void *)tic,
152 (void *)log->l_reserve_headq,
153 (void *)log->l_write_headq,
154 (void *)((unsigned long)log->l_grant_reserve_cycle),
155 (void *)((unsigned long)log->l_grant_reserve_bytes),
156 (void *)((unsigned long)log->l_grant_write_cycle),
157 (void *)((unsigned long)log->l_grant_write_bytes),
158 (void *)((unsigned long)log->l_curr_cycle),
159 (void *)((unsigned long)log->l_curr_block),
160 (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
161 (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
162 (void *)string,
163 (void *)((unsigned long)tic->t_trans_type),
164 (void *)cnts,
165 (void *)((unsigned long)tic->t_curr_res),
166 (void *)((unsigned long)tic->t_unit_res));
167}
168
169void
170xlog_trace_iclog_alloc(xlog_in_core_t *iclog)
171{
172 iclog->ic_trace = ktrace_alloc(XLOG_TRACE_ICLOG_SIZE, KM_NOFS);
173}
174
175void
176xlog_trace_iclog_dealloc(xlog_in_core_t *iclog)
177{
178 ktrace_free(iclog->ic_trace);
179}
180
181void
182xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
183{
184 ktrace_enter(iclog->ic_trace,
185 (void *)((unsigned long)state),
186 (void *)((unsigned long)current_pid()),
187 (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
188 (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
189 (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
190 (void *)NULL, (void *)NULL);
191}
192#else
193
194#define xlog_trace_loggrant_alloc(log)
195#define xlog_trace_loggrant_dealloc(log)
196#define xlog_trace_loggrant(log,tic,string)
197
198#define xlog_trace_iclog_alloc(iclog)
199#define xlog_trace_iclog_dealloc(iclog)
200#define xlog_trace_iclog(iclog,state)
201
202#endif /* XFS_LOG_TRACE */
203
204 126
205static void 127static void
206xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic) 128xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
@@ -353,15 +275,17 @@ xfs_log_done(xfs_mount_t *mp,
353 275
354 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || 276 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
355 (flags & XFS_LOG_REL_PERM_RESERV)) { 277 (flags & XFS_LOG_REL_PERM_RESERV)) {
278 trace_xfs_log_done_nonperm(log, ticket);
279
356 /* 280 /*
357 * Release ticket if not permanent reservation or a specific 281 * Release ticket if not permanent reservation or a specific
358 * request has been made to release a permanent reservation. 282 * request has been made to release a permanent reservation.
359 */ 283 */
360 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
361 xlog_ungrant_log_space(log, ticket); 284 xlog_ungrant_log_space(log, ticket);
362 xfs_log_ticket_put(ticket); 285 xfs_log_ticket_put(ticket);
363 } else { 286 } else {
364 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); 287 trace_xfs_log_done_perm(log, ticket);
288
365 xlog_regrant_reserve_log_space(log, ticket); 289 xlog_regrant_reserve_log_space(log, ticket);
366 /* If this ticket was a permanent reservation and we aren't 290 /* If this ticket was a permanent reservation and we aren't
367 * trying to release it, reset the inited flags; so next time 291 * trying to release it, reset the inited flags; so next time
@@ -505,10 +429,13 @@ xfs_log_reserve(xfs_mount_t *mp,
505 429
506 XFS_STATS_INC(xs_try_logspace); 430 XFS_STATS_INC(xs_try_logspace);
507 431
432
508 if (*ticket != NULL) { 433 if (*ticket != NULL) {
509 ASSERT(flags & XFS_LOG_PERM_RESERV); 434 ASSERT(flags & XFS_LOG_PERM_RESERV);
510 internal_ticket = (xlog_ticket_t *)*ticket; 435 internal_ticket = (xlog_ticket_t *)*ticket;
511 xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)"); 436
437 trace_xfs_log_reserve(log, internal_ticket);
438
512 xlog_grant_push_ail(mp, internal_ticket->t_unit_res); 439 xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
513 retval = xlog_regrant_write_log_space(log, internal_ticket); 440 retval = xlog_regrant_write_log_space(log, internal_ticket);
514 } else { 441 } else {
@@ -519,10 +446,9 @@ xfs_log_reserve(xfs_mount_t *mp,
519 return XFS_ERROR(ENOMEM); 446 return XFS_ERROR(ENOMEM);
520 internal_ticket->t_trans_type = t_type; 447 internal_ticket->t_trans_type = t_type;
521 *ticket = internal_ticket; 448 *ticket = internal_ticket;
522 xlog_trace_loggrant(log, internal_ticket, 449
523 (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ? 450 trace_xfs_log_reserve(log, internal_ticket);
524 "xfs_log_reserve: create new ticket (permanent trans)" : 451
525 "xfs_log_reserve: create new ticket");
526 xlog_grant_push_ail(mp, 452 xlog_grant_push_ail(mp,
527 (internal_ticket->t_unit_res * 453 (internal_ticket->t_unit_res *
528 internal_ticket->t_cnt)); 454 internal_ticket->t_cnt));
@@ -734,7 +660,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
734 spin_unlock(&log->l_icloglock); 660 spin_unlock(&log->l_icloglock);
735 } 661 }
736 if (tic) { 662 if (tic) {
737 xlog_trace_loggrant(log, tic, "unmount rec"); 663 trace_xfs_log_umount_write(log, tic);
738 xlog_ungrant_log_space(log, tic); 664 xlog_ungrant_log_space(log, tic);
739 xfs_log_ticket_put(tic); 665 xfs_log_ticket_put(tic);
740 } 666 }
@@ -1030,7 +956,6 @@ xlog_iodone(xfs_buf_t *bp)
1030 xfs_fs_cmn_err(CE_WARN, l->l_mp, 956 xfs_fs_cmn_err(CE_WARN, l->l_mp,
1031 "xlog_iodone: Barriers are no longer supported" 957 "xlog_iodone: Barriers are no longer supported"
1032 " by device. Disabling barriers\n"); 958 " by device. Disabling barriers\n");
1033 xfs_buftrace("XLOG_IODONE BARRIERS OFF", bp);
1034 } 959 }
1035 960
1036 /* 961 /*
@@ -1085,13 +1010,10 @@ xlog_bdstrat_cb(struct xfs_buf *bp)
1085 return 0; 1010 return 0;
1086 } 1011 }
1087 1012
1088 xfs_buftrace("XLOG__BDSTRAT IOERROR", bp);
1089 XFS_BUF_ERROR(bp, EIO); 1013 XFS_BUF_ERROR(bp, EIO);
1090 XFS_BUF_STALE(bp); 1014 XFS_BUF_STALE(bp);
1091 xfs_biodone(bp); 1015 xfs_biodone(bp);
1092 return XFS_ERROR(EIO); 1016 return XFS_ERROR(EIO);
1093
1094
1095} 1017}
1096 1018
1097/* 1019/*
@@ -1246,7 +1168,6 @@ xlog_alloc_log(xfs_mount_t *mp,
1246 spin_lock_init(&log->l_grant_lock); 1168 spin_lock_init(&log->l_grant_lock);
1247 sv_init(&log->l_flush_wait, 0, "flush_wait"); 1169 sv_init(&log->l_flush_wait, 0, "flush_wait");
1248 1170
1249 xlog_trace_loggrant_alloc(log);
1250 /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ 1171 /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
1251 ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); 1172 ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
1252 1173
@@ -1305,8 +1226,6 @@ xlog_alloc_log(xfs_mount_t *mp,
1305 sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force"); 1226 sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
1306 sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write"); 1227 sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
1307 1228
1308 xlog_trace_iclog_alloc(iclog);
1309
1310 iclogp = &iclog->ic_next; 1229 iclogp = &iclog->ic_next;
1311 } 1230 }
1312 *iclogp = log->l_iclog; /* complete ring */ 1231 *iclogp = log->l_iclog; /* complete ring */
@@ -1321,13 +1240,11 @@ out_free_iclog:
1321 sv_destroy(&iclog->ic_force_wait); 1240 sv_destroy(&iclog->ic_force_wait);
1322 sv_destroy(&iclog->ic_write_wait); 1241 sv_destroy(&iclog->ic_write_wait);
1323 xfs_buf_free(iclog->ic_bp); 1242 xfs_buf_free(iclog->ic_bp);
1324 xlog_trace_iclog_dealloc(iclog);
1325 } 1243 }
1326 kmem_free(iclog); 1244 kmem_free(iclog);
1327 } 1245 }
1328 spinlock_destroy(&log->l_icloglock); 1246 spinlock_destroy(&log->l_icloglock);
1329 spinlock_destroy(&log->l_grant_lock); 1247 spinlock_destroy(&log->l_grant_lock);
1330 xlog_trace_loggrant_dealloc(log);
1331 xfs_buf_free(log->l_xbuf); 1248 xfs_buf_free(log->l_xbuf);
1332out_free_log: 1249out_free_log:
1333 kmem_free(log); 1250 kmem_free(log);
@@ -1607,7 +1524,6 @@ xlog_dealloc_log(xlog_t *log)
1607 sv_destroy(&iclog->ic_force_wait); 1524 sv_destroy(&iclog->ic_force_wait);
1608 sv_destroy(&iclog->ic_write_wait); 1525 sv_destroy(&iclog->ic_write_wait);
1609 xfs_buf_free(iclog->ic_bp); 1526 xfs_buf_free(iclog->ic_bp);
1610 xlog_trace_iclog_dealloc(iclog);
1611 next_iclog = iclog->ic_next; 1527 next_iclog = iclog->ic_next;
1612 kmem_free(iclog); 1528 kmem_free(iclog);
1613 iclog = next_iclog; 1529 iclog = next_iclog;
@@ -1616,7 +1532,6 @@ xlog_dealloc_log(xlog_t *log)
1616 spinlock_destroy(&log->l_grant_lock); 1532 spinlock_destroy(&log->l_grant_lock);
1617 1533
1618 xfs_buf_free(log->l_xbuf); 1534 xfs_buf_free(log->l_xbuf);
1619 xlog_trace_loggrant_dealloc(log);
1620 log->l_mp->m_log = NULL; 1535 log->l_mp->m_log = NULL;
1621 kmem_free(log); 1536 kmem_free(log);
1622} /* xlog_dealloc_log */ 1537} /* xlog_dealloc_log */
@@ -2414,7 +2329,6 @@ restart:
2414 2329
2415 iclog = log->l_iclog; 2330 iclog = log->l_iclog;
2416 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2331 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2417 xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
2418 XFS_STATS_INC(xs_log_noiclogs); 2332 XFS_STATS_INC(xs_log_noiclogs);
2419 2333
2420 /* Wait for log writes to have flushed */ 2334 /* Wait for log writes to have flushed */
@@ -2520,13 +2434,15 @@ xlog_grant_log_space(xlog_t *log,
2520 2434
2521 /* Is there space or do we need to sleep? */ 2435 /* Is there space or do we need to sleep? */
2522 spin_lock(&log->l_grant_lock); 2436 spin_lock(&log->l_grant_lock);
2523 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); 2437
2438 trace_xfs_log_grant_enter(log, tic);
2524 2439
2525 /* something is already sleeping; insert new transaction at end */ 2440 /* something is already sleeping; insert new transaction at end */
2526 if (log->l_reserve_headq) { 2441 if (log->l_reserve_headq) {
2527 xlog_ins_ticketq(&log->l_reserve_headq, tic); 2442 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2528 xlog_trace_loggrant(log, tic, 2443
2529 "xlog_grant_log_space: sleep 1"); 2444 trace_xfs_log_grant_sleep1(log, tic);
2445
2530 /* 2446 /*
2531 * Gotta check this before going to sleep, while we're 2447 * Gotta check this before going to sleep, while we're
2532 * holding the grant lock. 2448 * holding the grant lock.
@@ -2540,8 +2456,7 @@ xlog_grant_log_space(xlog_t *log,
2540 * If we got an error, and the filesystem is shutting down, 2456 * If we got an error, and the filesystem is shutting down,
2541 * we'll catch it down below. So just continue... 2457 * we'll catch it down below. So just continue...
2542 */ 2458 */
2543 xlog_trace_loggrant(log, tic, 2459 trace_xfs_log_grant_wake1(log, tic);
2544 "xlog_grant_log_space: wake 1");
2545 spin_lock(&log->l_grant_lock); 2460 spin_lock(&log->l_grant_lock);
2546 } 2461 }
2547 if (tic->t_flags & XFS_LOG_PERM_RESERV) 2462 if (tic->t_flags & XFS_LOG_PERM_RESERV)
@@ -2558,8 +2473,9 @@ redo:
2558 if (free_bytes < need_bytes) { 2473 if (free_bytes < need_bytes) {
2559 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2474 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2560 xlog_ins_ticketq(&log->l_reserve_headq, tic); 2475 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2561 xlog_trace_loggrant(log, tic, 2476
2562 "xlog_grant_log_space: sleep 2"); 2477 trace_xfs_log_grant_sleep2(log, tic);
2478
2563 spin_unlock(&log->l_grant_lock); 2479 spin_unlock(&log->l_grant_lock);
2564 xlog_grant_push_ail(log->l_mp, need_bytes); 2480 xlog_grant_push_ail(log->l_mp, need_bytes);
2565 spin_lock(&log->l_grant_lock); 2481 spin_lock(&log->l_grant_lock);
@@ -2571,8 +2487,8 @@ redo:
2571 if (XLOG_FORCED_SHUTDOWN(log)) 2487 if (XLOG_FORCED_SHUTDOWN(log))
2572 goto error_return; 2488 goto error_return;
2573 2489
2574 xlog_trace_loggrant(log, tic, 2490 trace_xfs_log_grant_wake2(log, tic);
2575 "xlog_grant_log_space: wake 2"); 2491
2576 goto redo; 2492 goto redo;
2577 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2493 } else if (tic->t_flags & XLOG_TIC_IN_Q)
2578 xlog_del_ticketq(&log->l_reserve_headq, tic); 2494 xlog_del_ticketq(&log->l_reserve_headq, tic);
@@ -2592,7 +2508,7 @@ redo:
2592 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn))); 2508 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
2593 } 2509 }
2594#endif 2510#endif
2595 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit"); 2511 trace_xfs_log_grant_exit(log, tic);
2596 xlog_verify_grant_head(log, 1); 2512 xlog_verify_grant_head(log, 1);
2597 spin_unlock(&log->l_grant_lock); 2513 spin_unlock(&log->l_grant_lock);
2598 return 0; 2514 return 0;
@@ -2600,7 +2516,9 @@ redo:
2600 error_return: 2516 error_return:
2601 if (tic->t_flags & XLOG_TIC_IN_Q) 2517 if (tic->t_flags & XLOG_TIC_IN_Q)
2602 xlog_del_ticketq(&log->l_reserve_headq, tic); 2518 xlog_del_ticketq(&log->l_reserve_headq, tic);
2603 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret"); 2519
2520 trace_xfs_log_grant_error(log, tic);
2521
2604 /* 2522 /*
2605 * If we are failing, make sure the ticket doesn't have any 2523 * If we are failing, make sure the ticket doesn't have any
2606 * current reservations. We don't want to add this back when 2524 * current reservations. We don't want to add this back when
@@ -2640,7 +2558,8 @@ xlog_regrant_write_log_space(xlog_t *log,
2640#endif 2558#endif
2641 2559
2642 spin_lock(&log->l_grant_lock); 2560 spin_lock(&log->l_grant_lock);
2643 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter"); 2561
2562 trace_xfs_log_regrant_write_enter(log, tic);
2644 2563
2645 if (XLOG_FORCED_SHUTDOWN(log)) 2564 if (XLOG_FORCED_SHUTDOWN(log))
2646 goto error_return; 2565 goto error_return;
@@ -2669,8 +2588,8 @@ xlog_regrant_write_log_space(xlog_t *log,
2669 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2588 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2670 xlog_ins_ticketq(&log->l_write_headq, tic); 2589 xlog_ins_ticketq(&log->l_write_headq, tic);
2671 2590
2672 xlog_trace_loggrant(log, tic, 2591 trace_xfs_log_regrant_write_sleep1(log, tic);
2673 "xlog_regrant_write_log_space: sleep 1"); 2592
2674 spin_unlock(&log->l_grant_lock); 2593 spin_unlock(&log->l_grant_lock);
2675 xlog_grant_push_ail(log->l_mp, need_bytes); 2594 xlog_grant_push_ail(log->l_mp, need_bytes);
2676 spin_lock(&log->l_grant_lock); 2595 spin_lock(&log->l_grant_lock);
@@ -2685,8 +2604,7 @@ xlog_regrant_write_log_space(xlog_t *log,
2685 if (XLOG_FORCED_SHUTDOWN(log)) 2604 if (XLOG_FORCED_SHUTDOWN(log))
2686 goto error_return; 2605 goto error_return;
2687 2606
2688 xlog_trace_loggrant(log, tic, 2607 trace_xfs_log_regrant_write_wake1(log, tic);
2689 "xlog_regrant_write_log_space: wake 1");
2690 } 2608 }
2691 } 2609 }
2692 2610
@@ -2704,6 +2622,8 @@ redo:
2704 spin_lock(&log->l_grant_lock); 2622 spin_lock(&log->l_grant_lock);
2705 2623
2706 XFS_STATS_INC(xs_sleep_logspace); 2624 XFS_STATS_INC(xs_sleep_logspace);
2625 trace_xfs_log_regrant_write_sleep2(log, tic);
2626
2707 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); 2627 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2708 2628
2709 /* If we're shutting down, this tic is already off the queue */ 2629 /* If we're shutting down, this tic is already off the queue */
@@ -2711,8 +2631,7 @@ redo:
2711 if (XLOG_FORCED_SHUTDOWN(log)) 2631 if (XLOG_FORCED_SHUTDOWN(log))
2712 goto error_return; 2632 goto error_return;
2713 2633
2714 xlog_trace_loggrant(log, tic, 2634 trace_xfs_log_regrant_write_wake2(log, tic);
2715 "xlog_regrant_write_log_space: wake 2");
2716 goto redo; 2635 goto redo;
2717 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2636 } else if (tic->t_flags & XLOG_TIC_IN_Q)
2718 xlog_del_ticketq(&log->l_write_headq, tic); 2637 xlog_del_ticketq(&log->l_write_headq, tic);
@@ -2727,7 +2646,8 @@ redo:
2727 } 2646 }
2728#endif 2647#endif
2729 2648
2730 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); 2649 trace_xfs_log_regrant_write_exit(log, tic);
2650
2731 xlog_verify_grant_head(log, 1); 2651 xlog_verify_grant_head(log, 1);
2732 spin_unlock(&log->l_grant_lock); 2652 spin_unlock(&log->l_grant_lock);
2733 return 0; 2653 return 0;
@@ -2736,7 +2656,9 @@ redo:
2736 error_return: 2656 error_return:
2737 if (tic->t_flags & XLOG_TIC_IN_Q) 2657 if (tic->t_flags & XLOG_TIC_IN_Q)
2738 xlog_del_ticketq(&log->l_reserve_headq, tic); 2658 xlog_del_ticketq(&log->l_reserve_headq, tic);
2739 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret"); 2659
2660 trace_xfs_log_regrant_write_error(log, tic);
2661
2740 /* 2662 /*
2741 * If we are failing, make sure the ticket doesn't have any 2663 * If we are failing, make sure the ticket doesn't have any
2742 * current reservations. We don't want to add this back when 2664 * current reservations. We don't want to add this back when
@@ -2760,8 +2682,8 @@ STATIC void
2760xlog_regrant_reserve_log_space(xlog_t *log, 2682xlog_regrant_reserve_log_space(xlog_t *log,
2761 xlog_ticket_t *ticket) 2683 xlog_ticket_t *ticket)
2762{ 2684{
2763 xlog_trace_loggrant(log, ticket, 2685 trace_xfs_log_regrant_reserve_enter(log, ticket);
2764 "xlog_regrant_reserve_log_space: enter"); 2686
2765 if (ticket->t_cnt > 0) 2687 if (ticket->t_cnt > 0)
2766 ticket->t_cnt--; 2688 ticket->t_cnt--;
2767 2689
@@ -2769,8 +2691,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2769 xlog_grant_sub_space(log, ticket->t_curr_res); 2691 xlog_grant_sub_space(log, ticket->t_curr_res);
2770 ticket->t_curr_res = ticket->t_unit_res; 2692 ticket->t_curr_res = ticket->t_unit_res;
2771 xlog_tic_reset_res(ticket); 2693 xlog_tic_reset_res(ticket);
2772 xlog_trace_loggrant(log, ticket, 2694
2773 "xlog_regrant_reserve_log_space: sub current res"); 2695 trace_xfs_log_regrant_reserve_sub(log, ticket);
2696
2774 xlog_verify_grant_head(log, 1); 2697 xlog_verify_grant_head(log, 1);
2775 2698
2776 /* just return if we still have some of the pre-reserved space */ 2699 /* just return if we still have some of the pre-reserved space */
@@ -2780,8 +2703,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2780 } 2703 }
2781 2704
2782 xlog_grant_add_space_reserve(log, ticket->t_unit_res); 2705 xlog_grant_add_space_reserve(log, ticket->t_unit_res);
2783 xlog_trace_loggrant(log, ticket, 2706
2784 "xlog_regrant_reserve_log_space: exit"); 2707 trace_xfs_log_regrant_reserve_exit(log, ticket);
2708
2785 xlog_verify_grant_head(log, 0); 2709 xlog_verify_grant_head(log, 0);
2786 spin_unlock(&log->l_grant_lock); 2710 spin_unlock(&log->l_grant_lock);
2787 ticket->t_curr_res = ticket->t_unit_res; 2711 ticket->t_curr_res = ticket->t_unit_res;
@@ -2811,11 +2735,11 @@ xlog_ungrant_log_space(xlog_t *log,
2811 ticket->t_cnt--; 2735 ticket->t_cnt--;
2812 2736
2813 spin_lock(&log->l_grant_lock); 2737 spin_lock(&log->l_grant_lock);
2814 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); 2738 trace_xfs_log_ungrant_enter(log, ticket);
2815 2739
2816 xlog_grant_sub_space(log, ticket->t_curr_res); 2740 xlog_grant_sub_space(log, ticket->t_curr_res);
2817 2741
2818 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current"); 2742 trace_xfs_log_ungrant_sub(log, ticket);
2819 2743
2820 /* If this is a permanent reservation ticket, we may be able to free 2744 /* If this is a permanent reservation ticket, we may be able to free
2821 * up more space based on the remaining count. 2745 * up more space based on the remaining count.
@@ -2825,7 +2749,8 @@ xlog_ungrant_log_space(xlog_t *log,
2825 xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt); 2749 xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
2826 } 2750 }
2827 2751
2828 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); 2752 trace_xfs_log_ungrant_exit(log, ticket);
2753
2829 xlog_verify_grant_head(log, 1); 2754 xlog_verify_grant_head(log, 1);
2830 spin_unlock(&log->l_grant_lock); 2755 spin_unlock(&log->l_grant_lock);
2831 xfs_log_move_tail(log->l_mp, 1); 2756 xfs_log_move_tail(log->l_mp, 1);
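
Each removed xlog_trace_loggrant(log, tic, "...") call above becomes a distinct typed tracepoint (trace_xfs_log_grant_enter, trace_xfs_log_grant_sleep1, and so on) taking the log and the ticket. Call sites that differ only by name are the usual fit for a trace-event class with per-site instances; a hedged sketch of that shape follows, with made-up class, event and field choices (the real definitions live in the new xfs_trace.h, which is not part of this hunk):

    /* illustrative only; not the actual xfs_trace.h definitions */
    DECLARE_EVENT_CLASS(demo_loggrant_class,
        TP_PROTO(struct log *log, struct xlog_ticket *tic),
        TP_ARGS(log, tic),
        TP_STRUCT__entry(
            __field(int, curr_res)
            __field(int, unit_res)
        ),
        TP_fast_assign(
            __entry->curr_res = tic->t_curr_res;
            __entry->unit_res = tic->t_unit_res;
        ),
        TP_printk("curr_res %d unit_res %d",
              __entry->curr_res, __entry->unit_res)
    );

    /* one DEFINE_EVENT per call site; all of them share the class */
    DEFINE_EVENT(demo_loggrant_class, demo_log_grant_enter,
        TP_PROTO(struct log *log, struct xlog_ticket *tic),
        TP_ARGS(log, tic));
    DEFINE_EVENT(demo_loggrant_class, demo_log_grant_exit,
        TP_PROTO(struct log *log, struct xlog_ticket *tic),
        TP_ARGS(log, tic));
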
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 679c7c4926a2..d55662db7077 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -19,7 +19,6 @@
19#define __XFS_LOG_PRIV_H__ 19#define __XFS_LOG_PRIV_H__
20 20
21struct xfs_buf; 21struct xfs_buf;
22struct ktrace;
23struct log; 22struct log;
24struct xlog_ticket; 23struct xlog_ticket;
25struct xfs_buf_cancel; 24struct xfs_buf_cancel;
@@ -135,6 +134,12 @@ static inline uint xlog_get_client_id(__be32 i)
135#define XLOG_TIC_INITED 0x1 /* has been initialized */ 134#define XLOG_TIC_INITED 0x1 /* has been initialized */
136#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */ 135#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */
137#define XLOG_TIC_IN_Q 0x4 136#define XLOG_TIC_IN_Q 0x4
137
138#define XLOG_TIC_FLAGS \
139 { XLOG_TIC_INITED, "XLOG_TIC_INITED" }, \
140 { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }, \
141 { XLOG_TIC_IN_Q, "XLOG_TIC_IN_Q" }
142
138#endif /* __KERNEL__ */ 143#endif /* __KERNEL__ */
139 144
140#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */ 145#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */
@@ -361,9 +366,6 @@ typedef struct xlog_in_core {
361 int ic_bwritecnt; 366 int ic_bwritecnt;
362 unsigned short ic_state; 367 unsigned short ic_state;
363 char *ic_datap; /* pointer to iclog data */ 368 char *ic_datap; /* pointer to iclog data */
364#ifdef XFS_LOG_TRACE
365 struct ktrace *ic_trace;
366#endif
367 369
368 /* Callback structures need their own cacheline */ 370 /* Callback structures need their own cacheline */
369 spinlock_t ic_callback_lock ____cacheline_aligned_in_smp; 371 spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
@@ -429,10 +431,6 @@ typedef struct log {
429 int l_grant_write_cycle; 431 int l_grant_write_cycle;
430 int l_grant_write_bytes; 432 int l_grant_write_bytes;
431 433
432#ifdef XFS_LOG_TRACE
433 struct ktrace *l_grant_trace;
434#endif
435
436 /* The following field are used for debugging; need to hold icloglock */ 434 /* The following field are used for debugging; need to hold icloglock */
437#ifdef DEBUG 435#ifdef DEBUG
438 char *l_iclog_bak[XLOG_MAX_ICLOGS]; 436 char *l_iclog_bak[XLOG_MAX_ICLOGS];
@@ -456,12 +454,6 @@ extern void xlog_put_bp(struct xfs_buf *);
456 454
457extern kmem_zone_t *xfs_log_ticket_zone; 455extern kmem_zone_t *xfs_log_ticket_zone;
458 456
459/* iclog tracing */
460#define XLOG_TRACE_GRAB_FLUSH 1
461#define XLOG_TRACE_REL_FLUSH 2
462#define XLOG_TRACE_SLEEP_FLUSH 3
463#define XLOG_TRACE_WAKE_FLUSH 4
464
465/* 457/*
466 * Unmount record type is used as a pseudo transaction type for the ticket. 458 * Unmount record type is used as a pseudo transaction type for the ticket.
467 * It's value must be outside the range of XFS_TRANS_* values. 459 * It's value must be outside the range of XFS_TRANS_* values.
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1ec98ed914d4..69ac2e5ef20c 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -46,6 +46,7 @@
46#include "xfs_quota.h" 46#include "xfs_quota.h"
47#include "xfs_rw.h" 47#include "xfs_rw.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_trace.h"
49 50
50STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *); 51STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
51STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t); 52STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
@@ -225,16 +226,10 @@ xlog_header_check_dump(
225 xfs_mount_t *mp, 226 xfs_mount_t *mp,
226 xlog_rec_header_t *head) 227 xlog_rec_header_t *head)
227{ 228{
228 int b; 229 cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n",
229 230 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
230 cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__); 231 cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n",
231 for (b = 0; b < 16; b++) 232 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
232 cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]);
233 cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
234 cmn_err(CE_DEBUG, " log : uuid = ");
235 for (b = 0; b < 16; b++)
236 cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]);
237 cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
238} 233}
239#else 234#else
240#define xlog_header_check_dump(mp, head) 235#define xlog_header_check_dump(mp, head)
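
The xlog_header_check_dump() rewrite above drops the byte-by-byte loop in favour of the kernel's %pU printk extension, which formats the 16 bytes behind a pointer as one standard UUID string. A minimal sketch of the specifier in isolation (the function is illustrative):

    #include <linux/kernel.h>

    static void demo_dump_uuid(const unsigned char *uuid)
    {
        /* %pU reads 16 bytes through the pointer and prints them as
         * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx */
        printk(KERN_DEBUG "uuid = %pU\n", uuid);
    }
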
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index bfffd6334abb..eb403b40e120 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -44,6 +44,8 @@
44#include "xfs_quota.h" 44#include "xfs_quota.h"
45#include "xfs_fsops.h" 45#include "xfs_fsops.h"
46#include "xfs_utils.h" 46#include "xfs_utils.h"
47#include "xfs_trace.h"
48
47 49
48STATIC void xfs_unmountfs_wait(xfs_mount_t *); 50STATIC void xfs_unmountfs_wait(xfs_mount_t *);
49 51
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 3ec91ac74c2a..91bfd60f4c74 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -92,6 +92,14 @@ typedef struct xfs_dqblk {
92 92
93#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP) 93#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
94 94
95#define XFS_DQ_FLAGS \
96 { XFS_DQ_USER, "USER" }, \
97 { XFS_DQ_PROJ, "PROJ" }, \
98 { XFS_DQ_GROUP, "GROUP" }, \
99 { XFS_DQ_DIRTY, "DIRTY" }, \
100 { XFS_DQ_WANT, "WANT" }, \
101 { XFS_DQ_INACTIVE, "INACTIVE" }
102
95/* 103/*
96 * In the worst case, when both user and group quotas are on, 104 * In the worst case, when both user and group quotas are on,
97 * we can have a max of three dquots changing in a single transaction. 105 * we can have a max of three dquots changing in a single transaction.
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index b81deea0ce19..fc1cda23b817 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -39,6 +39,7 @@
39#include "xfs_utils.h" 39#include "xfs_utils.h"
40#include "xfs_trans_space.h" 40#include "xfs_trans_space.h"
41#include "xfs_vnodeops.h" 41#include "xfs_vnodeops.h"
42#include "xfs_trace.h"
42 43
43 44
44/* 45/*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 385f6dceba5d..9e15a1185362 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -45,6 +45,7 @@
45#include "xfs_inode_item.h" 45#include "xfs_inode_item.h"
46#include "xfs_trans_space.h" 46#include "xfs_trans_space.h"
47#include "xfs_utils.h" 47#include "xfs_utils.h"
48#include "xfs_trace.h"
48 49
49 50
50/* 51/*
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 4c199d18f850..5aa07caea5f1 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -44,6 +44,7 @@
44#include "xfs_error.h" 44#include "xfs_error.h"
45#include "xfs_buf_item.h" 45#include "xfs_buf_item.h"
46#include "xfs_rw.h" 46#include "xfs_rw.h"
47#include "xfs_trace.h"
47 48
48/* 49/*
49 * This is a subroutine for xfs_write() and other writers (xfs_ioctl) 50 * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
@@ -171,7 +172,6 @@ xfs_bioerror(
171 * No need to wait until the buffer is unpinned. 172 * No need to wait until the buffer is unpinned.
172 * We aren't flushing it. 173 * We aren't flushing it.
173 */ 174 */
174 xfs_buftrace("XFS IOERROR", bp);
175 XFS_BUF_ERROR(bp, EIO); 175 XFS_BUF_ERROR(bp, EIO);
176 /* 176 /*
177 * We're calling biodone, so delete B_DONE flag. Either way 177 * We're calling biodone, so delete B_DONE flag. Either way
@@ -205,7 +205,6 @@ xfs_bioerror_relse(
205 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks); 205 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
206 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone); 206 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
207 207
208 xfs_buftrace("XFS IOERRELSE", bp);
209 fl = XFS_BUF_BFLAGS(bp); 208 fl = XFS_BUF_BFLAGS(bp);
210 /* 209 /*
211 * No need to wait until the buffer is unpinned. 210 * No need to wait until the buffer is unpinned.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index a0574f593f52..ca64f33c63a3 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -100,6 +100,49 @@ typedef struct xfs_trans_header {
100#define XFS_TRANS_TYPE_MAX 41 100#define XFS_TRANS_TYPE_MAX 41
101/* new transaction types need to be reflected in xfs_logprint(8) */ 101/* new transaction types need to be reflected in xfs_logprint(8) */
102 102
103#define XFS_TRANS_TYPES \
104 { XFS_TRANS_SETATTR_NOT_SIZE, "SETATTR_NOT_SIZE" }, \
105 { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
106 { XFS_TRANS_INACTIVE, "INACTIVE" }, \
107 { XFS_TRANS_CREATE, "CREATE" }, \
108 { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
109 { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
110 { XFS_TRANS_REMOVE, "REMOVE" }, \
111 { XFS_TRANS_LINK, "LINK" }, \
112 { XFS_TRANS_RENAME, "RENAME" }, \
113 { XFS_TRANS_MKDIR, "MKDIR" }, \
114 { XFS_TRANS_RMDIR, "RMDIR" }, \
115 { XFS_TRANS_SYMLINK, "SYMLINK" }, \
116 { XFS_TRANS_SET_DMATTRS, "SET_DMATTRS" }, \
117 { XFS_TRANS_GROWFS, "GROWFS" }, \
118 { XFS_TRANS_STRAT_WRITE, "STRAT_WRITE" }, \
119 { XFS_TRANS_DIOSTRAT, "DIOSTRAT" }, \
120 { XFS_TRANS_WRITEID, "WRITEID" }, \
121 { XFS_TRANS_ADDAFORK, "ADDAFORK" }, \
122 { XFS_TRANS_ATTRINVAL, "ATTRINVAL" }, \
123 { XFS_TRANS_ATRUNCATE, "ATRUNCATE" }, \
124 { XFS_TRANS_ATTR_SET, "ATTR_SET" }, \
125 { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
126 { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
127 { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
128 { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
129 { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
130 { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
131 { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
132 { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
133 { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
134 { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
135 { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
136 { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
137 { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
138 { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
139 { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
140 { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
141 { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
142 { XFS_TRANS_DUMMY1, "DUMMY1" }, \
143 { XFS_TRANS_DUMMY2, "DUMMY2" }, \
144 { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
145
103/* 146/*
104 * This structure is used to track log items associated with 147 * This structure is used to track log items associated with
105 * a transaction. It points to the log item and keeps some 148 * a transaction. It points to the log item and keeps some
@@ -782,6 +825,10 @@ typedef struct xfs_log_item {
782#define XFS_LI_IN_AIL 0x1 825#define XFS_LI_IN_AIL 0x1
783#define XFS_LI_ABORTED 0x2 826#define XFS_LI_ABORTED 0x2
784 827
828#define XFS_LI_FLAGS \
829 { XFS_LI_IN_AIL, "IN_AIL" }, \
830 { XFS_LI_ABORTED, "ABORTED" }
831
785typedef struct xfs_item_ops { 832typedef struct xfs_item_ops {
786 uint (*iop_size)(xfs_log_item_t *); 833 uint (*iop_size)(xfs_log_item_t *);
787 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); 834 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
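
Unlike the OR-able flag masks, XFS_TRANS_TYPES maps mutually exclusive transaction type codes to names, which is the layout __print_symbolic() expects: it prints the single matching name rather than a "|"-joined list. A small sketch, again with an illustrative event rather than the real xfs_trace.h one:

    /* sketch only: decode a transaction type code to its name */
    TRACE_EVENT(demo_trans_type,
        TP_PROTO(unsigned int type),
        TP_ARGS(type),
        TP_STRUCT__entry(
            __field(unsigned int, type)
        ),
        TP_fast_assign(
            __entry->type = type;
        ),
        TP_printk("type %s",
              __print_symbolic(__entry->type, XFS_TRANS_TYPES))
    );
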
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 03a1f701fea8..49130628d5ef 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -38,6 +38,7 @@
38#include "xfs_trans_priv.h" 38#include "xfs_trans_priv.h"
39#include "xfs_error.h" 39#include "xfs_error.h"
40#include "xfs_rw.h" 40#include "xfs_rw.h"
41#include "xfs_trace.h"
41 42
42 43
43STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *, 44STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
@@ -95,26 +96,23 @@ xfs_trans_get_buf(xfs_trans_t *tp,
95 } 96 }
96 if (bp != NULL) { 97 if (bp != NULL) {
97 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); 98 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
98 if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) { 99 if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
99 xfs_buftrace("TRANS GET RECUR SHUT", bp);
100 XFS_BUF_SUPER_STALE(bp); 100 XFS_BUF_SUPER_STALE(bp);
101 } 101
102 /* 102 /*
103 * If the buffer is stale then it was binval'ed 103 * If the buffer is stale then it was binval'ed
104 * since last read. This doesn't matter since the 104 * since last read. This doesn't matter since the
105 * caller isn't allowed to use the data anyway. 105 * caller isn't allowed to use the data anyway.
106 */ 106 */
107 else if (XFS_BUF_ISSTALE(bp)) { 107 else if (XFS_BUF_ISSTALE(bp))
108 xfs_buftrace("TRANS GET RECUR STALE", bp);
109 ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); 108 ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
110 } 109
111 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); 110 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
112 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); 111 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
113 ASSERT(bip != NULL); 112 ASSERT(bip != NULL);
114 ASSERT(atomic_read(&bip->bli_refcount) > 0); 113 ASSERT(atomic_read(&bip->bli_refcount) > 0);
115 bip->bli_recur++; 114 bip->bli_recur++;
116 xfs_buftrace("TRANS GET RECUR", bp); 115 trace_xfs_trans_get_buf_recur(bip);
117 xfs_buf_item_trace("GET RECUR", bip);
118 return (bp); 116 return (bp);
119 } 117 }
120 118
@@ -166,8 +164,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
166 */ 164 */
167 XFS_BUF_SET_FSPRIVATE2(bp, tp); 165 XFS_BUF_SET_FSPRIVATE2(bp, tp);
168 166
169 xfs_buftrace("TRANS GET", bp); 167 trace_xfs_trans_get_buf(bip);
170 xfs_buf_item_trace("GET", bip);
171 return (bp); 168 return (bp);
172} 169}
173 170
@@ -207,7 +204,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
207 ASSERT(bip != NULL); 204 ASSERT(bip != NULL);
208 ASSERT(atomic_read(&bip->bli_refcount) > 0); 205 ASSERT(atomic_read(&bip->bli_refcount) > 0);
209 bip->bli_recur++; 206 bip->bli_recur++;
210 xfs_buf_item_trace("GETSB RECUR", bip); 207 trace_xfs_trans_getsb_recur(bip);
211 return (bp); 208 return (bp);
212 } 209 }
213 210
@@ -249,7 +246,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
249 */ 246 */
250 XFS_BUF_SET_FSPRIVATE2(bp, tp); 247 XFS_BUF_SET_FSPRIVATE2(bp, tp);
251 248
252 xfs_buf_item_trace("GETSB", bip); 249 trace_xfs_trans_getsb(bip);
253 return (bp); 250 return (bp);
254} 251}
255 252
@@ -347,7 +344,7 @@ xfs_trans_read_buf(
347 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 344 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
348 ASSERT((XFS_BUF_ISERROR(bp)) == 0); 345 ASSERT((XFS_BUF_ISERROR(bp)) == 0);
349 if (!(XFS_BUF_ISDONE(bp))) { 346 if (!(XFS_BUF_ISDONE(bp))) {
350 xfs_buftrace("READ_BUF_INCORE !DONE", bp); 347 trace_xfs_trans_read_buf_io(bp, _RET_IP_);
351 ASSERT(!XFS_BUF_ISASYNC(bp)); 348 ASSERT(!XFS_BUF_ISASYNC(bp));
352 XFS_BUF_READ(bp); 349 XFS_BUF_READ(bp);
353 xfsbdstrat(tp->t_mountp, bp); 350 xfsbdstrat(tp->t_mountp, bp);
@@ -372,7 +369,7 @@ xfs_trans_read_buf(
372 * brelse it either. Just get out. 369 * brelse it either. Just get out.
373 */ 370 */
374 if (XFS_FORCED_SHUTDOWN(mp)) { 371 if (XFS_FORCED_SHUTDOWN(mp)) {
375 xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp); 372 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
376 *bpp = NULL; 373 *bpp = NULL;
377 return XFS_ERROR(EIO); 374 return XFS_ERROR(EIO);
378 } 375 }
@@ -382,7 +379,7 @@ xfs_trans_read_buf(
382 bip->bli_recur++; 379 bip->bli_recur++;
383 380
384 ASSERT(atomic_read(&bip->bli_refcount) > 0); 381 ASSERT(atomic_read(&bip->bli_refcount) > 0);
385 xfs_buf_item_trace("READ RECUR", bip); 382 trace_xfs_trans_read_buf_recur(bip);
386 *bpp = bp; 383 *bpp = bp;
387 return 0; 384 return 0;
388 } 385 }
@@ -402,7 +399,6 @@ xfs_trans_read_buf(
402 } 399 }
403 if (XFS_BUF_GETERROR(bp) != 0) { 400 if (XFS_BUF_GETERROR(bp) != 0) {
404 XFS_BUF_SUPER_STALE(bp); 401 XFS_BUF_SUPER_STALE(bp);
405 xfs_buftrace("READ ERROR", bp);
406 error = XFS_BUF_GETERROR(bp); 402 error = XFS_BUF_GETERROR(bp);
407 403
408 xfs_ioerror_alert("xfs_trans_read_buf", mp, 404 xfs_ioerror_alert("xfs_trans_read_buf", mp,
@@ -461,8 +457,7 @@ xfs_trans_read_buf(
461 */ 457 */
462 XFS_BUF_SET_FSPRIVATE2(bp, tp); 458 XFS_BUF_SET_FSPRIVATE2(bp, tp);
463 459
464 xfs_buftrace("TRANS READ", bp); 460 trace_xfs_trans_read_buf(bip);
465 xfs_buf_item_trace("READ", bip);
466 *bpp = bp; 461 *bpp = bp;
467 return 0; 462 return 0;
468 463
@@ -480,7 +475,7 @@ shutdown_abort:
480 ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) != 475 ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
481 (XFS_B_STALE|XFS_B_DELWRI)); 476 (XFS_B_STALE|XFS_B_DELWRI));
482 477
483 xfs_buftrace("READ_BUF XFSSHUTDN", bp); 478 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
484 xfs_buf_relse(bp); 479 xfs_buf_relse(bp);
485 *bpp = NULL; 480 *bpp = NULL;
486 return XFS_ERROR(EIO); 481 return XFS_ERROR(EIO);
@@ -546,13 +541,14 @@ xfs_trans_brelse(xfs_trans_t *tp,
546 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); 541 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
547 ASSERT(lidp != NULL); 542 ASSERT(lidp != NULL);
548 543
544 trace_xfs_trans_brelse(bip);
545
549 /* 546 /*
550 * If the release is just for a recursive lock, 547 * If the release is just for a recursive lock,
551 * then decrement the count and return. 548 * then decrement the count and return.
552 */ 549 */
553 if (bip->bli_recur > 0) { 550 if (bip->bli_recur > 0) {
554 bip->bli_recur--; 551 bip->bli_recur--;
555 xfs_buf_item_trace("RELSE RECUR", bip);
556 return; 552 return;
557 } 553 }
558 554
@@ -560,10 +556,8 @@ xfs_trans_brelse(xfs_trans_t *tp,
560 * If the buffer is dirty within this transaction, we can't 556 * If the buffer is dirty within this transaction, we can't
561 * release it until we commit. 557 * release it until we commit.
562 */ 558 */
563 if (lidp->lid_flags & XFS_LID_DIRTY) { 559 if (lidp->lid_flags & XFS_LID_DIRTY)
564 xfs_buf_item_trace("RELSE DIRTY", bip);
565 return; 560 return;
566 }
567 561
568 /* 562 /*
569 * If the buffer has been invalidated, then we can't release 563 * If the buffer has been invalidated, then we can't release
@@ -571,13 +565,10 @@ xfs_trans_brelse(xfs_trans_t *tp,
571 * as part of this transaction. This prevents us from pulling 565 * as part of this transaction. This prevents us from pulling
572 * the item from the AIL before we should. 566 * the item from the AIL before we should.
573 */ 567 */
574 if (bip->bli_flags & XFS_BLI_STALE) { 568 if (bip->bli_flags & XFS_BLI_STALE)
575 xfs_buf_item_trace("RELSE STALE", bip);
576 return; 569 return;
577 }
578 570
579 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); 571 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
580 xfs_buf_item_trace("RELSE", bip);
581 572
582 /* 573 /*
583 * Free up the log item descriptor tracking the released item. 574 * Free up the log item descriptor tracking the released item.
@@ -674,7 +665,7 @@ xfs_trans_bjoin(xfs_trans_t *tp,
674 */ 665 */
675 XFS_BUF_SET_FSPRIVATE2(bp, tp); 666 XFS_BUF_SET_FSPRIVATE2(bp, tp);
676 667
677 xfs_buf_item_trace("BJOIN", bip); 668 trace_xfs_trans_bjoin(bip);
678} 669}
679 670
680/* 671/*
@@ -698,7 +689,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
698 ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); 689 ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
699 ASSERT(atomic_read(&bip->bli_refcount) > 0); 690 ASSERT(atomic_read(&bip->bli_refcount) > 0);
700 bip->bli_flags |= XFS_BLI_HOLD; 691 bip->bli_flags |= XFS_BLI_HOLD;
701 xfs_buf_item_trace("BHOLD", bip); 692 trace_xfs_trans_bhold(bip);
702} 693}
703 694
704/* 695/*
@@ -721,7 +712,8 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
721 ASSERT(atomic_read(&bip->bli_refcount) > 0); 712 ASSERT(atomic_read(&bip->bli_refcount) > 0);
722 ASSERT(bip->bli_flags & XFS_BLI_HOLD); 713 ASSERT(bip->bli_flags & XFS_BLI_HOLD);
723 bip->bli_flags &= ~XFS_BLI_HOLD; 714 bip->bli_flags &= ~XFS_BLI_HOLD;
724 xfs_buf_item_trace("BHOLD RELEASE", bip); 715
716 trace_xfs_trans_bhold_release(bip);
725} 717}
726 718
727/* 719/*
@@ -767,6 +759,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
767 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); 759 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
768 bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone; 760 bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
769 761
762 trace_xfs_trans_log_buf(bip);
763
770 /* 764 /*
771 * If we invalidated the buffer within this transaction, then 765 * If we invalidated the buffer within this transaction, then
772 * cancel the invalidation now that we're dirtying the buffer 766 * cancel the invalidation now that we're dirtying the buffer
@@ -774,7 +768,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
774 * because we have a reference to the buffer this entire time. 768 * because we have a reference to the buffer this entire time.
775 */ 769 */
776 if (bip->bli_flags & XFS_BLI_STALE) { 770 if (bip->bli_flags & XFS_BLI_STALE) {
777 xfs_buf_item_trace("BLOG UNSTALE", bip);
778 bip->bli_flags &= ~XFS_BLI_STALE; 771 bip->bli_flags &= ~XFS_BLI_STALE;
779 ASSERT(XFS_BUF_ISSTALE(bp)); 772 ASSERT(XFS_BUF_ISSTALE(bp));
780 XFS_BUF_UNSTALE(bp); 773 XFS_BUF_UNSTALE(bp);
@@ -789,7 +782,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
789 lidp->lid_flags &= ~XFS_LID_BUF_STALE; 782 lidp->lid_flags &= ~XFS_LID_BUF_STALE;
790 bip->bli_flags |= XFS_BLI_LOGGED; 783 bip->bli_flags |= XFS_BLI_LOGGED;
791 xfs_buf_item_log(bip, first, last); 784 xfs_buf_item_log(bip, first, last);
792 xfs_buf_item_trace("BLOG", bip);
793} 785}
794 786
795 787
@@ -828,6 +820,8 @@ xfs_trans_binval(
828 ASSERT(lidp != NULL); 820 ASSERT(lidp != NULL);
829 ASSERT(atomic_read(&bip->bli_refcount) > 0); 821 ASSERT(atomic_read(&bip->bli_refcount) > 0);
830 822
823 trace_xfs_trans_binval(bip);
824
831 if (bip->bli_flags & XFS_BLI_STALE) { 825 if (bip->bli_flags & XFS_BLI_STALE) {
832 /* 826 /*
833 * If the buffer is already invalidated, then 827 * If the buffer is already invalidated, then
@@ -840,8 +834,6 @@ xfs_trans_binval(
840 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 834 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
841 ASSERT(lidp->lid_flags & XFS_LID_DIRTY); 835 ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
842 ASSERT(tp->t_flags & XFS_TRANS_DIRTY); 836 ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
843 xfs_buftrace("XFS_BINVAL RECUR", bp);
844 xfs_buf_item_trace("BINVAL RECUR", bip);
845 return; 837 return;
846 } 838 }
847 839
@@ -875,8 +867,6 @@ xfs_trans_binval(
875 (bip->bli_format.blf_map_size * sizeof(uint))); 867 (bip->bli_format.blf_map_size * sizeof(uint)));
876 lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE; 868 lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
877 tp->t_flags |= XFS_TRANS_DIRTY; 869 tp->t_flags |= XFS_TRANS_DIRTY;
878 xfs_buftrace("XFS_BINVAL", bp);
879 xfs_buf_item_trace("BINVAL", bip);
880} 870}
881 871
882/* 872/*
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 578f3f59b789..6558ffd8d140 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -53,6 +53,7 @@
53#include "xfs_log_priv.h" 53#include "xfs_log_priv.h"
54#include "xfs_filestream.h" 54#include "xfs_filestream.h"
55#include "xfs_vnodeops.h" 55#include "xfs_vnodeops.h"
56#include "xfs_trace.h"
56 57
57int 58int
58xfs_setattr( 59xfs_setattr(
@@ -1397,7 +1398,6 @@ xfs_lookup(
1397 if (error) 1398 if (error)
1398 goto out_free_name; 1399 goto out_free_name;
1399 1400
1400 xfs_itrace_ref(*ipp);
1401 return 0; 1401 return 0;
1402 1402
1403out_free_name: 1403out_free_name:
@@ -1543,7 +1543,6 @@ xfs_create(
1543 * At this point, we've gotten a newly allocated inode. 1543 * At this point, we've gotten a newly allocated inode.
1544 * It is locked (and joined to the transaction). 1544 * It is locked (and joined to the transaction).
1545 */ 1545 */
1546 xfs_itrace_ref(ip);
1547 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 1546 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1548 1547
1549 /* 1548 /*
@@ -2003,9 +2002,6 @@ xfs_remove(
2003 if (!is_dir && link_zero && xfs_inode_is_filestream(ip)) 2002 if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
2004 xfs_filestream_deassociate(ip); 2003 xfs_filestream_deassociate(ip);
2005 2004
2006 xfs_itrace_exit(ip);
2007 xfs_itrace_exit(dp);
2008
2009 std_return: 2005 std_return:
2010 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { 2006 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
2011 XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL, 2007 XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
@@ -2302,7 +2298,6 @@ xfs_symlink(
2302 goto error_return; 2298 goto error_return;
2303 goto error1; 2299 goto error1;
2304 } 2300 }
2305 xfs_itrace_ref(ip);
2306 2301
2307 /* 2302 /*
2308 * An error after we've joined dp to the transaction will result in the 2303 * An error after we've joined dp to the transaction will result in the
@@ -2845,7 +2840,6 @@ xfs_free_file_space(
2845 ioffset = offset & ~(rounding - 1); 2840 ioffset = offset & ~(rounding - 1);
2846 2841
2847 if (VN_CACHED(VFS_I(ip)) != 0) { 2842 if (VN_CACHED(VFS_I(ip)) != 0) {
2848 xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
2849 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); 2843 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
2850 if (error) 2844 if (error)
2851 goto out_unlock_iolock; 2845 goto out_unlock_iolock;
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63a..ecc44a8e2b44 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,19 +15,19 @@
15# define ATOMIC_HASH_SIZE 4 15# define ATOMIC_HASH_SIZE 4
16# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) 16# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
17 17
18extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; 18extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
19 19
20/* Can't use raw_spin_lock_irq because of #include problems, so 20/* Can't use raw_spin_lock_irq because of #include problems, so
21 * this is the substitute */ 21 * this is the substitute */
22#define _atomic_spin_lock_irqsave(l,f) do { \ 22#define _atomic_spin_lock_irqsave(l,f) do { \
23 raw_spinlock_t *s = ATOMIC_HASH(l); \ 23 arch_spinlock_t *s = ATOMIC_HASH(l); \
24 local_irq_save(f); \ 24 local_irq_save(f); \
25 __raw_spin_lock(s); \ 25 arch_spin_lock(s); \
26} while(0) 26} while(0)
27 27
28#define _atomic_spin_unlock_irqrestore(l,f) do { \ 28#define _atomic_spin_unlock_irqrestore(l,f) do { \
29 raw_spinlock_t *s = ATOMIC_HASH(l); \ 29 arch_spinlock_t *s = ATOMIC_HASH(l); \
30 __raw_spin_unlock(s); \ 30 arch_spin_unlock(s); \
31 local_irq_restore(f); \ 31 local_irq_restore(f); \
32} while(0) 32} while(0)
33 33
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4b6755984d24..18c435d7c082 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -113,22 +113,22 @@ extern void warn_slowpath_null(const char *file, const int line);
113#endif 113#endif
114 114
115#define WARN_ON_ONCE(condition) ({ \ 115#define WARN_ON_ONCE(condition) ({ \
116 static int __warned; \ 116 static bool __warned; \
117 int __ret_warn_once = !!(condition); \ 117 int __ret_warn_once = !!(condition); \
118 \ 118 \
119 if (unlikely(__ret_warn_once)) \ 119 if (unlikely(__ret_warn_once)) \
120 if (WARN_ON(!__warned)) \ 120 if (WARN_ON(!__warned)) \
121 __warned = 1; \ 121 __warned = true; \
122 unlikely(__ret_warn_once); \ 122 unlikely(__ret_warn_once); \
123}) 123})
124 124
125#define WARN_ONCE(condition, format...) ({ \ 125#define WARN_ONCE(condition, format...) ({ \
126 static int __warned; \ 126 static bool __warned; \
127 int __ret_warn_once = !!(condition); \ 127 int __ret_warn_once = !!(condition); \
128 \ 128 \
129 if (unlikely(__ret_warn_once)) \ 129 if (unlikely(__ret_warn_once)) \
130 if (WARN(!__warned, format)) \ 130 if (WARN(!__warned, format)) \
131 __warned = 1; \ 131 __warned = true; \
132 unlikely(__ret_warn_once); \ 132 unlikely(__ret_warn_once); \
133}) 133})
134 134
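
Switching the static once-flag from int to bool only shrinks its footprint; both macros still evaluate to the condition, so a caller can warn once yet handle the failure on every occurrence. A small usage sketch with an invented struct:

    #include <linux/bug.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_req {
        size_t len;
    };

    static int demo_submit(struct demo_req *req)
    {
        /* print a backtrace-carrying warning the first time only,
         * but fail the call every time the condition is hit */
        if (WARN_ON_ONCE(req == NULL))
            return -EINVAL;

        if (WARN_ONCE(req->len == 0, "zero-length request\n"))
            return -EINVAL;

        return 0;
    }
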
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 204bed37e82d..485eeb6c4ef3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -145,6 +145,7 @@ extern int __gpio_to_irq(unsigned gpio);
145extern int gpio_export(unsigned gpio, bool direction_may_change); 145extern int gpio_export(unsigned gpio, bool direction_may_change);
146extern int gpio_export_link(struct device *dev, const char *name, 146extern int gpio_export_link(struct device *dev, const char *name,
147 unsigned gpio); 147 unsigned gpio);
148extern int gpio_sysfs_set_active_low(unsigned gpio, int value);
148extern void gpio_unexport(unsigned gpio); 149extern void gpio_unexport(unsigned gpio);
149 150
150#endif /* CONFIG_GPIO_SYSFS */ 151#endif /* CONFIG_GPIO_SYSFS */
@@ -197,6 +198,11 @@ static inline int gpio_export_link(struct device *dev, const char *name,
197 return -ENOSYS; 198 return -ENOSYS;
198} 199}
199 200
201static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
202{
203 return -ENOSYS;
204}
205
200static inline void gpio_unexport(unsigned gpio) 206static inline void gpio_unexport(unsigned gpio)
201{ 207{
202} 208}
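
gpio_sysfs_set_active_low() joins the existing sysfs export helpers, with an -ENOSYS stub when CONFIG_GPIO_SYSFS is off so callers do not need their own ifdefs. A hedged sketch of a driver exporting an input line and marking it active-low so its sysfs value reads inverted (the GPIO number and label are made up):

    #include <linux/gpio.h>

    static int demo_export_button(unsigned gpio)
    {
        int err;

        err = gpio_request(gpio, "demo-button");
        if (err)
            return err;

        err = gpio_direction_input(gpio);
        if (err)
            goto out_free;

        /* expose the line in sysfs without letting userspace
         * change its direction */
        err = gpio_export(gpio, false);
        if (err)
            goto out_free;

        /* invert the polarity seen through the sysfs value file */
        err = gpio_sysfs_set_active_low(gpio, 1);
        if (err)
            goto out_unexport;

        return 0;

    out_unexport:
        gpio_unexport(gpio);
    out_free:
        gpio_free(gpio);
        return err;
    }
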
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 5ee13b2fd223..20111265afd8 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -19,6 +19,11 @@
19#define MAP_TYPE 0x0f /* Mask for type of mapping */ 19#define MAP_TYPE 0x0f /* Mask for type of mapping */
20#define MAP_FIXED 0x10 /* Interpret addr exactly */ 20#define MAP_FIXED 0x10 /* Interpret addr exactly */
21#define MAP_ANONYMOUS 0x20 /* don't use a file */ 21#define MAP_ANONYMOUS 0x20 /* don't use a file */
22#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
23# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
24#else
25# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
26#endif
22 27
23#define MS_ASYNC 1 /* sync memory asynchronously */ 28#define MS_ASYNC 1 /* sync memory asynchronously */
24#define MS_INVALIDATE 2 /* invalidate the caches */ 29#define MS_INVALIDATE 2 /* invalidate the caches */
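
MAP_UNINITIALIZED is only honoured when the kernel was built with CONFIG_MMAP_ALLOW_UNINITIALIZED, a no-MMU/embedded trade-off that skips zeroing anonymous pages and can therefore expose stale data to userspace; otherwise the flag defines to 0 and is silently ignored. A hedged userspace sketch of requesting it:

    #include <stddef.h>
    #include <sys/mman.h>

    /* older libc headers may not carry the flag yet */
    #ifndef MAP_UNINITIALIZED
    #define MAP_UNINITIALIZED 0x4000000
    #endif

    static void *demo_fast_anon_alloc(size_t len)
    {
        /* the kernel only skips clearing the pages when built with
         * CONFIG_MMAP_ALLOW_UNINITIALIZED; otherwise this behaves
         * like a plain anonymous mapping */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED,
                   -1, 0);

        return p == MAP_FAILED ? NULL : p;
    }
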
diff --git a/include/linux/aio.h b/include/linux/aio.h
index aea219d7d8d1..811dbb369379 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -102,7 +102,6 @@ struct kiocb {
102 } ki_obj; 102 } ki_obj;
103 103
104 __u64 ki_user_data; /* user's data for completion */ 104 __u64 ki_user_data; /* user's data for completion */
105 wait_queue_t ki_wait;
106 loff_t ki_pos; 105 loff_t ki_pos;
107 106
108 void *private; 107 void *private;
@@ -140,7 +139,6 @@ struct kiocb {
140 (x)->ki_dtor = NULL; \ 139 (x)->ki_dtor = NULL; \
141 (x)->ki_obj.tsk = tsk; \ 140 (x)->ki_obj.tsk = tsk; \
142 (x)->ki_user_data = 0; \ 141 (x)->ki_user_data = 0; \
143 init_wait((&(x)->ki_wait)); \
144 } while (0) 142 } while (0)
145 143
146#define AIO_RING_MAGIC 0xa10a10a1 144#define AIO_RING_MAGIC 0xa10a10a1
@@ -223,8 +221,6 @@ struct mm_struct;
223static inline void exit_aio(struct mm_struct *mm) { } 221static inline void exit_aio(struct mm_struct *mm) { }
224#endif /* CONFIG_AIO */ 222#endif /* CONFIG_AIO */
225 223
226#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
227
228static inline struct kiocb *list_kiocb(struct list_head *h) 224static inline struct kiocb *list_kiocb(struct list_head *h)
229{ 225{
230 return list_entry(h, struct kiocb, ki_list); 226 return list_entry(h, struct kiocb, ki_list);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c87..3e09b345f4d6 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
3 3
4#define ATMEL_MCI_MAX_NR_SLOTS 2 4#define ATMEL_MCI_MAX_NR_SLOTS 2
5 5
6#include <linux/dw_dmac.h>
7
8/** 6/**
9 * struct mci_slot_pdata - board-specific per-slot configuration 7 * struct mci_slot_pdata - board-specific per-slot configuration
10 * @bus_width: Number of data lines wired up the slot 8 * @bus_width: Number of data lines wired up the slot
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
34 * @slot: Per-slot configuration data. 32 * @slot: Per-slot configuration data.
35 */ 33 */
36struct mci_platform_data { 34struct mci_platform_data {
37 struct dw_dma_slave dma_slave; 35 struct mci_dma_data *dma_slave;
38 struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; 36 struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS];
39}; 37};
40 38
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 756d78b8c1c5..daf8c480c786 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -42,6 +42,9 @@
42 * bitmap_empty(src, nbits) Are all bits zero in *src? 42 * bitmap_empty(src, nbits) Are all bits zero in *src?
43 * bitmap_full(src, nbits) Are all bits set in *src? 43 * bitmap_full(src, nbits) Are all bits set in *src?
44 * bitmap_weight(src, nbits) Hamming Weight: number set bits 44 * bitmap_weight(src, nbits) Hamming Weight: number set bits
45 * bitmap_set(dst, pos, nbits) Set specified bit area
46 * bitmap_clear(dst, pos, nbits) Clear specified bit area
47 * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
45 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n 48 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
46 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n 49 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
47 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) 50 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -108,6 +111,14 @@ extern int __bitmap_subset(const unsigned long *bitmap1,
108 const unsigned long *bitmap2, int bits); 111 const unsigned long *bitmap2, int bits);
109extern int __bitmap_weight(const unsigned long *bitmap, int bits); 112extern int __bitmap_weight(const unsigned long *bitmap, int bits);
110 113
114extern void bitmap_set(unsigned long *map, int i, int len);
115extern void bitmap_clear(unsigned long *map, int start, int nr);
116extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
117 unsigned long size,
118 unsigned long start,
119 unsigned int nr,
120 unsigned long align_mask);
121
111extern int bitmap_scnprintf(char *buf, unsigned int len, 122extern int bitmap_scnprintf(char *buf, unsigned int len,
112 const unsigned long *src, int nbits); 123 const unsigned long *src, int nbits);
113extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, 124extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
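
Together the three new helpers cover the common bitmap-allocator pattern: find a run of clear bits, mark it busy, and clear it again on free. A small sketch of a caller; the region size and lock are illustrative, and the caller must serialise the find/set pair itself since the helpers do no locking:

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/spinlock.h>

    #define DEMO_NR_SLOTS 128

    static DECLARE_BITMAP(demo_map, DEMO_NR_SLOTS);
    static DEFINE_SPINLOCK(demo_lock);

    /* returns the first index of a free run of 'nr' slots, or -ENOSPC */
    static int demo_alloc_slots(unsigned int nr)
    {
        unsigned long start;

        spin_lock(&demo_lock);
        start = bitmap_find_next_zero_area(demo_map, DEMO_NR_SLOTS,
                            0, nr, 0);
        if (start >= DEMO_NR_SLOTS) {
            spin_unlock(&demo_lock);
            return -ENOSPC;
        }
        bitmap_set(demo_map, start, nr);
        spin_unlock(&demo_lock);

        return start;
    }

    static void demo_free_slots(int start, unsigned int nr)
    {
        spin_lock(&demo_lock);
        bitmap_clear(demo_map, start, nr);
        spin_unlock(&demo_lock);
    }
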
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 1ed2a5cc03f5..3db7767d2a17 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -51,6 +51,15 @@ struct can_priv {
51 struct sk_buff **echo_skb; 51 struct sk_buff **echo_skb;
52}; 52};
53 53
54/*
55 * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
56 * to __u8 and ensure the dlc value to be max. 8 bytes.
57 *
58 * To be used in the CAN netdriver receive path to ensure conformance with
59 * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
60 */
61#define get_can_dlc(i) (min_t(__u8, (i), 8))
62
54struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); 63struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
55void free_candev(struct net_device *dev); 64void free_candev(struct net_device *dev);
56 65
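
get_can_dlc() gives receive paths one place to clamp a controller-reported data length code to the 8-byte CAN maximum before it is used for copies or handed to the stack. A hedged sketch of a frame-fill helper, with the register read stood in for by the hw_dlc argument:

    #include <linux/can.h>
    #include <linux/can/dev.h>
    #include <linux/string.h>

    static void demo_fill_frame(struct can_frame *cf, u8 hw_dlc,
                    const u8 *data)
    {
        /* never trust the controller: cap the DLC at 8 as required
         * by ISO 11898-1 before using it as a copy length */
        cf->can_dlc = get_can_dlc(hw_dlc);
        memcpy(cf->data, data, cf->can_dlc);
    }
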
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 000000000000..d5a1d4810b80
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
1/*
2 * AMD CS5535/CS5536 definitions
3 * Copyright (C) 2006 Advanced Micro Devices, Inc.
4 * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 */
10
11#ifndef _CS5535_H
12#define _CS5535_H
13
14/* MSRs */
15#define MSR_GLIU_P2D_RO0 0x10000029
16
17#define MSR_LX_GLD_MSR_CONFIG 0x48002001
18#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
19 * sheet has the wrong value */
20#define MSR_GLCP_SYS_RSTPLL 0x4C000014
21#define MSR_GLCP_DOTPLL 0x4C000015
22
23#define MSR_LBAR_SMB 0x5140000B
24#define MSR_LBAR_GPIO 0x5140000C
25#define MSR_LBAR_MFGPT 0x5140000D
26#define MSR_LBAR_ACPI 0x5140000E
27#define MSR_LBAR_PMS 0x5140000F
28
29#define MSR_DIVIL_SOFT_RESET 0x51400017
30
31#define MSR_PIC_YSEL_LOW 0x51400020
32#define MSR_PIC_YSEL_HIGH 0x51400021
33#define MSR_PIC_ZSEL_LOW 0x51400022
34#define MSR_PIC_ZSEL_HIGH 0x51400023
35#define MSR_PIC_IRQM_LPC 0x51400025
36
37#define MSR_MFGPT_IRQ 0x51400028
38#define MSR_MFGPT_NR 0x51400029
39#define MSR_MFGPT_SETUP 0x5140002B
40
41#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
42
43#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
44#define MSR_GX_MSR_PADSEL 0xC0002011
45
46/* resource sizes */
47#define LBAR_GPIO_SIZE 0xFF
48#define LBAR_MFGPT_SIZE 0x40
49#define LBAR_ACPI_SIZE 0x40
50#define LBAR_PMS_SIZE 0x80
51
52/* VSA2 magic values */
53#define VSA_VRC_INDEX 0xAC1C
54#define VSA_VRC_DATA 0xAC1E
55#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
56#define VSA_VR_SIGNATURE 0x0003
57#define VSA_VR_MEM_SIZE 0x0200
58#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
59#define GSW_VSA_SIG 0x534d /* General Software signature */
60
61#include <linux/io.h>
62
63static inline int cs5535_has_vsa2(void)
64{
65 static int has_vsa2 = -1;
66
67 if (has_vsa2 == -1) {
68 uint16_t val;
69
70 /*
71 * The VSA has virtual registers that we can query for a
72 * signature.
73 */
74 outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
75 outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
76
77 val = inw(VSA_VRC_DATA);
78 has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
79 }
80
81 return has_vsa2;
82}
83
84/* GPIOs */
85#define GPIO_OUTPUT_VAL 0x00
86#define GPIO_OUTPUT_ENABLE 0x04
87#define GPIO_OUTPUT_OPEN_DRAIN 0x08
88#define GPIO_OUTPUT_INVERT 0x0C
89#define GPIO_OUTPUT_AUX1 0x10
90#define GPIO_OUTPUT_AUX2 0x14
91#define GPIO_PULL_UP 0x18
92#define GPIO_PULL_DOWN 0x1C
93#define GPIO_INPUT_ENABLE 0x20
94#define GPIO_INPUT_INVERT 0x24
95#define GPIO_INPUT_FILTER 0x28
96#define GPIO_INPUT_EVENT_COUNT 0x2C
97#define GPIO_READ_BACK 0x30
98#define GPIO_INPUT_AUX1 0x34
99#define GPIO_EVENTS_ENABLE 0x38
100#define GPIO_LOCK_ENABLE 0x3C
101#define GPIO_POSITIVE_EDGE_EN 0x40
102#define GPIO_NEGATIVE_EDGE_EN 0x44
103#define GPIO_POSITIVE_EDGE_STS 0x48
104#define GPIO_NEGATIVE_EDGE_STS 0x4C
105
106#define GPIO_MAP_X 0xE0
107#define GPIO_MAP_Y 0xE4
108#define GPIO_MAP_Z 0xE8
109#define GPIO_MAP_W 0xEC
110
111void cs5535_gpio_set(unsigned offset, unsigned int reg);
112void cs5535_gpio_clear(unsigned offset, unsigned int reg);
113int cs5535_gpio_isset(unsigned offset, unsigned int reg);
114
115/* MFGPTs */
116
117#define MFGPT_MAX_TIMERS 8
118#define MFGPT_TIMER_ANY (-1)
119
120#define MFGPT_DOMAIN_WORKING 1
121#define MFGPT_DOMAIN_STANDBY 2
122#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
123
124#define MFGPT_CMP1 0
125#define MFGPT_CMP2 1
126
127#define MFGPT_EVENT_IRQ 0
128#define MFGPT_EVENT_NMI 1
129#define MFGPT_EVENT_RESET 3
130
131#define MFGPT_REG_CMP1 0
132#define MFGPT_REG_CMP2 2
133#define MFGPT_REG_COUNTER 4
134#define MFGPT_REG_SETUP 6
135
136#define MFGPT_SETUP_CNTEN (1 << 15)
137#define MFGPT_SETUP_CMP2 (1 << 14)
138#define MFGPT_SETUP_CMP1 (1 << 13)
139#define MFGPT_SETUP_SETUP (1 << 12)
140#define MFGPT_SETUP_STOPEN (1 << 11)
141#define MFGPT_SETUP_EXTEN (1 << 10)
142#define MFGPT_SETUP_REVEN (1 << 5)
143#define MFGPT_SETUP_CLKSEL (1 << 4)
144
145struct cs5535_mfgpt_timer;
146
147extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
148 uint16_t reg);
149extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
150 uint16_t value);
151
152extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
153 int event, int enable);
154extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
155 int *irq, int enable);
156extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
157 int domain);
158extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
159
160static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
161 int cmp, int *irq)
162{
163 return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
164}
165
166static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
167 int cmp, int *irq)
168{
169 return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
170}
171
172#endif
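For orientation, a minimal sketch of how a driver might use the MFGPT interface declared above, assuming the header lands as <linux/cs5535.h>; the function name, IRQ number and comparator value are illustrative and not part of this patch.

#include <linux/cs5535.h>
#include <linux/errno.h>

static struct cs5535_mfgpt_timer *example_timer;
static int example_irq = 6;	/* hypothetical IRQ routing choice */

static int example_mfgpt_start(void)
{
	int ret;

	/* grab any free timer in the working power domain */
	example_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
						 MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	/* route comparator 2 events to example_irq */
	ret = cs5535_mfgpt_setup_irq(example_timer, MFGPT_CMP2, &example_irq);
	if (ret) {
		cs5535_mfgpt_free_timer(example_timer);
		return ret;
	}

	/* program the comparator and start the counter */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 0x1000);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return 0;
}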
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297a..a3d6ee0044f9 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
15#define _X 0x40 /* hex digit */ 15#define _X 0x40 /* hex digit */
16#define _SP 0x80 /* hard space (0x20) */ 16#define _SP 0x80 /* hard space (0x20) */
17 17
18extern unsigned char _ctype[]; 18extern const unsigned char _ctype[];
19 19
20#define __ismask(x) (_ctype[(int)(unsigned char)(x)]) 20#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
21 21
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
27#define islower(c) ((__ismask(c)&(_L)) != 0) 27#define islower(c) ((__ismask(c)&(_L)) != 0)
28#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) 28#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
29#define ispunct(c) ((__ismask(c)&(_P)) != 0) 29#define ispunct(c) ((__ismask(c)&(_P)) != 0)
30/* Note: isspace() must return false for %NUL-terminator */
30#define isspace(c) ((__ismask(c)&(_S)) != 0) 31#define isspace(c) ((__ismask(c)&(_S)) != 0)
31#define isupper(c) ((__ismask(c)&(_U)) != 0) 32#define isupper(c) ((__ismask(c)&(_U)) != 0)
32#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) 33#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
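The isspace() note added above is easy to see in a caller: a sketch of a whitespace-skipping helper that terminates at the end of the string only because isspace('\0') is false.

#include <linux/ctype.h>

static const char *example_skip_spaces(const char *str)
{
	/* stops at the NUL terminator because isspace('\0') is false */
	while (isspace(*str))
		str++;
	return str;
}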
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce8..d4c9c0b88adc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
235const char *dm_device_name(struct mapped_device *md); 235const char *dm_device_name(struct mapped_device *md);
236int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); 236int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
237struct gendisk *dm_disk(struct mapped_device *md); 237struct gendisk *dm_disk(struct mapped_device *md);
238int dm_suspended(struct mapped_device *md); 238int dm_suspended(struct dm_target *ti);
239int dm_noflush_suspending(struct dm_target *ti); 239int dm_noflush_suspending(struct dm_target *ti);
240union map_info *dm_get_mapinfo(struct bio *bio); 240union map_info *dm_get_mapinfo(struct bio *bio);
241union map_info *dm_get_rq_mapinfo(struct request *rq); 241union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
276/* 276/*
277 * Table reference counting. 277 * Table reference counting.
278 */ 278 */
279struct dm_table *dm_get_table(struct mapped_device *md); 279struct dm_table *dm_get_live_table(struct mapped_device *md);
280void dm_table_get(struct dm_table *t); 280void dm_table_get(struct dm_table *t);
281void dm_table_put(struct dm_table *t); 281void dm_table_put(struct dm_table *t);
282 282
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
295 295
296/* 296/*
297 * The device must be suspended before calling this method. 297 * The device must be suspended before calling this method.
298 * Returns the previous table, which the caller must destroy.
298 */ 299 */
299int dm_swap_table(struct mapped_device *md, struct dm_table *t); 300struct dm_table *dm_swap_table(struct mapped_device *md,
301 struct dm_table *t);
300 302
301/* 303/*
302 * A wrapper around vmalloc. 304 * A wrapper around vmalloc.
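A sketch of the calling convention implied by the dm_swap_table() change above: the previous table is handed back and the caller destroys it. dm_table_destroy() is assumed as the destruction primitive, and errors are assumed to come back as ERR_PTR() values.

#include <linux/device-mapper.h>
#include <linux/err.h>

static int example_swap(struct mapped_device *md, struct dm_table *new_table)
{
	struct dm_table *old_table;

	/* the device must already be suspended, per the comment above */
	old_table = dm_swap_table(md, new_table);
	if (IS_ERR(old_table))
		return PTR_ERR(old_table);

	if (old_table)
		dm_table_destroy(old_table);
	return 0;
}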
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6f..7084503c3405 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
21 21
22struct dm_dirty_log { 22struct dm_dirty_log {
23 struct dm_dirty_log_type *type; 23 struct dm_dirty_log_type *type;
24 int (*flush_callback_fn)(struct dm_target *ti);
24 void *context; 25 void *context;
25}; 26};
26 27
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
136 * type->constructor/destructor() directly. 137 * type->constructor/destructor() directly.
137 */ 138 */
138struct dm_dirty_log *dm_dirty_log_create(const char *type_name, 139struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
139 struct dm_target *ti, 140 struct dm_target *ti,
140 unsigned argc, char **argv); 141 int (*flush_callback_fn)(struct dm_target *ti),
142 unsigned argc, char **argv);
141void dm_dirty_log_destroy(struct dm_dirty_log *log); 143void dm_dirty_log_destroy(struct dm_dirty_log *log);
142 144
143#endif /* __KERNEL__ */ 145#endif /* __KERNEL__ */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31a..aa95508d2f95 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. 2 * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
3 * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the LGPL. 5 * This file is released under the LGPL.
6 */ 6 */
@@ -266,9 +266,9 @@ enum {
266#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 266#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
267 267
268#define DM_VERSION_MAJOR 4 268#define DM_VERSION_MAJOR 4
269#define DM_VERSION_MINOR 15 269#define DM_VERSION_MINOR 16
270#define DM_VERSION_PATCHLEVEL 0 270#define DM_VERSION_PATCHLEVEL 0
271#define DM_VERSION_EXTRA "-ioctl (2009-04-01)" 271#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
272 272
273/* Status bits */ 273/* Status bits */
274#define DM_READONLY_FLAG (1 << 0) /* In/Out */ 274#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -309,4 +309,11 @@ enum {
309 */ 309 */
310#define DM_NOFLUSH_FLAG (1 << 11) /* In */ 310#define DM_NOFLUSH_FLAG (1 << 11) /* In */
311 311
312/*
313 * If set, any table information returned will relate to the inactive
314 * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
315 * is set before using the data returned.
316 */
317#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
318
312#endif /* _LINUX_DM_IOCTL_H */ 319#endif /* _LINUX_DM_IOCTL_H */
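A userspace sketch of the new DM_QUERY_INACTIVE_TABLE_FLAG: request data about the inactive table, then trust the reply only when DM_INACTIVE_PRESENT_FLAG is set, as the comment above requires. The usual struct dm_ioctl setup (version fields, data_size, device name, control fd) is elided.

#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

static int example_query_inactive(int ctl_fd, struct dm_ioctl *io)
{
	io->flags |= DM_QUERY_INACTIVE_TABLE_FLAG;

	if (ioctl(ctl_fd, DM_TABLE_STATUS, io) < 0)
		return -1;

	if (!(io->flags & DM_INACTIVE_PRESENT_FLAG))
		return 0;	/* no inactive table is loaded */

	/* ... parse the target specs that follow the header ... */
	return 1;
}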
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a41373..9e2a7a401df5 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
78/* Delay bios on regions. */ 78/* Delay bios on regions. */
79void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); 79void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
80 80
81void dm_rh_mark_nosync(struct dm_region_hash *rh, 81void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
82 struct bio *bio, unsigned done, int error);
83 82
84/* 83/*
85 * Region recovery control. 84 * Region recovery control.
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2b9f2ac7ed60..78784982b33e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,7 +74,7 @@ enum dma_transaction_type {
74 * control completion, and communicate status. 74 * control completion, and communicate status.
75 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of 75 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
76 * this transaction 76 * this transaction
77 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client 77 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
78 * acknowledges receipt, i.e. has had a chance to establish any dependency 78 * acknowledges receipt, i.e. has had a chance to establish any dependency
79 * chains 79 * chains
80 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) 80 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a0d9422a1569..f8c2e1767500 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name);
57 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ 57 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
58 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ 58 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
59 if (__dynamic_dbg_enabled(descriptor)) \ 59 if (__dynamic_dbg_enabled(descriptor)) \
60 printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \ 60 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
61 ##__VA_ARGS__); \
62 } while (0) 61 } while (0)
63 62
64 63
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name);
69 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ 68 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
70 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ 69 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
71 if (__dynamic_dbg_enabled(descriptor)) \ 70 if (__dynamic_dbg_enabled(descriptor)) \
72 dev_printk(KERN_DEBUG, dev, \ 71 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
73 KBUILD_MODNAME ": " fmt, \
74 ##__VA_ARGS__); \
75 } while (0) 72 } while (0)
76 73
77#else 74#else
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod)
81 return 0; 78 return 0;
82} 79}
83 80
84#define dynamic_pr_debug(fmt, ...) do { } while (0) 81#define dynamic_pr_debug(fmt, ...) \
85#define dynamic_dev_dbg(dev, format, ...) do { } while (0) 82 do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
83#define dynamic_dev_dbg(dev, format, ...) \
84 do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
86#endif 85#endif
87 86
88#endif 87#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ce4581fbc08b..fb737bc19a8c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
280static inline char * 280static inline char *
281efi_guid_unparse(efi_guid_t *guid, char *out) 281efi_guid_unparse(efi_guid_t *guid, char *out)
282{ 282{
283 sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", 283 sprintf(out, "%pUl", guid->b);
284 guid->b[3], guid->b[2], guid->b[1], guid->b[0],
285 guid->b[5], guid->b[4], guid->b[7], guid->b[6],
286 guid->b[8], guid->b[9], guid->b[10], guid->b[11],
287 guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
288 return out; 284 return out;
289} 285}
290 286
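The efi_guid_unparse() rewrite above leans on the kernel's %pU pointer extension; a one-line sketch of the same idiom in a log message (the message text is illustrative).

#include <linux/kernel.h>
#include <linux/efi.h>

static void example_print_guid(const efi_guid_t *guid)
{
	/* %pUl formats the 16 GUID bytes as a little-endian UUID string */
	printk(KERN_INFO "EFI variable GUID: %pUl\n", guid->b);
}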
diff --git a/include/linux/err.h b/include/linux/err.h
index ec87f3142bf3..1b12642636c7 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
34 return IS_ERR_VALUE((unsigned long)ptr); 34 return IS_ERR_VALUE((unsigned long)ptr);
35} 35}
36 36
37static inline long IS_ERR_OR_NULL(const void *ptr)
38{
39 return !ptr || IS_ERR_VALUE((unsigned long)ptr);
40}
41
37/** 42/**
38 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type 43 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
39 * @ptr: The pointer to cast. 44 * @ptr: The pointer to cast.
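A sketch of the call sites the new IS_ERR_OR_NULL() helper is aimed at, where a lookup can return NULL for "not found" and an ERR_PTR() for a hard failure; struct example_object and the -ENOENT mapping are illustrative.

#include <linux/err.h>
#include <linux/errno.h>

struct example_object;

static int example_check(struct example_object *obj)
{
	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENOENT;

	/* ... obj is a valid pointer here ... */
	return 0;
}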
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 27e772cefb6a..dc12f416a49f 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -97,7 +97,7 @@ struct fid {
97 * @get_name: find the name for a given inode in a given directory 97 * @get_name: find the name for a given inode in a given directory
98 * @get_parent: find the parent of a given directory 98 * @get_parent: find the parent of a given directory
99 * 99 *
100 * See Documentation/filesystems/Exporting for details on how to use 100 * See Documentation/filesystems/nfs/Exporting for details on how to use
101 * this interface correctly. 101 * this interface correctly.
102 * 102 *
103 * encode_fh: 103 * encode_fh:
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a057f48eb156..b23a7018eb90 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2264,9 +2264,11 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
2264 int lock_type); 2264 int lock_type);
2265 2265
2266enum { 2266enum {
2267 DIO_LOCKING = 1, /* need locking between buffered and direct access */ 2267 /* need locking between buffered and direct access */
2268 DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */ 2268 DIO_LOCKING = 0x01,
2269 DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */ 2269
2270 /* filesystem does not support filling holes */
2271 DIO_SKIP_HOLES = 0x02,
2270}; 2272};
2271 2273
2272static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, 2274static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
@@ -2275,7 +2277,8 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
2275 dio_iodone_t end_io) 2277 dio_iodone_t end_io)
2276{ 2278{
2277 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2279 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2278 nr_segs, get_block, end_io, DIO_LOCKING); 2280 nr_segs, get_block, end_io,
2281 DIO_LOCKING | DIO_SKIP_HOLES);
2279} 2282}
2280 2283
2281static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, 2284static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
@@ -2284,16 +2287,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
2284 dio_iodone_t end_io) 2287 dio_iodone_t end_io)
2285{ 2288{
2286 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2289 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2287 nr_segs, get_block, end_io, DIO_NO_LOCKING); 2290 nr_segs, get_block, end_io, 0);
2288}
2289
2290static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
2291 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
2292 loff_t offset, unsigned long nr_segs, get_block_t get_block,
2293 dio_iodone_t end_io)
2294{
2295 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2296 nr_segs, get_block, end_io, DIO_OWN_LOCKING);
2297} 2291}
2298#endif 2292#endif
2299 2293
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 059bd189d35d..4e949a5b5b85 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -99,6 +99,12 @@ static inline int gpio_export_link(struct device *dev, const char *name,
99 return -EINVAL; 99 return -EINVAL;
100} 100}
101 101
102static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
103{
104 /* GPIO can never have been requested */
105 WARN_ON(1);
106 return -EINVAL;
107}
102 108
103static inline void gpio_unexport(unsigned gpio) 109static inline void gpio_unexport(unsigned gpio)
104{ 110{
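The stub added above pairs with the real gpio_sysfs_set_active_low() in gpiolib; a hedged sketch of exporting a GPIO to sysfs as active-low (the GPIO label and error handling are placeholders).

#include <linux/gpio.h>

static int example_export_button(unsigned gpio)
{
	int ret;

	ret = gpio_request(gpio, "example-button");
	if (ret)
		return ret;

	ret = gpio_direction_input(gpio);
	if (!ret)
		ret = gpio_export(gpio, false);	/* userspace may not flip direction */
	if (!ret)
		ret = gpio_sysfs_set_active_low(gpio, 1);
	if (ret)
		gpio_free(gpio);
	return ret;
}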
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index af634e95871d..5d86fb2309d2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -169,7 +169,7 @@ struct hrtimer_clock_base {
169 * @max_hang_time: Maximum time spent in hrtimer_interrupt 169 * @max_hang_time: Maximum time spent in hrtimer_interrupt
170 */ 170 */
171struct hrtimer_cpu_base { 171struct hrtimer_cpu_base {
172 spinlock_t lock; 172 raw_spinlock_t lock;
173 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; 173 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
174#ifdef CONFIG_HIGH_RES_TIMERS 174#ifdef CONFIG_HIGH_RES_TIMERS
175 ktime_t expires_next; 175 ktime_t expires_next;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 41a59afc70fa..78b4bc64c006 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
23int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 23int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
24int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 24int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
25int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 25int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
26
27#ifdef CONFIG_NUMA
28int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
29 void __user *, size_t *, loff_t *);
30#endif
31
26int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); 32int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
27int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, 33int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
28 struct page **, struct vm_area_struct **, 34 struct page **, struct vm_area_struct **,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419ab546b266..02fc617782ef 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
110 * @driver: Device driver model driver 110 * @driver: Device driver model driver
111 * @id_table: List of I2C devices supported by this driver 111 * @id_table: List of I2C devices supported by this driver
112 * @detect: Callback for device detection 112 * @detect: Callback for device detection
113 * @address_data: The I2C addresses to probe (for detect) 113 * @address_list: The I2C addresses to probe (for detect)
114 * @clients: List of detected clients we created (for i2c-core use only) 114 * @clients: List of detected clients we created (for i2c-core use only)
115 * 115 *
116 * The driver.owner field should be set to the module owner of this driver. 116 * The driver.owner field should be set to the module owner of this driver.
@@ -161,8 +161,8 @@ struct i2c_driver {
161 const struct i2c_device_id *id_table; 161 const struct i2c_device_id *id_table;
162 162
163 /* Device detection callback for automatic device creation */ 163 /* Device detection callback for automatic device creation */
164 int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *); 164 int (*detect)(struct i2c_client *, struct i2c_board_info *);
165 const struct i2c_client_address_data *address_data; 165 const unsigned short *address_list;
166 struct list_head clients; 166 struct list_head clients;
167}; 167};
168#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) 168#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
391#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ 391#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
392#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ 392#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
393 393
394/* i2c_client_address_data is the struct for holding default client
395 * addresses for a driver and for the parameters supplied on the
396 * command line
397 */
398struct i2c_client_address_data {
399 const unsigned short *normal_i2c;
400};
401
402/* Internal numbers to terminate lists */ 394/* Internal numbers to terminate lists */
403#define I2C_CLIENT_END 0xfffeU 395#define I2C_CLIENT_END 0xfffeU
404 396
@@ -576,82 +568,4 @@ union i2c_smbus_data {
576#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ 568#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
577#define I2C_SMBUS_I2C_BLOCK_DATA 8 569#define I2C_SMBUS_I2C_BLOCK_DATA 8
578 570
579
580#ifdef __KERNEL__
581
582/* These defines are used for probing i2c client addresses */
583/* The length of the option lists */
584#define I2C_CLIENT_MAX_OPTS 48
585
586/* Default fill of many variables */
587#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
588 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
589 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
590 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
591 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
592 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
593 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
594 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
595 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
596 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
597 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
598 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
599 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
600 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
601 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
602 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
603
604/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
605 module header */
606
607#define I2C_CLIENT_MODULE_PARM(var,desc) \
608 static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
609 static unsigned int var##_num; \
610 module_param_array(var, short, &var##_num, 0); \
611 MODULE_PARM_DESC(var, desc)
612
613#define I2C_CLIENT_INSMOD_COMMON \
614static const struct i2c_client_address_data addr_data = { \
615 .normal_i2c = normal_i2c, \
616}
617
618/* These are the ones you want to use in your own drivers. Pick the one
619 which matches the number of devices the driver differenciates between. */
620#define I2C_CLIENT_INSMOD \
621I2C_CLIENT_INSMOD_COMMON
622
623#define I2C_CLIENT_INSMOD_1(chip1) \
624enum chips { any_chip, chip1 }; \
625I2C_CLIENT_INSMOD_COMMON
626
627#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
628enum chips { any_chip, chip1, chip2 }; \
629I2C_CLIENT_INSMOD_COMMON
630
631#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
632enum chips { any_chip, chip1, chip2, chip3 }; \
633I2C_CLIENT_INSMOD_COMMON
634
635#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
636enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
637I2C_CLIENT_INSMOD_COMMON
638
639#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
640enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
641I2C_CLIENT_INSMOD_COMMON
642
643#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
644enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
645I2C_CLIENT_INSMOD_COMMON
646
647#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
648enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
649 chip7 }; \
650I2C_CLIENT_INSMOD_COMMON
651
652#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
653enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
654 chip7, chip8 }; \
655I2C_CLIENT_INSMOD_COMMON
656#endif /* __KERNEL__ */
657#endif /* _LINUX_I2C_H */ 571#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 60c3360ef6ad..9bf6870ee5f4 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -39,6 +39,10 @@ void i8042_lock_chip(void);
39void i8042_unlock_chip(void); 39void i8042_unlock_chip(void);
40int i8042_command(unsigned char *param, int command); 40int i8042_command(unsigned char *param, int command);
41bool i8042_check_port_owner(const struct serio *); 41bool i8042_check_port_owner(const struct serio *);
42int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
43 struct serio *serio));
44int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
45 struct serio *serio));
42 46
43#else 47#else
44 48
@@ -52,7 +56,7 @@ void i8042_unlock_chip(void)
52 56
53int i8042_command(unsigned char *param, int command) 57int i8042_command(unsigned char *param, int command)
54{ 58{
55 return -ENOSYS; 59 return -ENODEV;
56} 60}
57 61
58bool i8042_check_port_owner(const struct serio *serio) 62bool i8042_check_port_owner(const struct serio *serio)
@@ -60,6 +64,18 @@ bool i8042_check_port_owner(const struct serio *serio)
60 return false; 64 return false;
61} 65}
62 66
67int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
68 struct serio *serio))
69{
70 return -ENODEV;
71}
72
73int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
74 struct serio *serio))
75{
76 return -ENODEV;
77}
78
63#endif 79#endif
64 80
65#endif 81#endif
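A sketch of the new i8042 filter hook: the filter sees each data byte (and the status register value) before the serio layer does, and returns true to swallow it. This one passes everything through; a platform driver might instead intercept a vendor hotkey scancode.

#include <linux/i8042.h>
#include <linux/serio.h>
#include <linux/module.h>

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio)
{
	return false;	/* do not swallow anything in this sketch */
}

static int __init example_init(void)
{
	return i8042_install_filter(example_i8042_filter);
}

static void __exit example_exit(void)
{
	i8042_remove_filter(example_i8042_filter);
}

module_init(example_init);
module_exit(example_exit);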
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8d10aa7fd4c9..5ed8b9c50355 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -111,6 +111,12 @@ extern struct cred init_cred;
111# define INIT_PERF_EVENTS(tsk) 111# define INIT_PERF_EVENTS(tsk)
112#endif 112#endif
113 113
114#ifdef CONFIG_FS_JOURNAL_INFO
115#define INIT_JOURNAL_INFO .journal_info = NULL,
116#else
117#define INIT_JOURNAL_INFO
118#endif
119
114/* 120/*
115 * INIT_TASK is used to set up the first task table, touch at 121 * INIT_TASK is used to set up the first task table, touch at
116 * your own risk!. Base=0, limit=0x1fffff (=2MB) 122 * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,10 +168,9 @@ extern struct cred init_cred;
162 .signal = {{0}}}, \ 168 .signal = {{0}}}, \
163 .blocked = {{0}}, \ 169 .blocked = {{0}}, \
164 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ 170 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
165 .journal_info = NULL, \
166 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 171 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
167 .fs_excl = ATOMIC_INIT(0), \ 172 .fs_excl = ATOMIC_INIT(0), \
168 .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ 173 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
169 .timer_slack_ns = 50000, /* 50 usec default slack */ \ 174 .timer_slack_ns = 50000, /* 50 usec default slack */ \
170 .pids = { \ 175 .pids = { \
171 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ 176 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
@@ -173,6 +178,7 @@ extern struct cred init_cred;
173 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ 178 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
174 }, \ 179 }, \
175 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ 180 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
181 INIT_JOURNAL_INFO \
176 INIT_IDS \ 182 INIT_IDS \
177 INIT_PERF_EVENTS(tsk) \ 183 INIT_PERF_EVENTS(tsk) \
178 INIT_TRACE_IRQFLAGS \ 184 INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4f0a72a9740c..9310c699a37d 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -332,6 +332,7 @@ struct intel_iommu {
332#ifdef CONFIG_INTR_REMAP 332#ifdef CONFIG_INTR_REMAP
333 struct ir_table *ir_table; /* Interrupt remapping info */ 333 struct ir_table *ir_table; /* Interrupt remapping info */
334#endif 334#endif
335 int node;
335}; 336};
336 337
337static inline void __iommu_flush_cache( 338static inline void __iommu_flush_cache(
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 3b068e5b5671..64d1b638745d 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -14,14 +14,11 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
14extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, 14extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
15 unsigned long shift, 15 unsigned long shift,
16 unsigned long boundary_size); 16 unsigned long boundary_size);
17extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
18extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, 17extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
19 unsigned long start, unsigned int nr, 18 unsigned long start, unsigned int nr,
20 unsigned long shift, 19 unsigned long shift,
21 unsigned long boundary_size, 20 unsigned long boundary_size,
22 unsigned long align_mask); 21 unsigned long align_mask);
23extern void iommu_area_free(unsigned long *map, unsigned long start,
24 unsigned int nr);
25 22
26extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len, 23extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
27 unsigned long io_page_size); 24 unsigned long io_page_size);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 83aa81297ea3..7129504e053d 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -126,11 +126,11 @@ extern int allocate_resource(struct resource *root, struct resource *new,
126int adjust_resource(struct resource *res, resource_size_t start, 126int adjust_resource(struct resource *res, resource_size_t start,
127 resource_size_t size); 127 resource_size_t size);
128resource_size_t resource_alignment(struct resource *res); 128resource_size_t resource_alignment(struct resource *res);
129static inline resource_size_t resource_size(struct resource *res) 129static inline resource_size_t resource_size(const struct resource *res)
130{ 130{
131 return res->end - res->start + 1; 131 return res->end - res->start + 1;
132} 132}
133static inline unsigned long resource_type(struct resource *res) 133static inline unsigned long resource_type(const struct resource *res)
134{ 134{
135 return res->flags & IORESOURCE_TYPE_BITS; 135 return res->flags & IORESOURCE_TYPE_BITS;
136} 136}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e408722a84c7..07baa38bce37 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -87,7 +87,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
87/* default values */ 87/* default values */
88#define DFLT_QUEUESMAX 256 /* max number of message queues */ 88#define DFLT_QUEUESMAX 256 /* max number of message queues */
89#define DFLT_MSGMAX 10 /* max number of messages in each queue */ 89#define DFLT_MSGMAX 10 /* max number of messages in each queue */
90#define HARD_MSGMAX (131072/sizeof(void *)) 90#define HARD_MSGMAX (32768*sizeof(void *)/4)
91#define DFLT_MSGSIZEMAX 8192 /* max message size */ 91#define DFLT_MSGSIZEMAX 8192 /* max message size */
92#else 92#else
93static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } 93static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
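For reference, the effect of the HARD_MSGMAX change above (old formula 131072/sizeof(void *), new formula 32768*sizeof(void *)/4), assuming 4-byte and 8-byte pointers:

	32-bit: old 131072/4 = 32768, new 32768*4/4 = 32768 (unchanged)
	64-bit: old 131072/8 = 16384, new 32768*8/4 = 65536 (limit raised)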
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a6..451481c082b5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
192 unsigned int irq_count; /* For detecting broken IRQs */ 192 unsigned int irq_count; /* For detecting broken IRQs */
193 unsigned long last_unhandled; /* Aging timer for unhandled count */ 193 unsigned long last_unhandled; /* Aging timer for unhandled count */
194 unsigned int irqs_unhandled; 194 unsigned int irqs_unhandled;
195 spinlock_t lock; 195 raw_spinlock_t lock;
196#ifdef CONFIG_SMP 196#ifdef CONFIG_SMP
197 cpumask_var_t affinity; 197 cpumask_var_t affinity;
198 unsigned int node; 198 unsigned int node;
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 792274269f2b..d8e9b3d1c23c 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
107 __builtin_extract_return_addr((void *)addr)); 107 __builtin_extract_return_addr((void *)addr));
108} 108}
109 109
110/*
111 * Pretty-print a function pointer. This function is deprecated.
112 * Please use the "%pF" vsprintf format instead.
113 */
114static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
115{
116#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
117 addr = *(void **)addr;
118#endif
119 print_symbol(fmt, (unsigned long)addr);
120}
121
122static inline void print_ip_sym(unsigned long ip) 110static inline void print_ip_sym(unsigned long ip)
123{ 111{
124 printk("[<%p>] %pS\n", (void *) ip, (void *) ip); 112 printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fa4c590cf12..4d9c916d06d9 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -251,10 +251,10 @@ extern int printk_delay_msec;
251 * Print a one-time message (analogous to WARN_ONCE() et al): 251 * Print a one-time message (analogous to WARN_ONCE() et al):
252 */ 252 */
253#define printk_once(x...) ({ \ 253#define printk_once(x...) ({ \
254 static bool __print_once = true; \ 254 static bool __print_once; \
255 \ 255 \
256 if (__print_once) { \ 256 if (!__print_once) { \
257 __print_once = false; \ 257 __print_once = true; \
258 printk(x); \ 258 printk(x); \
259 } \ 259 } \
260}) 260})
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
397 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) 397 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
398#elif defined(CONFIG_DYNAMIC_DEBUG) 398#elif defined(CONFIG_DYNAMIC_DEBUG)
399/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ 399/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
400#define pr_debug(fmt, ...) do { \ 400#define pr_debug(fmt, ...) \
401 dynamic_pr_debug(fmt, ##__VA_ARGS__); \ 401 dynamic_pr_debug(fmt, ##__VA_ARGS__)
402 } while (0)
403#else 402#else
404#define pr_debug(fmt, ...) \ 403#define pr_debug(fmt, ...) \
405 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) 404 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
406#endif 405#endif
407 406
408/* 407/*
408 * ratelimited messages with local ratelimit_state,
409 * no local ratelimit_state used in the !PRINTK case
410 */
411#ifdef CONFIG_PRINTK
412#define printk_ratelimited(fmt, ...) ({ \
413 static struct ratelimit_state _rs = { \
414 .interval = DEFAULT_RATELIMIT_INTERVAL, \
415 .burst = DEFAULT_RATELIMIT_BURST, \
416 }; \
417 \
418 if (!__ratelimit(&_rs)) \
419 printk(fmt, ##__VA_ARGS__); \
420})
421#else
422/* No effect, but we still get type checking even in the !PRINTK case: */
423#define printk_ratelimited printk
424#endif
425
426#define pr_emerg_ratelimited(fmt, ...) \
427 printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
428#define pr_alert_ratelimited(fmt, ...) \
429 printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
430#define pr_crit_ratelimited(fmt, ...) \
431 printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
432#define pr_err_ratelimited(fmt, ...) \
433 printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
434#define pr_warning_ratelimited(fmt, ...) \
435 printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
436#define pr_notice_ratelimited(fmt, ...) \
437 printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
438#define pr_info_ratelimited(fmt, ...) \
439 printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
440/* no pr_cont_ratelimited, don't do that... */
441/* If you are writing a driver, please use dev_dbg instead */
442#if defined(DEBUG)
443#define pr_debug_ratelimited(fmt, ...) \
444 printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
445#else
446#define pr_debug_ratelimited(fmt, ...) \
447 ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
448 ##__VA_ARGS__); 0; })
449#endif
450
451/*
409 * General tracing related utility functions - trace_printk(), 452 * General tracing related utility functions - trace_printk(),
410 * tracing_on/tracing_off and tracing_start()/tracing_stop 453 * tracing_on/tracing_off and tracing_start()/tracing_stop
411 * 454 *
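A sketch of the new ratelimited helpers in a hot error path; with CONFIG_PRINTK each call site carries its own ratelimit_state, so a repeating failure stops flooding the log. Names and message text are illustrative.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static void example_report_overrun(int queue, int dropped)
{
	/* emitted at most DEFAULT_RATELIMIT_BURST times per interval */
	pr_err_ratelimited("queue %d overrun, dropped %d packets\n",
			   queue, dropped);
}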
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index adc34f2c6eff..c356b6914ffd 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -206,6 +206,8 @@ extern size_t vmcoreinfo_max_size;
206 206
207int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, 207int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
208 unsigned long long *crash_size, unsigned long long *crash_base); 208 unsigned long long *crash_size, unsigned long long *crash_base);
209int crash_shrink_memory(unsigned long new_size);
210size_t crash_get_memory_size(void);
209 211
210#else /* !CONFIG_KEXEC */ 212#else /* !CONFIG_KEXEC */
211struct pt_regs; 213struct pt_regs;
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
new file mode 100644
index 000000000000..e32aa268efac
--- /dev/null
+++ b/include/linux/kmsg_dump.h
@@ -0,0 +1,60 @@
1/*
2 * linux/include/kmsg_dump.h
3 *
4 * Copyright (C) 2009 Net Insight AB
5 *
6 * Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file COPYING in the main directory of this archive
10 * for more details.
11 */
12#ifndef _LINUX_KMSG_DUMP_H
13#define _LINUX_KMSG_DUMP_H
14
15#include <linux/list.h>
16
17enum kmsg_dump_reason {
18 KMSG_DUMP_OOPS,
19 KMSG_DUMP_PANIC,
20};
21
22/**
23 * struct kmsg_dumper - kernel crash message dumper structure
24 * @dump: The callback which gets called on crashes. The buffer is passed
25 * as two sections, where s1 (length l1) contains the older
26 * messages and s2 (length l2) contains the newer.
27 * @list: Entry in the dumper list (private)
28 * @registered: Flag that specifies if this is already registered
29 */
30struct kmsg_dumper {
31 void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
32 const char *s1, unsigned long l1,
33 const char *s2, unsigned long l2);
34 struct list_head list;
35 int registered;
36};
37
38#ifdef CONFIG_PRINTK
39void kmsg_dump(enum kmsg_dump_reason reason);
40
41int kmsg_dump_register(struct kmsg_dumper *dumper);
42
43int kmsg_dump_unregister(struct kmsg_dumper *dumper);
44#else
45static inline void kmsg_dump(enum kmsg_dump_reason reason)
46{
47}
48
49static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
50{
51 return -EINVAL;
52}
53
54static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
55{
56 return -EINVAL;
57}
58#endif
59
60#endif /* _LINUX_KMSG_DUMP_H */
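A sketch of registering a dumper against the new interface above: on an oops or panic the callback receives the log buffer in two chunks, older then newer. example_flash_write() stands in for whatever persistent store a real driver would use.

#include <linux/kmsg_dump.h>
#include <linux/module.h>

static void example_flash_write(const char *buf, unsigned long len)
{
	/* hypothetical: push len bytes at buf to persistent storage */
}

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	example_flash_write(s1, l1);	/* older part of the log buffer */
	example_flash_write(s2, l2);	/* newer part of the log buffer */
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);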
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5d..43bdab769fc3 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
9 9
10#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/pagemap.h>
13#include <linux/rmap.h>
12#include <linux/sched.h> 14#include <linux/sched.h>
13#include <linux/vmstat.h> 15
16struct stable_node;
17struct mem_cgroup;
14 18
15#ifdef CONFIG_KSM 19#ifdef CONFIG_KSM
16int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 20int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,46 +38,110 @@ static inline void ksm_exit(struct mm_struct *mm)
34/* 38/*
35 * A KSM page is one of those write-protected "shared pages" or "merged pages" 39 * A KSM page is one of those write-protected "shared pages" or "merged pages"
36 * which KSM maps into multiple mms, wherever identical anonymous page content 40 * which KSM maps into multiple mms, wherever identical anonymous page content
37 * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. 41 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
42 * anon_vma, but to that page's node of the stable tree.
38 */ 43 */
39static inline int PageKsm(struct page *page) 44static inline int PageKsm(struct page *page)
40{ 45{
41 return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); 46 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
47 (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
48}
49
50static inline struct stable_node *page_stable_node(struct page *page)
51{
52 return PageKsm(page) ? page_rmapping(page) : NULL;
53}
54
55static inline void set_page_stable_node(struct page *page,
56 struct stable_node *stable_node)
57{
58 page->mapping = (void *)stable_node +
59 (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
42} 60}
43 61
44/* 62/*
45 * But we have to avoid the checking which page_add_anon_rmap() performs. 63 * When do_swap_page() first faults in from swap what used to be a KSM page,
64 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
65 * it might be faulted into a different anon_vma (or perhaps to a different
66 * offset in the same anon_vma). do_swap_page() cannot do all the locking
67 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
68 * a copy, and leave remerging the pages to a later pass of ksmd.
69 *
70 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
71 * but what if the vma was unmerged while the page was swapped out?
46 */ 72 */
47static inline void page_add_ksm_rmap(struct page *page) 73struct page *ksm_does_need_to_copy(struct page *page,
74 struct vm_area_struct *vma, unsigned long address);
75static inline struct page *ksm_might_need_to_copy(struct page *page,
76 struct vm_area_struct *vma, unsigned long address)
48{ 77{
49 if (atomic_inc_and_test(&page->_mapcount)) { 78 struct anon_vma *anon_vma = page_anon_vma(page);
50 page->mapping = (void *) PAGE_MAPPING_ANON; 79
51 __inc_zone_page_state(page, NR_ANON_PAGES); 80 if (!anon_vma ||
52 } 81 (anon_vma == vma->anon_vma &&
82 page->index == linear_page_index(vma, address)))
83 return page;
84
85 return ksm_does_need_to_copy(page, vma, address);
53} 86}
87
88int page_referenced_ksm(struct page *page,
89 struct mem_cgroup *memcg, unsigned long *vm_flags);
90int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
91int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
92 struct vm_area_struct *, unsigned long, void *), void *arg);
93void ksm_migrate_page(struct page *newpage, struct page *oldpage);
94
54#else /* !CONFIG_KSM */ 95#else /* !CONFIG_KSM */
55 96
97static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
98{
99 return 0;
100}
101
102static inline void ksm_exit(struct mm_struct *mm)
103{
104}
105
106static inline int PageKsm(struct page *page)
107{
108 return 0;
109}
110
111#ifdef CONFIG_MMU
56static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 112static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
57 unsigned long end, int advice, unsigned long *vm_flags) 113 unsigned long end, int advice, unsigned long *vm_flags)
58{ 114{
59 return 0; 115 return 0;
60} 116}
61 117
62static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) 118static inline struct page *ksm_might_need_to_copy(struct page *page,
119 struct vm_area_struct *vma, unsigned long address)
120{
121 return page;
122}
123
124static inline int page_referenced_ksm(struct page *page,
125 struct mem_cgroup *memcg, unsigned long *vm_flags)
63{ 126{
64 return 0; 127 return 0;
65} 128}
66 129
67static inline void ksm_exit(struct mm_struct *mm) 130static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
68{ 131{
132 return 0;
69} 133}
70 134
71static inline int PageKsm(struct page *page) 135static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
136 struct vm_area_struct *, unsigned long, void *), void *arg)
72{ 137{
73 return 0; 138 return 0;
74} 139}
75 140
76/* No stub required for page_add_ksm_rmap(page) */ 141static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
142{
143}
144#endif /* CONFIG_MMU */
77#endif /* !CONFIG_KSM */ 145#endif /* !CONFIG_KSM */
78 146
79#endif 147#endif /* __LINUX_KSM_H */
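A sketch of the swap-in call site described in the ksm_might_need_to_copy() comment above: before mapping a page just read from swap into vma, let KSM substitute a private copy when the old KSM page cannot simply be reused there. The caller then continues with whatever page comes back, and must cope with a NULL return if the copy could not be allocated.

#include <linux/ksm.h>
#include <linux/mm.h>

static struct page *example_prepare_swapin(struct page *page,
					   struct vm_area_struct *vma,
					   unsigned long address)
{
	struct page *use_page;

	use_page = ksm_might_need_to_copy(page, vma, address);
	if (use_page != page) {
		/* the original page is no longer the one to map; the
		 * caller drops its reference on it and proceeds with
		 * use_page (which may be NULL on allocation failure) */
	}
	return use_page;
}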
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index 3cc2f2c53e4c..f1ca0dcc1628 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data {
43#define LIS3_WAKEUP_Z_HI (1 << 5) 43#define LIS3_WAKEUP_Z_HI (1 << 5)
44 unsigned char wakeup_flags; 44 unsigned char wakeup_flags;
45 unsigned char wakeup_thresh; 45 unsigned char wakeup_thresh;
46#define LIS3_NO_MAP 0
47#define LIS3_DEV_X 1
48#define LIS3_DEV_Y 2
49#define LIS3_DEV_Z 3
50#define LIS3_INV_DEV_X -1
51#define LIS3_INV_DEV_Y -2
52#define LIS3_INV_DEV_Z -3
53 s8 axis_x;
54 s8 axis_y;
55 s8 axis_z;
56 int (*setup_resources)(void);
57 int (*release_resources)(void);
58 /* Limits for selftest are specified in chip data sheet */
59 s16 st_min_limits[3]; /* min pass limit x, y, z */
60 s16 st_max_limits[3]; /* max pass limit x, y, z */
46}; 61};
47 62
48#endif /* __LIS3LV02D_H_ */ 63#endif /* __LIS3LV02D_H_ */
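A sketch of a board file using the new lis3lv02d axis-remap fields: here the sensor is assumed to be mounted rotated, so board X reads the chip's Y axis and board Y reads the inverted chip X axis. All values are illustrative.

#include <linux/lis3lv02d.h>

static struct lis3lv02d_platform_data example_lis3_pdata = {
	.axis_x	= LIS3_DEV_Y,
	.axis_y	= LIS3_INV_DEV_X,
	.axis_z	= LIS3_DEV_Z,
};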
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bf9213b2db8f..0b46c2068b96 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -54,6 +54,11 @@ extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
54extern void mem_cgroup_del_lru(struct page *page); 54extern void mem_cgroup_del_lru(struct page *page);
55extern void mem_cgroup_move_lists(struct page *page, 55extern void mem_cgroup_move_lists(struct page *page,
56 enum lru_list from, enum lru_list to); 56 enum lru_list from, enum lru_list to);
57
58/* For coalescing uncharge for reducing memcg' overhead*/
59extern void mem_cgroup_uncharge_start(void);
60extern void mem_cgroup_uncharge_end(void);
61
57extern void mem_cgroup_uncharge_page(struct page *page); 62extern void mem_cgroup_uncharge_page(struct page *page);
58extern void mem_cgroup_uncharge_cache_page(struct page *page); 63extern void mem_cgroup_uncharge_cache_page(struct page *page);
59extern int mem_cgroup_shmem_charge_fallback(struct page *page, 64extern int mem_cgroup_shmem_charge_fallback(struct page *page,
@@ -117,7 +122,7 @@ static inline bool mem_cgroup_disabled(void)
117} 122}
118 123
119extern bool mem_cgroup_oom_called(struct task_struct *task); 124extern bool mem_cgroup_oom_called(struct task_struct *task);
120void mem_cgroup_update_mapped_file_stat(struct page *page, int val); 125void mem_cgroup_update_file_mapped(struct page *page, int val);
121unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 126unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
122 gfp_t gfp_mask, int nid, 127 gfp_t gfp_mask, int nid,
123 int zid); 128 int zid);
@@ -151,6 +156,14 @@ static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
151{ 156{
152} 157}
153 158
159static inline void mem_cgroup_uncharge_start(void)
160{
161}
162
163static inline void mem_cgroup_uncharge_end(void)
164{
165}
166
154static inline void mem_cgroup_uncharge_page(struct page *page) 167static inline void mem_cgroup_uncharge_page(struct page *page)
155{ 168{
156} 169}
@@ -274,7 +287,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
274{ 287{
275} 288}
276 289
277static inline void mem_cgroup_update_mapped_file_stat(struct page *page, 290static inline void mem_cgroup_update_file_mapped(struct page *page,
278 int val) 291 int val)
279{ 292{
280} 293}
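A sketch of the batching hint added above: bracketing a loop of uncharges lets the memcg core coalesce the accounting updates instead of touching the counters once per page. The loop body here is illustrative; real callers sit in the page release paths.

#include <linux/memcontrol.h>
#include <linux/mm.h>

static void example_uncharge_many(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);
	mem_cgroup_uncharge_end();
}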
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fed969281a41..35b07b773e6c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -69,7 +69,6 @@ extern void online_page(struct page *page);
69/* VM interface that may be used by firmware interface */ 69/* VM interface that may be used by firmware interface */
70extern int online_pages(unsigned long, unsigned long); 70extern int online_pages(unsigned long, unsigned long);
71extern void __offline_isolated_pages(unsigned long, unsigned long); 71extern void __offline_isolated_pages(unsigned long, unsigned long);
72extern int offline_pages(unsigned long, unsigned long, unsigned long);
73 72
74/* reasonably generic interface to expand the physical pages in a zone */ 73/* reasonably generic interface to expand the physical pages in a zone */
75extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, 74extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 085c903fe0f1..1cc966cd3e5f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
201extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, 201extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
202 unsigned long addr, gfp_t gfp_flags, 202 unsigned long addr, gfp_t gfp_flags,
203 struct mempolicy **mpol, nodemask_t **nodemask); 203 struct mempolicy **mpol, nodemask_t **nodemask);
204extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
204extern unsigned slab_node(struct mempolicy *policy); 205extern unsigned slab_node(struct mempolicy *policy);
205 206
206extern enum zone_type policy_zone; 207extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
328 return node_zonelist(0, gfp_flags); 329 return node_zonelist(0, gfp_flags);
329} 330}
330 331
332static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }
333
331static inline int do_migrate_pages(struct mm_struct *mm, 334static inline int do_migrate_pages(struct mm_struct *mm,
332 const nodemask_t *from_nodes, 335 const nodemask_t *from_nodes,
333 const nodemask_t *to_nodes, int flags) 336 const nodemask_t *to_nodes, int flags)
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index d9034cc87f18..3398bd9aab11 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,7 +29,12 @@ struct pcf50633_platform_data {
29 char **batteries; 29 char **batteries;
30 int num_batteries; 30 int num_batteries;
31 31
32 int charging_restart_interval; 32 /*
33 * Should be set accordingly to the reference resistor used, see
34 * I_{ch(ref)} charger reference current in the pcf50633 User
35 * Manual.
36 */
37 int charger_reference_current_ma;
33 38
34 /* Callbacks */ 39 /* Callbacks */
35 void (*probe_done)(struct pcf50633 *); 40 void (*probe_done)(struct pcf50633 *);
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 4119579acf2c..df4f5fa88de3 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 {
128int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); 128int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
129 129
130int pcf50633_mbc_get_status(struct pcf50633 *); 130int pcf50633_mbc_get_status(struct pcf50633 *);
131int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
131 132
132#endif 133#endif
133 134
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 527602cdea1c..7f085c97c799 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
12extern int putback_lru_pages(struct list_head *l); 12extern int putback_lru_pages(struct list_head *l);
13extern int migrate_page(struct address_space *, 13extern int migrate_page(struct address_space *,
14 struct page *, struct page *); 14 struct page *, struct page *);
15extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long); 15extern int migrate_pages(struct list_head *l, new_page_t x,
16 unsigned long private, int offlining);
16 17
17extern int fail_migrate_page(struct address_space *, 18extern int fail_migrate_page(struct address_space *,
18 struct page *, struct page *); 19 struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
26 27
27static inline int putback_lru_pages(struct list_head *l) { return 0; } 28static inline int putback_lru_pages(struct list_head *l) { return 0; }
28static inline int migrate_pages(struct list_head *l, new_page_t x, 29static inline int migrate_pages(struct list_head *l, new_page_t x,
29 unsigned long private) { return -ENOSYS; } 30 unsigned long private, int offlining) { return -ENOSYS; }
30
31static inline int migrate_pages_to(struct list_head *pagelist,
32 struct vm_area_struct *vma, int dest) { return 0; }
33 31
34static inline int migrate_prep(void) { return -ENOSYS; } 32static inline int migrate_prep(void) { return -ENOSYS; }
35 33
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ce7cc6c7bcbb..e92d1bfdb330 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -61,6 +61,7 @@ enum {
61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, 61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, 62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
63 MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, 63 MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
64 MLX4_DEV_CAP_FLAG_BLH = 1 << 15,
64 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, 65 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
65 MLX4_DEV_CAP_FLAG_APM = 1 << 17, 66 MLX4_DEV_CAP_FLAG_APM = 1 << 17,
66 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, 67 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4d..9d65ae4ba0e0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
620/* 620/*
621 * On an anonymous page mapped into a user virtual memory area, 621 * On an anonymous page mapped into a user virtual memory area,
622 * page->mapping points to its anon_vma, not to a struct address_space; 622 * page->mapping points to its anon_vma, not to a struct address_space;
623 * with the PAGE_MAPPING_ANON bit set to distinguish it. 623 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
624 *
625 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
626 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
627 * and then page->mapping points, not to an anon_vma, but to a private
628 * structure which KSM associates with that merged page. See ksm.h.
629 *
630 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
624 * 631 *
625 * Please note that, confusingly, "page_mapping" refers to the inode 632 * Please note that, confusingly, "page_mapping" refers to the inode
626 * address_space which maps the page from disk; whereas "page_mapped" 633 * address_space which maps the page from disk; whereas "page_mapped"
627 * refers to user virtual address space into which the page is mapped. 634 * refers to user virtual address space into which the page is mapped.
628 */ 635 */
629#define PAGE_MAPPING_ANON 1 636#define PAGE_MAPPING_ANON 1
637#define PAGE_MAPPING_KSM 2
638#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
630 639
631extern struct address_space swapper_space; 640extern struct address_space swapper_space;
632static inline struct address_space *page_mapping(struct page *page) 641static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
634 struct address_space *mapping = page->mapping; 643 struct address_space *mapping = page->mapping;
635 644
636 VM_BUG_ON(PageSlab(page)); 645 VM_BUG_ON(PageSlab(page));
637#ifdef CONFIG_SWAP
638 if (unlikely(PageSwapCache(page))) 646 if (unlikely(PageSwapCache(page)))
639 mapping = &swapper_space; 647 mapping = &swapper_space;
640 else 648 else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
641#endif
642 if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
643 mapping = NULL; 649 mapping = NULL;
644 return mapping; 650 return mapping;
645} 651}
646 652
653/* Neutral page->mapping pointer to address_space or anon_vma or other */
654static inline void *page_rmapping(struct page *page)
655{
656 return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
657}
658
647static inline int PageAnon(struct page *page) 659static inline int PageAnon(struct page *page)
648{ 660{
649 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; 661 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
758 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry 770 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
759 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry 771 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
760 * @pte_hole: if set, called for each hole at all levels 772 * @pte_hole: if set, called for each hole at all levels
773 * @hugetlb_entry: if set, called for each hugetlb entry
761 * 774 *
762 * (see walk_page_range for more details) 775 * (see walk_page_range for more details)
763 */ 776 */
@@ -767,6 +780,8 @@ struct mm_walk {
767 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); 780 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
768 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); 781 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
769 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); 782 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
783 int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
784 struct mm_walk *);
770 struct mm_struct *mm; 785 struct mm_struct *mm;
771 void *private; 786 void *private;
772}; 787};
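To show how the new hook is meant to be wired up, a hedged sketch of a walker that counts present hugetlb entries in a range; the names are invented, error handling is omitted, and the caller is assumed to hold mmap_sem.

        static int demo_hugetlb_entry(pte_t *pte, unsigned long addr,
                                      unsigned long next, struct mm_walk *walk)
        {
                unsigned long *pages = walk->private;

                if (pte_present(*pte))
                        (*pages)++;
                return 0;
        }

        static unsigned long demo_count_hugetlb(struct mm_struct *mm,
                                                unsigned long start, unsigned long end)
        {
                unsigned long pages = 0;
                struct mm_walk walk = {
                        .hugetlb_entry  = demo_hugetlb_entry,
                        .mm             = mm,
                        .private        = &pages,
                };

                walk_page_range(start, end, &walk);
                return pages;
        }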
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index fff8c53e5434..9c3757c5759d 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,22 +19,21 @@
19 19
20/** 20/**
21 * struct nand_bbt_descr - bad block table descriptor 21 * struct nand_bbt_descr - bad block table descriptor
22 * @options: options for this descriptor 22 * @options: options for this descriptor
23 * @pages: the page(s) where we find the bbt, used with 23 * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
24 * option BBT_ABSPAGE when bbt is searched, 24 * when bbt is searched, then we store the found bbts pages here.
25 * then we store the found bbts pages here. 25 * Its an array and supports up to 8 chips now
26 * Its an array and supports up to 8 chips now 26 * @offs: offset of the pattern in the oob area of the page
 27 * @offs: offset of the pattern in the oob area of the page 27 * @veroffs: offset of the bbt version counter in the oob area of the page
28 * @veroffs: offset of the bbt version counter in the oob area of the page 28 * @version: version read from the bbt page during scan
29 * @version: version read from the bbt page during scan 29 * @len: length of the pattern, if 0 no pattern check is performed
30 * @len: length of the pattern, if 0 no pattern check is performed 30 * @maxblocks: maximum number of blocks to search for a bbt. This number of
31 * @maxblocks: maximum number of blocks to search for a bbt. This 31 * blocks is reserved at the end of the device where the tables are
32 * number of blocks is reserved at the end of the device 32 * written.
33 * where the tables are written. 33 * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
34 * @reserved_block_code: if non-0, this pattern denotes a reserved 34 * bad) block in the stored bbt
35 * (rather than bad) block in the stored bbt 35 * @pattern: pattern to identify bad block table or factory marked good /
36 * @pattern: pattern to identify bad block table or factory marked 36 * bad blocks, can be NULL, if len = 0
37 * good / bad blocks, can be NULL, if len = 0
38 * 37 *
39 * Descriptor for the bad block table marker and the descriptor for the 38 * Descriptor for the bad block table marker and the descriptor for the
40 * pattern which identifies good and bad blocks. The assumption is made 39 * pattern which identifies good and bad blocks. The assumption is made
@@ -90,7 +89,9 @@ struct nand_bbt_descr {
90/* 89/*
91 * Constants for oob configuration 90 * Constants for oob configuration
92 */ 91 */
93#define ONENAND_BADBLOCK_POS 0 92#define NAND_SMALL_BADBLOCK_POS 5
93#define NAND_LARGE_BADBLOCK_POS 0
94#define ONENAND_BADBLOCK_POS 0
94 95
95/* 96/*
96 * Bad block scanning errors 97 * Bad block scanning errors
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 88d3d8fbf9f2..df89f4275232 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -518,10 +518,11 @@ struct cfi_fixup {
518#define CFI_MFR_ANY 0xffff 518#define CFI_MFR_ANY 0xffff
519#define CFI_ID_ANY 0xffff 519#define CFI_ID_ANY 0xffff
520 520
521#define CFI_MFR_AMD 0x0001 521#define CFI_MFR_AMD 0x0001
522#define CFI_MFR_ATMEL 0x001F 522#define CFI_MFR_INTEL 0x0089
523#define CFI_MFR_SAMSUNG 0x00EC 523#define CFI_MFR_ATMEL 0x001F
524#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ 524#define CFI_MFR_SAMSUNG 0x00EC
525#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
525 526
526void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); 527void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
527 528
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index d4f38c5fd44e..d0bf422ae374 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -38,6 +38,15 @@ typedef enum {
38 FL_XIP_WHILE_ERASING, 38 FL_XIP_WHILE_ERASING,
39 FL_XIP_WHILE_WRITING, 39 FL_XIP_WHILE_WRITING,
40 FL_SHUTDOWN, 40 FL_SHUTDOWN,
41 /* These 2 come from nand_state_t, which has been unified here */
42 FL_READING,
43 FL_CACHEDPRG,
44 /* These 4 come from onenand_state_t, which has been unified here */
45 FL_RESETING,
46 FL_OTPING,
47 FL_PREPARING_ERASE,
48 FL_VERIFYING_ERASE,
49
41 FL_UNKNOWN 50 FL_UNKNOWN
42} flstate_t; 51} flstate_t;
43 52
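Illustrative only: with the NAND and OneNAND chip states folded into flstate_t, both drivers can use the same idiom when marking a chip busy. A real driver serializes this through nand_get_device()/the chip lock, which the sketch below leaves out.

        static void demo_mark_reading(struct nand_chip *chip)
        {
                /* chip->state is now the shared flstate_t, not a private enum */
                if (chip->state == FL_READY)
                        chip->state = FL_READING;
        }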
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7a232a9bdd62..ccab9dfc5217 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,6 +21,8 @@
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24#include <linux/mtd/flashchip.h>
25#include <linux/mtd/bbm.h>
24 26
25struct mtd_info; 27struct mtd_info;
26/* Scan and identify a NAND device */ 28/* Scan and identify a NAND device */
@@ -168,7 +170,6 @@ typedef enum {
168/* Chip does not allow subpage writes */ 170/* Chip does not allow subpage writes */
169#define NAND_NO_SUBPAGE_WRITE 0x00000200 171#define NAND_NO_SUBPAGE_WRITE 0x00000200
170 172
171
172/* Options valid for Samsung large page devices */ 173/* Options valid for Samsung large page devices */
173#define NAND_SAMSUNG_LP_OPTIONS \ 174#define NAND_SAMSUNG_LP_OPTIONS \
174 (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) 175 (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@@ -194,6 +195,9 @@ typedef enum {
194/* This option is defined if the board driver allocates its own buffers 195/* This option is defined if the board driver allocates its own buffers
195 (e.g. because it needs them DMA-coherent */ 196 (e.g. because it needs them DMA-coherent */
196#define NAND_OWN_BUFFERS 0x00040000 197#define NAND_OWN_BUFFERS 0x00040000
198/* Chip may not exist, so silence any errors in scan */
199#define NAND_SCAN_SILENT_NODEV 0x00080000
200
197/* Options set by nand scan */ 201/* Options set by nand scan */
198/* Nand scan has allocated controller struct */ 202/* Nand scan has allocated controller struct */
199#define NAND_CONTROLLER_ALLOC 0x80000000 203#define NAND_CONTROLLER_ALLOC 0x80000000
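A hedged sketch of the intended use of NAND_SCAN_SILENT_NODEV in a board driver with an optional socket (the function name is made up): a missing chip is reported through the return value instead of scan-time error messages.

        static int demo_probe_optional_chip(struct mtd_info *mtd, struct nand_chip *chip)
        {
                chip->options |= NAND_SCAN_SILENT_NODEV;
                if (nand_scan(mtd, 1))
                        return -ENODEV; /* nothing fitted - not worth logging */
                return 0;
        }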
@@ -202,20 +206,6 @@ typedef enum {
202#define NAND_CI_CHIPNR_MSK 0x03 206#define NAND_CI_CHIPNR_MSK 0x03
203#define NAND_CI_CELLTYPE_MSK 0x0C 207#define NAND_CI_CELLTYPE_MSK 0x0C
204 208
205/*
206 * nand_state_t - chip states
207 * Enumeration for NAND flash chip state
208 */
209typedef enum {
210 FL_READY,
211 FL_READING,
212 FL_WRITING,
213 FL_ERASING,
214 FL_SYNCING,
215 FL_CACHEDPRG,
216 FL_PM_SUSPENDED,
217} nand_state_t;
218
219/* Keep gcc happy */ 209/* Keep gcc happy */
220struct nand_chip; 210struct nand_chip;
221 211
@@ -402,7 +392,7 @@ struct nand_chip {
402 uint8_t cellinfo; 392 uint8_t cellinfo;
403 int badblockpos; 393 int badblockpos;
404 394
405 nand_state_t state; 395 flstate_t state;
406 396
407 uint8_t *oob_poi; 397 uint8_t *oob_poi;
408 struct nand_hw_control *controller; 398 struct nand_hw_control *controller;
@@ -470,75 +460,6 @@ struct nand_manufacturers {
470extern struct nand_flash_dev nand_flash_ids[]; 460extern struct nand_flash_dev nand_flash_ids[];
471extern struct nand_manufacturers nand_manuf_ids[]; 461extern struct nand_manufacturers nand_manuf_ids[];
472 462
473/**
474 * struct nand_bbt_descr - bad block table descriptor
475 * @options: options for this descriptor
476 * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
477 * when bbt is searched, then we store the found bbts pages here.
478 * Its an array and supports up to 8 chips now
479 * @offs: offset of the pattern in the oob area of the page
480 * @veroffs: offset of the bbt version counter in the oob are of the page
481 * @version: version read from the bbt page during scan
482 * @len: length of the pattern, if 0 no pattern check is performed
483 * @maxblocks: maximum number of blocks to search for a bbt. This number of
484 * blocks is reserved at the end of the device where the tables are
485 * written.
486 * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
487 * bad) block in the stored bbt
488 * @pattern: pattern to identify bad block table or factory marked good /
489 * bad blocks, can be NULL, if len = 0
490 *
491 * Descriptor for the bad block table marker and the descriptor for the
492 * pattern which identifies good and bad blocks. The assumption is made
493 * that the pattern and the version count are always located in the oob area
494 * of the first block.
495 */
496struct nand_bbt_descr {
497 int options;
498 int pages[NAND_MAX_CHIPS];
499 int offs;
500 int veroffs;
501 uint8_t version[NAND_MAX_CHIPS];
502 int len;
503 int maxblocks;
504 int reserved_block_code;
505 uint8_t *pattern;
506};
507
508/* Options for the bad block table descriptors */
509
510/* The number of bits used per block in the bbt on the device */
511#define NAND_BBT_NRBITS_MSK 0x0000000F
512#define NAND_BBT_1BIT 0x00000001
513#define NAND_BBT_2BIT 0x00000002
514#define NAND_BBT_4BIT 0x00000004
515#define NAND_BBT_8BIT 0x00000008
516/* The bad block table is in the last good block of the device */
517#define NAND_BBT_LASTBLOCK 0x00000010
518/* The bbt is at the given page, else we must scan for the bbt */
519#define NAND_BBT_ABSPAGE 0x00000020
520/* The bbt is at the given page, else we must scan for the bbt */
521#define NAND_BBT_SEARCH 0x00000040
522/* bbt is stored per chip on multichip devices */
523#define NAND_BBT_PERCHIP 0x00000080
524/* bbt has a version counter at offset veroffs */
525#define NAND_BBT_VERSION 0x00000100
526/* Create a bbt if none axists */
527#define NAND_BBT_CREATE 0x00000200
528/* Search good / bad pattern through all pages of a block */
529#define NAND_BBT_SCANALLPAGES 0x00000400
530/* Scan block empty during good / bad block scan */
531#define NAND_BBT_SCANEMPTY 0x00000800
532/* Write bbt if neccecary */
533#define NAND_BBT_WRITE 0x00001000
534/* Read and write back block contents when writing bbt */
535#define NAND_BBT_SAVECONTENT 0x00002000
536/* Search good / bad pattern on the first and the second page */
537#define NAND_BBT_SCAN2NDPAGE 0x00004000
538
539/* The maximum number of blocks to scan for a bbt */
540#define NAND_BBT_SCAN_MAXBLOCKS 4
541
542extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); 463extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
543extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs); 464extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
544extern int nand_default_bbt(struct mtd_info *mtd); 465extern int nand_default_bbt(struct mtd_info *mtd);
@@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
548extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, 469extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
549 size_t * retlen, uint8_t * buf); 470 size_t * retlen, uint8_t * buf);
550 471
551/*
552* Constants for oob configuration
553*/
554#define NAND_SMALL_BADBLOCK_POS 5
555#define NAND_LARGE_BADBLOCK_POS 0
556
557/** 472/**
558 * struct platform_nand_chip - chip level device structure 473 * struct platform_nand_chip - chip level device structure
559 * @nr_chips: max. number of chips to scan for 474 * @nr_chips: max. number of chips to scan for
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 052ea8ca2434..41bc013571d0 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -16,7 +16,13 @@
16struct mtd_info; 16struct mtd_info;
17 17
18/* 18/*
19 * Calculate 3 byte ECC code for 256 byte block 19 * Calculate 3 byte ECC code for eccsize byte block
20 */
21void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
22 u_char *ecc_code);
23
24/*
25 * Calculate 3 byte ECC code for 256/512 byte block
20 */ 26 */
21int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); 27int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
22 28
@@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
27 unsigned int eccsize); 33 unsigned int eccsize);
28 34
29/* 35/*
30 * Detect and correct a 1 bit error for 256 byte block 36 * Detect and correct a 1 bit error for 256/512 byte block
31 */ 37 */
32int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); 38int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
33 39
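For illustration, a minimal software-ECC check built on the eccsize-aware helpers; the buffer is assumed to hold one 256-byte block read back from flash together with its stored 3-byte ECC.

        static int demo_check_block(u_char *buf, u_char *ecc_read)
        {
                u_char ecc_calc[3];

                __nand_calculate_ecc(buf, 256, ecc_calc);
                /* returns the number of corrected bitflips, or < 0 if uncorrectable */
                return __nand_correct_data(buf, ecc_read, ecc_calc, 256);
        }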
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4e49f3350678..5509eb06b326 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/include/linux/mtd/onenand.h 2 * linux/include/linux/mtd/onenand.h
3 * 3 *
4 * Copyright (C) 2005-2007 Samsung Electronics 4 * Copyright © 2005-2009 Samsung Electronics
5 * Kyungmin Park <kyungmin.park@samsung.com> 5 * Kyungmin Park <kyungmin.park@samsung.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -14,6 +14,7 @@
14 14
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/completion.h> 16#include <linux/completion.h>
17#include <linux/mtd/flashchip.h>
17#include <linux/mtd/onenand_regs.h> 18#include <linux/mtd/onenand_regs.h>
18#include <linux/mtd/bbm.h> 19#include <linux/mtd/bbm.h>
19 20
@@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
25/* Free resources held by the OneNAND device */ 26/* Free resources held by the OneNAND device */
26extern void onenand_release(struct mtd_info *mtd); 27extern void onenand_release(struct mtd_info *mtd);
27 28
28/*
29 * onenand_state_t - chip states
30 * Enumeration for OneNAND flash chip state
31 */
32typedef enum {
33 FL_READY,
34 FL_READING,
35 FL_WRITING,
36 FL_ERASING,
37 FL_SYNCING,
38 FL_LOCKING,
39 FL_RESETING,
40 FL_OTPING,
41 FL_PM_SUSPENDED,
42} onenand_state_t;
43
44/** 29/**
45 * struct onenand_bufferram - OneNAND BufferRAM Data 30 * struct onenand_bufferram - OneNAND BufferRAM Data
46 * @blockpage: block & page address in BufferRAM 31 * @blockpage: block & page address in BufferRAM
@@ -137,7 +122,7 @@ struct onenand_chip {
137 122
138 spinlock_t chip_lock; 123 spinlock_t chip_lock;
139 wait_queue_head_t wq; 124 wait_queue_head_t wq;
140 onenand_state_t state; 125 flstate_t state;
141 unsigned char *page_buf; 126 unsigned char *page_buf;
142 unsigned char *oob_buf; 127 unsigned char *oob_buf;
143 128
@@ -152,6 +137,8 @@ struct onenand_chip {
152/* 137/*
153 * Helper macros 138 * Helper macros
154 */ 139 */
140#define ONENAND_PAGES_PER_BLOCK (1<<6)
141
155#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) 142#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
156#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) 143#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
157#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) 144#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index acadbf53a69f..cd6f3b431195 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -131,6 +131,8 @@
131#define ONENAND_CMD_LOCK_TIGHT (0x2C) 131#define ONENAND_CMD_LOCK_TIGHT (0x2C)
132#define ONENAND_CMD_UNLOCK_ALL (0x27) 132#define ONENAND_CMD_UNLOCK_ALL (0x27)
133#define ONENAND_CMD_ERASE (0x94) 133#define ONENAND_CMD_ERASE (0x94)
134#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
135#define ONENAND_CMD_ERASE_VERIFY (0x71)
134#define ONENAND_CMD_RESET (0xF0) 136#define ONENAND_CMD_RESET (0xF0)
135#define ONENAND_CMD_OTP_ACCESS (0x65) 137#define ONENAND_CMD_OTP_ACCESS (0x65)
136#define ONENAND_CMD_READID (0x90) 138#define ONENAND_CMD_READID (0x90)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 51071b335751..89b28812ec24 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -2,6 +2,7 @@
2#define _LINUX_NFS_XDR_H 2#define _LINUX_NFS_XDR_H
3 3
4#include <linux/nfsacl.h> 4#include <linux/nfsacl.h>
5#include <linux/nfs3.h>
5 6
6/* 7/*
7 * To change the maximum rsize and wsize supported by the NFS client, adjust 8 * To change the maximum rsize and wsize supported by the NFS client, adjust
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 43011b69297c..f321b578edeb 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -29,6 +29,7 @@
29#ifdef __KERNEL__ 29#ifdef __KERNEL__
30 30
31#include <linux/posix_acl.h> 31#include <linux/posix_acl.h>
32#include <linux/sunrpc/xdr.h>
32 33
33/* Maximum number of ACL entries over NFS */ 34/* Maximum number of ACL entries over NFS */
34#define NFS_ACL_MAX_ENTRIES 1024 35#define NFS_ACL_MAX_ENTRIES 1024
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index a6d9ef2bb34a..8ae78a61eea4 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -12,7 +12,7 @@
12 12
13# include <linux/types.h> 13# include <linux/types.h>
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15# include <linux/in.h> 15# include <linux/nfsd/nfsfh.h>
16#endif 16#endif
17 17
18/* 18/*
@@ -39,11 +39,23 @@
39#define NFSEXP_FSID 0x2000 39#define NFSEXP_FSID 0x2000
40#define NFSEXP_CROSSMOUNT 0x4000 40#define NFSEXP_CROSSMOUNT 0x4000
41#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ 41#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */
42#define NFSEXP_ALLFLAGS 0xFE3F 42/*
43 * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4
44 * clients, and only to the single directory that is the root of the
45 * export; further lookup and readdir operations are treated as if every
46 * subdirectory was a mountpoint, and ignored if they are not themselves
47 * exported. This is used by nfsd and mountd to construct the NFSv4
48 * pseudofilesystem, which provides access only to paths leading to each
49 * exported filesystem.
50 */
51#define NFSEXP_V4ROOT 0x10000
52/* All flags that we claim to support. (Note we don't support NOACL.) */
53#define NFSEXP_ALLFLAGS 0x17E3F
43 54
44/* The flags that may vary depending on security flavor: */ 55/* The flags that may vary depending on security flavor: */
45#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ 56#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
46 | NFSEXP_ALLSQUASH) 57 | NFSEXP_ALLSQUASH \
58 | NFSEXP_INSECURE_PORT)
47 59
48#ifdef __KERNEL__ 60#ifdef __KERNEL__
49 61
@@ -108,7 +120,6 @@ struct svc_expkey {
108 struct path ek_path; 120 struct path ek_path;
109}; 121};
110 122
111#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
112#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) 123#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
113#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) 124#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
114#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) 125#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
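As a rough illustration of how the new flag is consumed (the helper name is made up): code handling an export can treat NFSEXP_V4ROOT entries as pseudofilesystem roots and restrict what it exposes beneath them.

        static inline int demo_is_v4_pseudoroot(struct svc_export *exp)
        {
                return (exp->ex_flags & NFSEXP_V4ROOT) != 0;
        }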
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index 8f641c908450..65e333afaee4 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -16,11 +16,9 @@
16 16
17# include <linux/types.h> 17# include <linux/types.h>
18#ifdef __KERNEL__ 18#ifdef __KERNEL__
19# include <linux/string.h> 19# include <linux/sunrpc/svc.h>
20# include <linux/fs.h>
21#endif 20#endif
22#include <linux/nfsd/const.h> 21#include <linux/nfsd/const.h>
23#include <linux/nfsd/debug.h>
24 22
25/* 23/*
26 * This is the old "dentry style" Linux NFSv2 file handle. 24 * This is the old "dentry style" Linux NFSv2 file handle.
@@ -164,208 +162,6 @@ typedef struct svc_fh {
164 162
165} svc_fh; 163} svc_fh;
166 164
167enum nfsd_fsid {
168 FSID_DEV = 0,
169 FSID_NUM,
170 FSID_MAJOR_MINOR,
171 FSID_ENCODE_DEV,
172 FSID_UUID4_INUM,
173 FSID_UUID8,
174 FSID_UUID16,
175 FSID_UUID16_INUM,
176};
177
178enum fsid_source {
179 FSIDSOURCE_DEV,
180 FSIDSOURCE_FSID,
181 FSIDSOURCE_UUID,
182};
183extern enum fsid_source fsid_source(struct svc_fh *fhp);
184
185
186/* This might look a little large to "inline" but in all calls except
187 * one, 'vers' is constant so moste of the function disappears.
188 */
189static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino,
190 u32 fsid, unsigned char *uuid)
191{
192 u32 *up;
193 switch(vers) {
194 case FSID_DEV:
195 fsidv[0] = htonl((MAJOR(dev)<<16) |
196 MINOR(dev));
197 fsidv[1] = ino_t_to_u32(ino);
198 break;
199 case FSID_NUM:
200 fsidv[0] = fsid;
201 break;
202 case FSID_MAJOR_MINOR:
203 fsidv[0] = htonl(MAJOR(dev));
204 fsidv[1] = htonl(MINOR(dev));
205 fsidv[2] = ino_t_to_u32(ino);
206 break;
207
208 case FSID_ENCODE_DEV:
209 fsidv[0] = new_encode_dev(dev);
210 fsidv[1] = ino_t_to_u32(ino);
211 break;
212
213 case FSID_UUID4_INUM:
214 /* 4 byte fsid and inode number */
215 up = (u32*)uuid;
216 fsidv[0] = ino_t_to_u32(ino);
217 fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3];
218 break;
219
220 case FSID_UUID8:
221 /* 8 byte fsid */
222 up = (u32*)uuid;
223 fsidv[0] = up[0] ^ up[2];
224 fsidv[1] = up[1] ^ up[3];
225 break;
226
227 case FSID_UUID16:
228 /* 16 byte fsid - NFSv3+ only */
229 memcpy(fsidv, uuid, 16);
230 break;
231
232 case FSID_UUID16_INUM:
233 /* 8 byte inode and 16 byte fsid */
234 *(u64*)fsidv = (u64)ino;
235 memcpy(fsidv+2, uuid, 16);
236 break;
237 default: BUG();
238 }
239}
240
241static inline int key_len(int type)
242{
243 switch(type) {
244 case FSID_DEV: return 8;
245 case FSID_NUM: return 4;
246 case FSID_MAJOR_MINOR: return 12;
247 case FSID_ENCODE_DEV: return 8;
248 case FSID_UUID4_INUM: return 8;
249 case FSID_UUID8: return 8;
250 case FSID_UUID16: return 16;
251 case FSID_UUID16_INUM: return 24;
252 default: return 0;
253 }
254}
255
256/*
257 * Shorthand for dprintk()'s
258 */
259extern char * SVCFH_fmt(struct svc_fh *fhp);
260
261/*
262 * Function prototypes
263 */
264__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
265__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
266__be32 fh_update(struct svc_fh *);
267void fh_put(struct svc_fh *);
268
269static __inline__ struct svc_fh *
270fh_copy(struct svc_fh *dst, struct svc_fh *src)
271{
272 WARN_ON(src->fh_dentry || src->fh_locked);
273
274 *dst = *src;
275 return dst;
276}
277
278static inline void
279fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
280{
281 dst->fh_size = src->fh_size;
282 memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
283}
284
285static __inline__ struct svc_fh *
286fh_init(struct svc_fh *fhp, int maxsize)
287{
288 memset(fhp, 0, sizeof(*fhp));
289 fhp->fh_maxsize = maxsize;
290 return fhp;
291}
292
293#ifdef CONFIG_NFSD_V3
294/*
295 * Fill in the pre_op attr for the wcc data
296 */
297static inline void
298fill_pre_wcc(struct svc_fh *fhp)
299{
300 struct inode *inode;
301
302 inode = fhp->fh_dentry->d_inode;
303 if (!fhp->fh_pre_saved) {
304 fhp->fh_pre_mtime = inode->i_mtime;
305 fhp->fh_pre_ctime = inode->i_ctime;
306 fhp->fh_pre_size = inode->i_size;
307 fhp->fh_pre_change = inode->i_version;
308 fhp->fh_pre_saved = 1;
309 }
310}
311
312extern void fill_post_wcc(struct svc_fh *);
313#else
314#define fill_pre_wcc(ignored)
315#define fill_post_wcc(notused)
316#endif /* CONFIG_NFSD_V3 */
317
318
319/*
320 * Lock a file handle/inode
321 * NOTE: both fh_lock and fh_unlock are done "by hand" in
322 * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
323 * so, any changes here should be reflected there.
324 */
325
326static inline void
327fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
328{
329 struct dentry *dentry = fhp->fh_dentry;
330 struct inode *inode;
331
332 dfprintk(FILEOP, "nfsd: fh_lock(%s) locked = %d\n",
333 SVCFH_fmt(fhp), fhp->fh_locked);
334
335 BUG_ON(!dentry);
336
337 if (fhp->fh_locked) {
338 printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
339 dentry->d_parent->d_name.name, dentry->d_name.name);
340 return;
341 }
342
343 inode = dentry->d_inode;
344 mutex_lock_nested(&inode->i_mutex, subclass);
345 fill_pre_wcc(fhp);
346 fhp->fh_locked = 1;
347}
348
349static inline void
350fh_lock(struct svc_fh *fhp)
351{
352 fh_lock_nested(fhp, I_MUTEX_NORMAL);
353}
354
355/*
356 * Unlock a file handle/inode
357 */
358static inline void
359fh_unlock(struct svc_fh *fhp)
360{
361 BUG_ON(!fhp->fh_dentry);
362
363 if (fhp->fh_locked) {
364 fill_post_wcc(fhp);
365 mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
366 fhp->fh_locked = 0;
367 }
368}
369#endif /* __KERNEL__ */ 165#endif /* __KERNEL__ */
370 166
371 167
diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h
index 7a3b565b898f..812bc1e160dc 100644
--- a/include/linux/nfsd/syscall.h
+++ b/include/linux/nfsd/syscall.h
@@ -9,14 +9,8 @@
9#ifndef NFSD_SYSCALL_H 9#ifndef NFSD_SYSCALL_H
10#define NFSD_SYSCALL_H 10#define NFSD_SYSCALL_H
11 11
12# include <linux/types.h> 12#include <linux/types.h>
13#ifdef __KERNEL__
14# include <linux/in.h>
15#endif
16#include <linux/posix_types.h>
17#include <linux/nfsd/const.h>
18#include <linux/nfsd/export.h> 13#include <linux/nfsd/export.h>
19#include <linux/nfsd/nfsfh.h>
20 14
21/* 15/*
22 * Version of the syscall interface 16 * Version of the syscall interface
diff --git a/include/linux/node.h b/include/linux/node.h
index 681a697b9a86..06292dac3eab 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -21,13 +21,19 @@
21 21
22#include <linux/sysdev.h> 22#include <linux/sysdev.h>
23#include <linux/cpumask.h> 23#include <linux/cpumask.h>
24#include <linux/workqueue.h>
24 25
25struct node { 26struct node {
26 struct sys_device sysdev; 27 struct sys_device sysdev;
28
29#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
30 struct work_struct node_work;
31#endif
27}; 32};
28 33
29struct memory_block; 34struct memory_block;
30extern struct node node_devices[]; 35extern struct node node_devices[];
36typedef void (*node_registration_func_t)(struct node *);
31 37
32extern int register_node(struct node *, int, struct node *); 38extern int register_node(struct node *, int, struct node *);
33extern void unregister_node(struct node *node); 39extern void unregister_node(struct node *node);
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
39extern int register_mem_sect_under_node(struct memory_block *mem_blk, 45extern int register_mem_sect_under_node(struct memory_block *mem_blk,
40 int nid); 46 int nid);
41extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk); 47extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
48
49#ifdef CONFIG_HUGETLBFS
50extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
51 node_registration_func_t unregister);
52#endif
42#else 53#else
43static inline int register_one_node(int nid) 54static inline int register_one_node(int nid)
44{ 55{
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
65{ 76{
66 return 0; 77 return 0;
67} 78}
79
80static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
81 node_registration_func_t unreg)
82{
83}
68#endif 84#endif
69 85
70#define to_node(sys_device) container_of(sys_device, struct node, sysdev) 86#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
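A hedged sketch of the registration the new hook enables: hugetlbfs hands the node driver a pair of callbacks that create and tear down its per-node sysfs attributes as nodes come and go. Everything named demo_* here is invented.

        static void demo_node_add_attrs(struct node *node)
        {
                /* create per-node hugepages attributes under the node's sysdev */
        }

        static void demo_node_del_attrs(struct node *node)
        {
                /* remove them again on node removal */
        }

        static int __init demo_hugetlb_sysfs_init(void)
        {
                register_hugetlbfs_with_node(demo_node_add_attrs, demo_node_del_attrs);
                return 0;
        }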
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b359c4a9ec9e..454997cccbd8 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp)
245 return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); 245 return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
246} 246}
247 247
248static inline void init_nodemask_of_node(nodemask_t *mask, int node)
249{
250 nodes_clear(*mask);
251 node_set(node, *mask);
252}
253
248#define nodemask_of_node(node) \ 254#define nodemask_of_node(node) \
249({ \ 255({ \
250 typeof(_unused_nodemask_arg_) m; \ 256 typeof(_unused_nodemask_arg_) m; \
251 if (sizeof(m) == sizeof(unsigned long)) { \ 257 if (sizeof(m) == sizeof(unsigned long)) { \
252 m.bits[0] = 1UL<<(node); \ 258 m.bits[0] = 1UL << (node); \
253 } else { \ 259 } else { \
254 nodes_clear(m); \ 260 init_nodemask_of_node(&m, (node)); \
255 node_set((node), m); \
256 } \ 261 } \
257 m; \ 262 m; \
258}) 263})
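Illustrative fragment: the new helper replaces the open-coded clear-then-set pair whenever a caller needs a stack nodemask with exactly one node in it.

        static int demo_local_only(void)
        {
                nodemask_t mask;

                init_nodemask_of_node(&mask, numa_node_id());
                return nodes_weight(mask);      /* always 1: only the local node is set */
        }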
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state)
480#define for_each_online_node(node) for_each_node_state(node, N_ONLINE) 485#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
481 486
482/* 487/*
 483 * For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h) 488 * For nodemask scratch area.
489 * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
490 * name.
484 */ 491 */
485 492#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
486#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ 493#define NODEMASK_ALLOC(type, name, gfp_flags) \
487#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL) 494 type *name = kmalloc(sizeof(*name), gfp_flags)
488#define NODEMASK_FREE(m) kfree(m) 495#define NODEMASK_FREE(m) kfree(m)
489#else 496#else
490#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m 497#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name
491#define NODEMASK_FREE(m) 498#define NODEMASK_FREE(m) do {} while (0)
492#endif 499#endif
493 500
494/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ 501/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
@@ -497,8 +504,10 @@ struct nodemask_scratch {
497 nodemask_t mask2; 504 nodemask_t mask2;
498}; 505};
499 506
500#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x) 507#define NODEMASK_SCRATCH(x) \
501#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) 508 NODEMASK_ALLOC(struct nodemask_scratch, x, \
509 GFP_KERNEL | __GFP_NORETRY)
510#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
502 511
503 512
 504#endif /* __LINUX_NODEMASK_H */ 513#endif /* __LINUX_NODEMASK_H */
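A hedged sketch of the reworked calling convention: the caller now names the type, the variable and the gfp flags, so large NODES_SHIFT configurations allocate from the heap (and must check for NULL) while small ones stay on the stack. The function and 'nid' below are invented.

        static int demo_use_one_node(int nid)
        {
                NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

                if (!nodes_allowed)
                        return -ENOMEM;         /* only possible on the kmalloc path */
                init_nodemask_of_node(nodes_allowed, nid);
                /* ... pass *nodes_allowed to whatever needs the mask ... */
                NODEMASK_FREE(nodes_allowed);
                return 0;
        }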
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a31a7301b159..3aaa31603a86 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -10,4 +10,6 @@
10 10
11#define MAX_NUMNODES (1 << NODES_SHIFT) 11#define MAX_NUMNODES (1 << NODES_SHIFT)
12 12
13#define NUMA_NO_NODE (-1)
14
13#endif /* _LINUX_NUMA_H */ 15#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6aac5fe4f6f1..537662315627 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -10,6 +10,7 @@
10#ifdef __KERNEL__ 10#ifdef __KERNEL__
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/nodemask.h>
13 14
14struct zonelist; 15struct zonelist;
15struct notifier_block; 16struct notifier_block;
@@ -26,7 +27,8 @@ enum oom_constraint {
26extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags); 27extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
27extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); 28extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
28 29
29extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); 30extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
31 int order, nodemask_t *mask);
30extern int register_oom_notifier(struct notifier_block *nb); 32extern int register_oom_notifier(struct notifier_block *nb);
31extern int unregister_oom_notifier(struct notifier_block *nb); 33extern int unregister_oom_notifier(struct notifier_block *nb);
32 34
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b173955..49e907bd067f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,7 +99,7 @@ enum pageflags {
99 PG_buddy, /* Page is free, on buddy lists */ 99 PG_buddy, /* Page is free, on buddy lists */
100 PG_swapbacked, /* Page is backed by RAM/swap */ 100 PG_swapbacked, /* Page is backed by RAM/swap */
101 PG_unevictable, /* Page is "unevictable" */ 101 PG_unevictable, /* Page is "unevictable" */
102#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 102#ifdef CONFIG_MMU
103 PG_mlocked, /* Page is vma mlocked */ 103 PG_mlocked, /* Page is vma mlocked */
104#endif 104#endif
105#ifdef CONFIG_ARCH_USES_PG_UNCACHED 105#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
259PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) 259PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
260 TESTCLEARFLAG(Unevictable, unevictable) 260 TESTCLEARFLAG(Unevictable, unevictable)
261 261
262#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 262#ifdef CONFIG_MMU
263#define MLOCK_PAGES 1
264PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) 263PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
265 TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) 264 TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
266#else 265#else
267#define MLOCK_PAGES 0
268PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) 266PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
269 TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) 267 TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
270#endif 268#endif
@@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page)
393 391
394#endif /* !PAGEFLAGS_EXTENDED */ 392#endif /* !PAGEFLAGS_EXTENDED */
395 393
396#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 394#ifdef CONFIG_MMU
397#define __PG_MLOCKED (1 << PG_mlocked) 395#define __PG_MLOCKED (1 << PG_mlocked)
398#else 396#else
399#define __PG_MLOCKED 0 397#define __PG_MLOCKED 0
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 4b938d4f3ac2..b0e4eb126236 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -57,6 +57,8 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
57static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ 57static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
58 { return test_and_clear_bit(PCG_##lname, &pc->flags); } 58 { return test_and_clear_bit(PCG_##lname, &pc->flags); }
59 59
60TESTPCGFLAG(Locked, LOCK)
61
60/* Cache flag is set only once (at allocation) */ 62/* Cache flag is set only once (at allocation) */
61TESTPCGFLAG(Cache, CACHE) 63TESTPCGFLAG(Cache, CACHE)
62CLEARPCGFLAG(Cache, CACHE) 64CLEARPCGFLAG(Cache, CACHE)
@@ -86,11 +88,6 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
86 bit_spin_lock(PCG_LOCK, &pc->flags); 88 bit_spin_lock(PCG_LOCK, &pc->flags);
87} 89}
88 90
89static inline int trylock_page_cgroup(struct page_cgroup *pc)
90{
91 return bit_spin_trylock(PCG_LOCK, &pc->flags);
92}
93
94static inline void unlock_page_cgroup(struct page_cgroup *pc) 91static inline void unlock_page_cgroup(struct page_cgroup *pc)
95{ 92{
96 bit_spin_unlock(PCG_LOCK, &pc->flags); 93 bit_spin_unlock(PCG_LOCK, &pc->flags);
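Illustrative only: with the trylock variant gone, callers simply take the bit lock and can assert ownership with the new Locked test; the function name is made up.

        static void demo_update_pc(struct page_cgroup *pc)
        {
                lock_page_cgroup(pc);
                VM_BUG_ON(!PageCgroupLocked(pc));       /* new Locked test */
                /* ... modify pc->flags / pc->mem_cgroup under the bit lock ... */
                unlock_page_cgroup(pc);
        }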
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 64a53f74c9a9..da7bdc23f279 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -681,7 +681,7 @@ struct perf_event_context {
681 * Protect the states of the events in the list, 681 * Protect the states of the events in the list,
682 * nr_active, and the list: 682 * nr_active, and the list:
683 */ 683 */
684 spinlock_t lock; 684 raw_spinlock_t lock;
685 /* 685 /*
686 * Protect the list of events. Locking either mutex or lock 686 * Protect the list of events. Locking either mutex or lock
687 * is sufficient to ensure the list doesn't change; to change 687 * is sufficient to ensure the list doesn't change; to change
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6ac..8227f717c70f 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
81 struct list_head prio_list; 81 struct list_head prio_list;
82 struct list_head node_list; 82 struct list_head node_list;
83#ifdef CONFIG_DEBUG_PI_LIST 83#ifdef CONFIG_DEBUG_PI_LIST
84 spinlock_t *lock; 84 raw_spinlock_t *rawlock;
85 spinlock_t *spinlock;
85#endif 86#endif
86}; 87};
87 88
@@ -91,9 +92,11 @@ struct plist_node {
91}; 92};
92 93
93#ifdef CONFIG_DEBUG_PI_LIST 94#ifdef CONFIG_DEBUG_PI_LIST
94# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock 95# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
96# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
95#else 97#else
96# define PLIST_HEAD_LOCK_INIT(_lock) 98# define PLIST_HEAD_LOCK_INIT(_lock)
99# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
97#endif 100#endif
98 101
99#define _PLIST_HEAD_INIT(head) \ 102#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
107 */ 110 */
108#define PLIST_HEAD_INIT(head, _lock) \ 111#define PLIST_HEAD_INIT(head, _lock) \
109{ \ 112{ \
110 _PLIST_HEAD_INIT(head), \ 113 _PLIST_HEAD_INIT(head), \
111 PLIST_HEAD_LOCK_INIT(&(_lock)) \ 114 PLIST_HEAD_LOCK_INIT(&(_lock)) \
112} 115}
113 116
114/** 117/**
118 * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
119 * @head: struct plist_head variable name
120 * @_lock: lock to initialize for this list
121 */
122#define PLIST_HEAD_INIT_RAW(head, _lock) \
123{ \
124 _PLIST_HEAD_INIT(head), \
125 PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
126}
127
128/**
115 * PLIST_NODE_INIT - static struct plist_node initializer 129 * PLIST_NODE_INIT - static struct plist_node initializer
116 * @node: struct plist_node variable name 130 * @node: struct plist_node variable name
117 * @__prio: initial node priority 131 * @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
119#define PLIST_NODE_INIT(node, __prio) \ 133#define PLIST_NODE_INIT(node, __prio) \
120{ \ 134{ \
121 .prio = (__prio), \ 135 .prio = (__prio), \
122 .plist = { _PLIST_HEAD_INIT((node).plist) }, \ 136 .plist = { _PLIST_HEAD_INIT((node).plist) }, \
123} 137}
124 138
125/** 139/**
126 * plist_head_init - dynamic struct plist_head initializer 140 * plist_head_init - dynamic struct plist_head initializer
127 * @head: &struct plist_head pointer 141 * @head: &struct plist_head pointer
128 * @lock: list spinlock, remembered for debugging 142 * @lock: spinlock protecting the list (debugging)
129 */ 143 */
130static inline void 144static inline void
131plist_head_init(struct plist_head *head, spinlock_t *lock) 145plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
133 INIT_LIST_HEAD(&head->prio_list); 147 INIT_LIST_HEAD(&head->prio_list);
134 INIT_LIST_HEAD(&head->node_list); 148 INIT_LIST_HEAD(&head->node_list);
135#ifdef CONFIG_DEBUG_PI_LIST 149#ifdef CONFIG_DEBUG_PI_LIST
136 head->lock = lock; 150 head->spinlock = lock;
151 head->rawlock = NULL;
152#endif
153}
154
155/**
156 * plist_head_init_raw - dynamic struct plist_head initializer
157 * @head: &struct plist_head pointer
158 * @lock: raw_spinlock protecting the list (debugging)
159 */
160static inline void
161plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
162{
163 INIT_LIST_HEAD(&head->prio_list);
164 INIT_LIST_HEAD(&head->node_list);
165#ifdef CONFIG_DEBUG_PI_LIST
166 head->rawlock = lock;
167 head->spinlock = NULL;
137#endif 168#endif
138} 169}
139 170
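A hedged sketch of the two initialization paths the raw variants add (names invented, DEFINE_RAW_SPINLOCK assumed from the raw spinlock rework): a plist whose debug checks should reference a raw_spinlock uses the *_RAW forms, everything else keeps the existing spinlock forms.

        static DEFINE_RAW_SPINLOCK(demo_raw_lock);
        static struct plist_head demo_static_head =
                PLIST_HEAD_INIT_RAW(demo_static_head, demo_raw_lock);

        static void demo_setup(struct plist_head *head, raw_spinlock_t *lock)
        {
                plist_head_init_raw(head, lock);        /* dynamic counterpart */
        }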
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0d65934246af..198b8f9fe05e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -219,7 +219,7 @@ struct dev_pm_ops {
219 * to RAM and hibernation. 219 * to RAM and hibernation.
220 */ 220 */
221#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ 221#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
222struct dev_pm_ops name = { \ 222const struct dev_pm_ops name = { \
223 .suspend = suspend_fn, \ 223 .suspend = suspend_fn, \
224 .resume = resume_fn, \ 224 .resume = resume_fn, \
225 .freeze = suspend_fn, \ 225 .freeze = suspend_fn, \
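Because the macro now emits a const object, a typical declaration (driver names invented) looks like the sketch below; the structure is then wired up through the driver's .pm field as before.

        static int demo_suspend(struct device *dev) { return 0; }
        static int demo_resume(struct device *dev) { return 0; }

        static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);
        /* ... .driver = { .pm = &demo_pm_ops, } ... */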
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 7456d7d87a19..56f2d63a5cbb 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -105,12 +105,7 @@ static inline int ptrace_reparented(struct task_struct *child)
105{ 105{
106 return child->real_parent != child->parent; 106 return child->real_parent != child->parent;
107} 107}
108static inline void ptrace_link(struct task_struct *child, 108
109 struct task_struct *new_parent)
110{
111 if (unlikely(child->ptrace))
112 __ptrace_link(child, new_parent);
113}
114static inline void ptrace_unlink(struct task_struct *child) 109static inline void ptrace_unlink(struct task_struct *child)
115{ 110{
116 if (unlikely(child->ptrace)) 111 if (unlikely(child->ptrace))
@@ -169,9 +164,9 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
169 INIT_LIST_HEAD(&child->ptraced); 164 INIT_LIST_HEAD(&child->ptraced);
170 child->parent = child->real_parent; 165 child->parent = child->real_parent;
171 child->ptrace = 0; 166 child->ptrace = 0;
172 if (unlikely(ptrace)) { 167 if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
173 child->ptrace = current->ptrace; 168 child->ptrace = current->ptrace;
174 ptrace_link(child, current->parent); 169 __ptrace_link(child, current->parent);
175 } 170 }
176} 171}
177 172
@@ -278,6 +273,18 @@ static inline void user_enable_block_step(struct task_struct *task)
278} 273}
279#endif /* arch_has_block_step */ 274#endif /* arch_has_block_step */
280 275
276#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
277extern void user_single_step_siginfo(struct task_struct *tsk,
278 struct pt_regs *regs, siginfo_t *info);
279#else
280static inline void user_single_step_siginfo(struct task_struct *tsk,
281 struct pt_regs *regs, siginfo_t *info)
282{
283 memset(info, 0, sizeof(*info));
284 info->si_signo = SIGTRAP;
285}
286#endif
287
281#ifndef arch_ptrace_stop_needed 288#ifndef arch_ptrace_stop_needed
282/** 289/**
283 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called 290 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
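Roughly, an architecture that can do better than the generic memset-plus-SIGTRAP default defines ARCH_HAS_USER_SINGLE_STEP_INFO and supplies its own filler; the sketch below is illustrative, not any particular architecture's implementation.

        void user_single_step_siginfo(struct task_struct *tsk,
                                      struct pt_regs *regs, siginfo_t *info)
        {
                memset(info, 0, sizeof(*info));
                info->si_signo = SIGTRAP;
                info->si_code  = TRAP_TRACE;
                info->si_addr  = (void __user *)instruction_pointer(regs);
        }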
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index a05b4a20768d..c96c1858fe2c 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2051,25 +2051,12 @@ void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
2051int search_by_entry_key(struct super_block *sb, const struct cpu_key *key, 2051int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
2052 struct treepath *path, struct reiserfs_dir_entry *de); 2052 struct treepath *path, struct reiserfs_dir_entry *de);
2053struct dentry *reiserfs_get_parent(struct dentry *); 2053struct dentry *reiserfs_get_parent(struct dentry *);
2054/* procfs.c */
2055
2056#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
2057#define REISERFS_PROC_INFO
2058#else
2059#undef REISERFS_PROC_INFO
2060#endif
2061 2054
2055#ifdef CONFIG_REISERFS_PROC_INFO
2062int reiserfs_proc_info_init(struct super_block *sb); 2056int reiserfs_proc_info_init(struct super_block *sb);
2063int reiserfs_proc_info_done(struct super_block *sb); 2057int reiserfs_proc_info_done(struct super_block *sb);
2064struct proc_dir_entry *reiserfs_proc_register_global(char *name,
2065 read_proc_t * func);
2066void reiserfs_proc_unregister_global(const char *name);
2067int reiserfs_proc_info_global_init(void); 2058int reiserfs_proc_info_global_init(void);
2068int reiserfs_proc_info_global_done(void); 2059int reiserfs_proc_info_global_done(void);
2069int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
2070 int count, int *eof, void *data);
2071
2072#if defined( REISERFS_PROC_INFO )
2073 2060
2074#define PROC_EXP( e ) e 2061#define PROC_EXP( e ) e
2075 2062
@@ -2084,6 +2071,26 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
2084 PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \ 2071 PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \
2085 PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) ) 2072 PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
2086#else 2073#else
2074static inline int reiserfs_proc_info_init(struct super_block *sb)
2075{
2076 return 0;
2077}
2078
2079static inline int reiserfs_proc_info_done(struct super_block *sb)
2080{
2081 return 0;
2082}
2083
2084static inline int reiserfs_proc_info_global_init(void)
2085{
2086 return 0;
2087}
2088
2089static inline int reiserfs_proc_info_global_done(void)
2090{
2091 return 0;
2092}
2093
2087#define PROC_EXP( e ) 2094#define PROC_EXP( e )
2088#define VOID_V ( ( void ) 0 ) 2095#define VOID_V ( ( void ) 0 )
2089#define PROC_INFO_MAX( sb, field, value ) VOID_V 2096#define PROC_INFO_MAX( sb, field, value ) VOID_V
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba7032609..b019ae64e2ab 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
26 */ 26 */
27struct anon_vma { 27struct anon_vma {
28 spinlock_t lock; /* Serialize access to vma list */ 28 spinlock_t lock; /* Serialize access to vma list */
29#ifdef CONFIG_KSM
30 atomic_t ksm_refcount;
31#endif
29 /* 32 /*
30 * NOTE: the LSB of the head.next is set by 33 * NOTE: the LSB of the head.next is set by
31 * mm_take_all_locks() _after_ taking the above lock. So the 34 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -38,6 +41,34 @@ struct anon_vma {
38}; 41};
39 42
40#ifdef CONFIG_MMU 43#ifdef CONFIG_MMU
44#ifdef CONFIG_KSM
45static inline void ksm_refcount_init(struct anon_vma *anon_vma)
46{
47 atomic_set(&anon_vma->ksm_refcount, 0);
48}
49
50static inline int ksm_refcount(struct anon_vma *anon_vma)
51{
52 return atomic_read(&anon_vma->ksm_refcount);
53}
54#else
55static inline void ksm_refcount_init(struct anon_vma *anon_vma)
56{
57}
58
59static inline int ksm_refcount(struct anon_vma *anon_vma)
60{
61 return 0;
62}
63#endif /* CONFIG_KSM */
64
65static inline struct anon_vma *page_anon_vma(struct page *page)
66{
67 if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
68 PAGE_MAPPING_ANON)
69 return NULL;
70 return page_rmapping(page);
71}
41 72
42static inline void anon_vma_lock(struct vm_area_struct *vma) 73static inline void anon_vma_lock(struct vm_area_struct *vma)
43{ 74{
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
62void anon_vma_unlink(struct vm_area_struct *); 93void anon_vma_unlink(struct vm_area_struct *);
63void anon_vma_link(struct vm_area_struct *); 94void anon_vma_link(struct vm_area_struct *);
64void __anon_vma_link(struct vm_area_struct *); 95void __anon_vma_link(struct vm_area_struct *);
96void anon_vma_free(struct anon_vma *);
65 97
66/* 98/*
67 * rmap interfaces called when adding or removing pte of page 99 * rmap interfaces called when adding or removing pte of page
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page)
81 */ 113 */
82int page_referenced(struct page *, int is_locked, 114int page_referenced(struct page *, int is_locked,
83 struct mem_cgroup *cnt, unsigned long *vm_flags); 115 struct mem_cgroup *cnt, unsigned long *vm_flags);
116int page_referenced_one(struct page *, struct vm_area_struct *,
117 unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
118
84enum ttu_flags { 119enum ttu_flags {
85 TTU_UNMAP = 0, /* unmap mode */ 120 TTU_UNMAP = 0, /* unmap mode */
86 TTU_MIGRATION = 1, /* migration mode */ 121 TTU_MIGRATION = 1, /* migration mode */
@@ -94,6 +129,8 @@ enum ttu_flags {
94#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) 129#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
95 130
96int try_to_unmap(struct page *, enum ttu_flags flags); 131int try_to_unmap(struct page *, enum ttu_flags flags);
132int try_to_unmap_one(struct page *, struct vm_area_struct *,
133 unsigned long address, enum ttu_flags flags);
97 134
98/* 135/*
99 * Called from mm/filemap_xip.c to unmap empty zero page 136 * Called from mm/filemap_xip.c to unmap empty zero page
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
127void page_unlock_anon_vma(struct anon_vma *anon_vma); 164void page_unlock_anon_vma(struct anon_vma *anon_vma);
128int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); 165int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
129 166
167/*
168 * Called by migrate.c to remove migration ptes, but might be used more later.
169 */
170int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
171 struct vm_area_struct *, unsigned long, void *), void *arg);
172
130#else /* !CONFIG_MMU */ 173#else /* !CONFIG_MMU */
131 174
132#define anon_vma_init() do {} while (0) 175#define anon_vma_init() do {} while (0)
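For illustration, a minimal consumer of the new rmap_walk() hook in the spirit of migration's use (callback names invented; return convention per rmap.h's SWAP_* codes):

        static int demo_rmap_one(struct page *page, struct vm_area_struct *vma,
                                 unsigned long addr, void *arg)
        {
                /* inspect or rewrite the pte that maps 'page' at 'addr' in 'vma' */
                return SWAP_AGAIN;      /* keep walking the remaining vmas */
        }

        static void demo_walk_mappings(struct page *page)
        {
                rmap_walk(page, demo_rmap_one, NULL);
        }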
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d530..281d8fd775e8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
24 * @owner: the mutex owner 24 * @owner: the mutex owner
25 */ 25 */
26struct rt_mutex { 26struct rt_mutex {
27 spinlock_t wait_lock; 27 raw_spinlock_t wait_lock;
28 struct plist_head wait_list; 28 struct plist_head wait_list;
29 struct task_struct *owner; 29 struct task_struct *owner;
30#ifdef CONFIG_DEBUG_RT_MUTEXES 30#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
63#endif 63#endif
64 64
65#define __RT_MUTEX_INITIALIZER(mutexname) \ 65#define __RT_MUTEX_INITIALIZER(mutexname) \
66 { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ 66 { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
67 , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ 67 , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
68 , .owner = NULL \ 68 , .owner = NULL \
69 __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} 69 __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
70 70
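Illustrative only: users of the static initializer are unaffected by the switch to a raw wait_lock; a declaration through DEFINE_RT_MUTEX (which wraps __RT_MUTEX_INITIALIZER) keeps working unchanged.

        static DEFINE_RT_MUTEX(demo_rtmutex);

        static void demo_critical_section(void)
        {
                rt_mutex_lock(&demo_rtmutex);
                /* ... */
                rt_mutex_unlock(&demo_rtmutex);
        }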
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 14fc906ed602..05330fc5b436 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -368,11 +368,9 @@ enum {
368#define RTAX_MAX (__RTAX_MAX - 1) 368#define RTAX_MAX (__RTAX_MAX - 1)
369 369
370#define RTAX_FEATURE_ECN 0x00000001 370#define RTAX_FEATURE_ECN 0x00000001
371#define RTAX_FEATURE_NO_SACK 0x00000002 371#define RTAX_FEATURE_SACK 0x00000002
372#define RTAX_FEATURE_NO_TSTAMP 0x00000004 372#define RTAX_FEATURE_TIMESTAMP 0x00000004
373#define RTAX_FEATURE_ALLFRAG 0x00000008 373#define RTAX_FEATURE_ALLFRAG 0x00000008
374#define RTAX_FEATURE_NO_WSCALE 0x00000010
375#define RTAX_FEATURE_NO_DSACK 0x00000020
376 374
377struct rta_session { 375struct rta_session {
378 __u8 proto; 376 __u8 proto;
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000000..71e0b00b6f2c
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
1#ifndef __LINUX_RWLOCK_H
2#define __LINUX_RWLOCK_H
3
4#ifndef __LINUX_SPINLOCK_H
5# error "please don't include this file directly"
6#endif
7
8/*
9 * rwlock related methods
10 *
11 * split out from spinlock.h
12 *
13 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
14 * Released under the General Public License (GPL).
15 */
16
17#ifdef CONFIG_DEBUG_SPINLOCK
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
19 struct lock_class_key *key);
20# define rwlock_init(lock) \
21do { \
22 static struct lock_class_key __key; \
23 \
24 __rwlock_init((lock), #lock, &__key); \
25} while (0)
26#else
27# define rwlock_init(lock) \
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
29#endif
30
31#ifdef CONFIG_DEBUG_SPINLOCK
32 extern void do_raw_read_lock(rwlock_t *lock);
33#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock);
36 extern void do_raw_write_lock(rwlock_t *lock);
37#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
38 extern int do_raw_write_trylock(rwlock_t *lock);
39 extern void do_raw_write_unlock(rwlock_t *lock);
40#else
41# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
42# define do_raw_read_lock_flags(lock, flags) \
43 arch_read_lock_flags(&(lock)->raw_lock, *(flags))
44# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
45# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
46# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
47# define do_raw_write_lock_flags(lock, flags) \
48 arch_write_lock_flags(&(lock)->raw_lock, *(flags))
49# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
50# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
51#endif
52
53#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
54#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
55
56/*
57 * Define the various rw_lock methods. Note we define these
58 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
59 * methods are defined as nops in the case they are not required.
60 */
61#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
62#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
63
64#define write_lock(lock) _raw_write_lock(lock)
65#define read_lock(lock) _raw_read_lock(lock)
66
67#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
68
69#define read_lock_irqsave(lock, flags) \
70 do { \
71 typecheck(unsigned long, flags); \
72 flags = _raw_read_lock_irqsave(lock); \
73 } while (0)
74#define write_lock_irqsave(lock, flags) \
75 do { \
76 typecheck(unsigned long, flags); \
77 flags = _raw_write_lock_irqsave(lock); \
78 } while (0)
79
80#else
81
82#define read_lock_irqsave(lock, flags) \
83 do { \
84 typecheck(unsigned long, flags); \
85 _raw_read_lock_irqsave(lock, flags); \
86 } while (0)
87#define write_lock_irqsave(lock, flags) \
88 do { \
89 typecheck(unsigned long, flags); \
90 _raw_write_lock_irqsave(lock, flags); \
91 } while (0)
92
93#endif
94
95#define read_lock_irq(lock) _raw_read_lock_irq(lock)
96#define read_lock_bh(lock) _raw_read_lock_bh(lock)
97#define write_lock_irq(lock) _raw_write_lock_irq(lock)
98#define write_lock_bh(lock) _raw_write_lock_bh(lock)
99#define read_unlock(lock) _raw_read_unlock(lock)
100#define write_unlock(lock) _raw_write_unlock(lock)
101#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
102#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
103
104#define read_unlock_irqrestore(lock, flags) \
105 do { \
106 typecheck(unsigned long, flags); \
107 _raw_read_unlock_irqrestore(lock, flags); \
108 } while (0)
109#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
110
111#define write_unlock_irqrestore(lock, flags) \
112 do { \
113 typecheck(unsigned long, flags); \
114 _raw_write_unlock_irqrestore(lock, flags); \
115 } while (0)
116#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
117
118#define write_trylock_irqsave(lock, flags) \
119({ \
120 local_irq_save(flags); \
121 write_trylock(lock) ? \
122 1 : ({ local_irq_restore(flags); 0; }); \
123})
124
125#endif /* __LINUX_RWLOCK_H */
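To round off the new header, a hedged sketch of typical reader/writer usage of the API it collects (data and names invented):

        static DEFINE_RWLOCK(demo_lock);
        static int demo_value;

        static int demo_read(void)
        {
                int v;

                read_lock(&demo_lock);
                v = demo_value;
                read_unlock(&demo_lock);
                return v;
        }

        static void demo_write(int v)
        {
                unsigned long flags;

                write_lock_irqsave(&demo_lock, flags);
                demo_value = v;
                write_unlock_irqrestore(&demo_lock, flags);
        }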
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 000000000000..9c9f0495d37c
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
1#ifndef __LINUX_RWLOCK_API_SMP_H
2#define __LINUX_RWLOCK_API_SMP_H
3
4#ifndef __LINUX_SPINLOCK_API_SMP_H
5# error "please don't include this file directly"
6#endif
7
8/*
9 * include/linux/rwlock_api_smp.h
10 *
11 * spinlock API declarations on SMP (and debug)
12 * (implemented in kernel/spinlock.c)
13 *
14 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
15 * Released under the General Public License (GPL).
16 */
17
18void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
28int __lockfunc _raw_read_trylock(rwlock_t *lock);
29int __lockfunc _raw_write_trylock(rwlock_t *lock);
30void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
31void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
32void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
33void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
34void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
35void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
36void __lockfunc
37_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
38 __releases(lock);
39void __lockfunc
40_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
41 __releases(lock);
42
43#ifdef CONFIG_INLINE_READ_LOCK
44#define _raw_read_lock(lock) __raw_read_lock(lock)
45#endif
46
47#ifdef CONFIG_INLINE_WRITE_LOCK
48#define _raw_write_lock(lock) __raw_write_lock(lock)
49#endif
50
51#ifdef CONFIG_INLINE_READ_LOCK_BH
52#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
53#endif
54
55#ifdef CONFIG_INLINE_WRITE_LOCK_BH
56#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
57#endif
58
59#ifdef CONFIG_INLINE_READ_LOCK_IRQ
60#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
61#endif
62
63#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
64#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
65#endif
66
67#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
68#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
69#endif
70
71#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
72#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
73#endif
74
75#ifdef CONFIG_INLINE_READ_TRYLOCK
76#define _raw_read_trylock(lock) __raw_read_trylock(lock)
77#endif
78
79#ifdef CONFIG_INLINE_WRITE_TRYLOCK
80#define _raw_write_trylock(lock) __raw_write_trylock(lock)
81#endif
82
83#ifdef CONFIG_INLINE_READ_UNLOCK
84#define _raw_read_unlock(lock) __raw_read_unlock(lock)
85#endif
86
87#ifdef CONFIG_INLINE_WRITE_UNLOCK
88#define _raw_write_unlock(lock) __raw_write_unlock(lock)
89#endif
90
91#ifdef CONFIG_INLINE_READ_UNLOCK_BH
92#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
93#endif
94
95#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
96#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
97#endif
98
99#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
100#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
101#endif
102
103#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
104#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
105#endif
106
107#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
108#define _raw_read_unlock_irqrestore(lock, flags) \
109 __raw_read_unlock_irqrestore(lock, flags)
110#endif
111
112#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
113#define _raw_write_unlock_irqrestore(lock, flags) \
114 __raw_write_unlock_irqrestore(lock, flags)
115#endif
116
117static inline int __raw_read_trylock(rwlock_t *lock)
118{
119 preempt_disable();
120 if (do_raw_read_trylock(lock)) {
121 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
122 return 1;
123 }
124 preempt_enable();
125 return 0;
126}
127
128static inline int __raw_write_trylock(rwlock_t *lock)
129{
130 preempt_disable();
131 if (do_raw_write_trylock(lock)) {
132 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
133 return 1;
134 }
135 preempt_enable();
136 return 0;
137}
138
139/*
140 * If lockdep is enabled then we use the non-preemption spin-ops
141 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
142 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
143 */
144#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
145
146static inline void __raw_read_lock(rwlock_t *lock)
147{
148 preempt_disable();
149 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
150 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
151}
152
153static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
154{
155 unsigned long flags;
156
157 local_irq_save(flags);
158 preempt_disable();
159 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
160 LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
161 do_raw_read_lock_flags, &flags);
162 return flags;
163}
164
165static inline void __raw_read_lock_irq(rwlock_t *lock)
166{
167 local_irq_disable();
168 preempt_disable();
169 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
170 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
171}
172
173static inline void __raw_read_lock_bh(rwlock_t *lock)
174{
175 local_bh_disable();
176 preempt_disable();
177 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
178 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
179}
180
181static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
182{
183 unsigned long flags;
184
185 local_irq_save(flags);
186 preempt_disable();
187 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
188 LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
189 do_raw_write_lock_flags, &flags);
190 return flags;
191}
192
193static inline void __raw_write_lock_irq(rwlock_t *lock)
194{
195 local_irq_disable();
196 preempt_disable();
197 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
198 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
199}
200
201static inline void __raw_write_lock_bh(rwlock_t *lock)
202{
203 local_bh_disable();
204 preempt_disable();
205 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
206 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
207}
208
209static inline void __raw_write_lock(rwlock_t *lock)
210{
211 preempt_disable();
212 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
213 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
214}
215
216#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
217
218static inline void __raw_write_unlock(rwlock_t *lock)
219{
220 rwlock_release(&lock->dep_map, 1, _RET_IP_);
221 do_raw_write_unlock(lock);
222 preempt_enable();
223}
224
225static inline void __raw_read_unlock(rwlock_t *lock)
226{
227 rwlock_release(&lock->dep_map, 1, _RET_IP_);
228 do_raw_read_unlock(lock);
229 preempt_enable();
230}
231
232static inline void
233__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
234{
235 rwlock_release(&lock->dep_map, 1, _RET_IP_);
236 do_raw_read_unlock(lock);
237 local_irq_restore(flags);
238 preempt_enable();
239}
240
241static inline void __raw_read_unlock_irq(rwlock_t *lock)
242{
243 rwlock_release(&lock->dep_map, 1, _RET_IP_);
244 do_raw_read_unlock(lock);
245 local_irq_enable();
246 preempt_enable();
247}
248
249static inline void __raw_read_unlock_bh(rwlock_t *lock)
250{
251 rwlock_release(&lock->dep_map, 1, _RET_IP_);
252 do_raw_read_unlock(lock);
253 preempt_enable_no_resched();
254 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
255}
256
257static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
258 unsigned long flags)
259{
260 rwlock_release(&lock->dep_map, 1, _RET_IP_);
261 do_raw_write_unlock(lock);
262 local_irq_restore(flags);
263 preempt_enable();
264}
265
266static inline void __raw_write_unlock_irq(rwlock_t *lock)
267{
268 rwlock_release(&lock->dep_map, 1, _RET_IP_);
269 do_raw_write_unlock(lock);
270 local_irq_enable();
271 preempt_enable();
272}
273
274static inline void __raw_write_unlock_bh(rwlock_t *lock)
275{
276 rwlock_release(&lock->dep_map, 1, _RET_IP_);
277 do_raw_write_unlock(lock);
278 preempt_enable_no_resched();
279 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
280}
281
282#endif /* __LINUX_RWLOCK_API_SMP_H */
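
The trylock entry points above back write_trylock()/write_trylock_irqsave() in linux/rwlock.h; a hedged sketch of how a caller typically uses them (the lock and data below are hypothetical):

#include <linux/types.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);	/* hypothetical */
static unsigned long stats_value;	/* hypothetical */

/* Opportunistic update: back off instead of spinning if the lock is held. */
static bool try_bump_stats(void)
{
	unsigned long flags;

	if (!write_trylock_irqsave(&stats_lock, flags))
		return false;		/* contended; caller may retry later */

	stats_value++;
	write_unlock_irqrestore(&stats_lock, flags);

	return true;
}
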
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000000..bd31808c7d8e
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
1#ifndef __LINUX_RWLOCK_TYPES_H
2#define __LINUX_RWLOCK_TYPES_H
3
4/*
5 * include/linux/rwlock_types.h - generic rwlock type definitions
6 * and initializers
7 *
8 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
9 * Released under the General Public License (GPL).
10 */
11typedef struct {
12 arch_rwlock_t raw_lock;
13#ifdef CONFIG_GENERIC_LOCKBREAK
14 unsigned int break_lock;
15#endif
16#ifdef CONFIG_DEBUG_SPINLOCK
17 unsigned int magic, owner_cpu;
18 void *owner;
19#endif
20#ifdef CONFIG_DEBUG_LOCK_ALLOC
21 struct lockdep_map dep_map;
22#endif
23} rwlock_t;
24
25#define RWLOCK_MAGIC 0xdeaf1eed
26
27#ifdef CONFIG_DEBUG_LOCK_ALLOC
28# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
29#else
30# define RW_DEP_MAP_INIT(lockname)
31#endif
32
33#ifdef CONFIG_DEBUG_SPINLOCK
34#define __RW_LOCK_UNLOCKED(lockname) \
35 (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
36 .magic = RWLOCK_MAGIC, \
37 .owner = SPINLOCK_OWNER_INIT, \
38 .owner_cpu = -1, \
39 RW_DEP_MAP_INIT(lockname) }
40#else
41#define __RW_LOCK_UNLOCKED(lockname) \
42 (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
43 RW_DEP_MAP_INIT(lockname) }
44#endif
45
46/*
47 * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
48 * deprecated.
49 *
50 * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
51 */
52#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
53
54#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
55
56#endif /* __LINUX_RWLOCK_TYPES_H */
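
A short sketch of the two initialization styles this header supports: static definition via DEFINE_RWLOCK(), and run-time initialization with rwlock_init() for locks embedded in dynamically allocated objects (the structure and allocator below are hypothetical):

#include <linux/spinlock.h>
#include <linux/slab.h>

/* Static definition; lockdep derives the lock class from the name. */
static DEFINE_RWLOCK(table_lock);

/* Hypothetical object with an embedded rwlock. */
struct example_table {
	rwlock_t lock;
	unsigned int entries;
};

static struct example_table *example_table_alloc(void)
{
	struct example_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;

	rwlock_init(&t->lock);	/* run-time init, registers a lockdep key */
	return t;
}
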
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6c261f..bdfcc2527970 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem);
68extern void __up_read(struct rw_semaphore *sem); 68extern void __up_read(struct rw_semaphore *sem);
69extern void __up_write(struct rw_semaphore *sem); 69extern void __up_write(struct rw_semaphore *sem);
70extern void __downgrade_write(struct rw_semaphore *sem); 70extern void __downgrade_write(struct rw_semaphore *sem);
71 71extern int rwsem_is_locked(struct rw_semaphore *sem);
72static inline int rwsem_is_locked(struct rw_semaphore *sem)
73{
74 return (sem->activity != 0);
75}
76 72
77#endif /* __KERNEL__ */ 73#endif /* __KERNEL__ */
78#endif /* _LINUX_RWSEM_SPINLOCK_H */ 74#endif /* _LINUX_RWSEM_SPINLOCK_H */
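
With rwsem_is_locked() turned into an out-of-line function for the spinlock-based rw_semaphore, callers keep using it exactly as before; a hedged sketch (the semaphore and helper are hypothetical):

#include <linux/kernel.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(config_sem);	/* hypothetical */

/* Must only be called with config_sem held (read or write) by the caller. */
static void update_config_locked(void)
{
	WARN_ON(!rwsem_is_locked(&config_sem));
	/* ... touch data protected by config_sem ... */
}
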
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 294eb2f80144..244c287a5ac1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1409,7 +1409,7 @@ struct task_struct {
1409#endif 1409#endif
1410 1410
1411 /* Protection of the PI data structures: */ 1411 /* Protection of the PI data structures: */
1412 spinlock_t pi_lock; 1412 raw_spinlock_t pi_lock;
1413 1413
1414#ifdef CONFIG_RT_MUTEXES 1414#ifdef CONFIG_RT_MUTEXES
1415 /* PI waiters blocked on a rt_mutex held by this task */ 1415 /* PI waiters blocked on a rt_mutex held by this task */
@@ -1446,8 +1446,10 @@ struct task_struct {
1446 gfp_t lockdep_reclaim_gfp; 1446 gfp_t lockdep_reclaim_gfp;
1447#endif 1447#endif
1448 1448
1449#ifdef CONFIG_FS_JOURNAL_INFO
1449/* journalling filesystem info */ 1450/* journalling filesystem info */
1450 void *journal_info; 1451 void *journal_info;
1452#endif
1451 1453
1452/* stacked block device info */ 1454/* stacked block device info */
1453 struct bio *bio_list, **bio_tail; 1455 struct bio *bio_list, **bio_tail;
@@ -1542,6 +1544,14 @@ struct task_struct {
1542 unsigned long trace_recursion; 1544 unsigned long trace_recursion;
1543#endif /* CONFIG_TRACING */ 1545#endif /* CONFIG_TRACING */
1544 unsigned long stack_start; 1546 unsigned long stack_start;
1547#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1548 struct memcg_batch_info {
1549 int do_batch; /* incremented when batch uncharge started */
1550 struct mem_cgroup *memcg; /* target memcg of uncharge */
1551 unsigned long bytes; /* uncharged usage */
1552 unsigned long memsw_bytes; /* uncharged mem+swap usage */
1553 } memcg_batch;
1554#endif
1545}; 1555};
1546 1556
1547/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1557/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2073,7 +2083,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
2073extern int do_notify_parent(struct task_struct *, int); 2083extern int do_notify_parent(struct task_struct *, int);
2074extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2084extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2075extern void force_sig(int, struct task_struct *); 2085extern void force_sig(int, struct task_struct *);
2076extern void force_sig_specific(int, struct task_struct *);
2077extern int send_sig(int, struct task_struct *, int); 2086extern int send_sig(int, struct task_struct *, int);
2078extern void zap_other_threads(struct task_struct *p); 2087extern void zap_other_threads(struct task_struct *p);
2079extern struct sigqueue *sigqueue_alloc(void); 2088extern struct sigqueue *sigqueue_alloc(void);
@@ -2092,11 +2101,6 @@ static inline int kill_cad_pid(int sig, int priv)
2092#define SEND_SIG_PRIV ((struct siginfo *) 1) 2101#define SEND_SIG_PRIV ((struct siginfo *) 1)
2093#define SEND_SIG_FORCED ((struct siginfo *) 2) 2102#define SEND_SIG_FORCED ((struct siginfo *) 2)
2094 2103
2095static inline int is_si_special(const struct siginfo *info)
2096{
2097 return info <= SEND_SIG_FORCED;
2098}
2099
2100/* 2104/*
2101 * True if we are on the alternate signal stack. 2105 * True if we are on the alternate signal stack.
2102 */ 2106 */
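
Because pi_lock is now a raw_spinlock_t, any code that takes it must use the raw_spin_*() accessors introduced by this series rather than the spinlock_t wrappers; a hedged sketch (the helper itself is hypothetical):

#include <linux/sched.h>
#include <linux/spinlock.h>

static void example_inspect_pi_state(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);	/* spin_lock_irqsave() no longer applies here */
	/* ... examine PI state protected by pi_lock ... */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
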
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 1b191c176bcd..8a4adbef8a0f 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -86,6 +86,7 @@ struct task_struct;
86struct sem { 86struct sem {
87 int semval; /* current value */ 87 int semval; /* current value */
88 int sempid; /* pid of last operation */ 88 int sempid; /* pid of last operation */
89 struct list_head sem_pending; /* pending single-sop operations */
89}; 90};
90 91
91/* One sem_array data structure for each set of semaphores in the system. */ 92/* One sem_array data structure for each set of semaphores in the system. */
@@ -96,11 +97,13 @@ struct sem_array {
96 struct sem *sem_base; /* ptr to first semaphore in array */ 97 struct sem *sem_base; /* ptr to first semaphore in array */
97 struct list_head sem_pending; /* pending operations to be processed */ 98 struct list_head sem_pending; /* pending operations to be processed */
98 struct list_head list_id; /* undo requests on this array */ 99 struct list_head list_id; /* undo requests on this array */
99 unsigned long sem_nsems; /* no. of semaphores in array */ 100 int sem_nsems; /* no. of semaphores in array */
101 int complex_count; /* pending complex operations */
100}; 102};
101 103
102/* One queue for each sleeping process in the system. */ 104/* One queue for each sleeping process in the system. */
103struct sem_queue { 105struct sem_queue {
106 struct list_head simple_list; /* queue of pending operations */
104 struct list_head list; /* queue of pending operations */ 107 struct list_head list; /* queue of pending operations */
105 struct task_struct *sleeper; /* this process */ 108 struct task_struct *sleeper; /* this process */
106 struct sem_undo *undo; /* undo structure */ 109 struct sem_undo *undo; /* undo structure */
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
index d53642d2d899..67ed2c542831 100644
--- a/include/linux/sm501-regs.h
+++ b/include/linux/sm501-regs.h
@@ -31,6 +31,8 @@
31#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11) 31#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
32#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15) 32#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
33 33
34#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19)
35
34/* miscellaneous control */ 36/* miscellaneous control */
35 37
36#define SM501_MISC_CONTROL (0x000004) 38#define SM501_MISC_CONTROL (0x000004)
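
A hedged sketch of how the new status bit might be polled, assuming it sits in the system control register (SM501_SYSTEM_CONTROL) alongside the other SM501_SYSCTRL_* bits and that 'regs' is the ioremapped register base; the helper itself is hypothetical:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sm501-regs.h>

static int sm501_wait_2d_idle(void __iomem *regs)
{
	int timeout = 1000;

	while (readl(regs + SM501_SYSTEM_CONTROL) & SM501_SYSCTRL_2D_ENGINE_STATUS) {
		if (--timeout == 0)
			return -ETIMEDOUT;	/* 2D engine stuck busy */
		udelay(10);
	}

	return 0;
}
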
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d88..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
8 * 8 *
9 * on SMP builds: 9 * on SMP builds:
10 * 10 *
11 * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the 11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers 12 * initializers
13 * 13 *
14 * linux/spinlock_types.h: 14 * linux/spinlock_types.h:
15 * defines the generic type and initializers 15 * defines the generic type and initializers
16 * 16 *
17 * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel 17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code 18 * implementations, mostly inline assembly code
19 * 19 *
20 * (also included on UP-debug builds:) 20 * (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
34 * defines the generic type and initializers 34 * defines the generic type and initializers
35 * 35 *
36 * linux/spinlock_up.h: 36 * linux/spinlock_up.h:
37 * contains the __raw_spin_*()/etc. version of UP 37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt 38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds) 39 * builds)
40 * 40 *
@@ -75,12 +75,12 @@
75#define __lockfunc __attribute__((section(".spinlock.text"))) 75#define __lockfunc __attribute__((section(".spinlock.text")))
76 76
77/* 77/*
78 * Pull the raw_spinlock_t and raw_rwlock_t definitions: 78 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
79 */ 79 */
80#include <linux/spinlock_types.h> 80#include <linux/spinlock_types.h>
81 81
82/* 82/*
 83 * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): 83 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
84 */ 84 */
85#ifdef CONFIG_SMP 85#ifdef CONFIG_SMP
86# include <asm/spinlock.h> 86# include <asm/spinlock.h>
@@ -89,45 +89,31 @@
89#endif 89#endif
90 90
91#ifdef CONFIG_DEBUG_SPINLOCK 91#ifdef CONFIG_DEBUG_SPINLOCK
92 extern void __spin_lock_init(spinlock_t *lock, const char *name, 92 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
93 struct lock_class_key *key); 93 struct lock_class_key *key);
94# define spin_lock_init(lock) \ 94# define raw_spin_lock_init(lock) \
95do { \ 95do { \
96 static struct lock_class_key __key; \ 96 static struct lock_class_key __key; \
97 \ 97 \
98 __spin_lock_init((lock), #lock, &__key); \ 98 __raw_spin_lock_init((lock), #lock, &__key); \
99} while (0) 99} while (0)
100 100
101#else 101#else
102# define spin_lock_init(lock) \ 102# define raw_spin_lock_init(lock) \
103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) 103 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
104#endif 104#endif
105 105
106#ifdef CONFIG_DEBUG_SPINLOCK 106#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
107 extern void __rwlock_init(rwlock_t *lock, const char *name,
108 struct lock_class_key *key);
109# define rwlock_init(lock) \
110do { \
111 static struct lock_class_key __key; \
112 \
113 __rwlock_init((lock), #lock, &__key); \
114} while (0)
115#else
116# define rwlock_init(lock) \
117 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
118#endif
119
120#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
121 107
122#ifdef CONFIG_GENERIC_LOCKBREAK 108#ifdef CONFIG_GENERIC_LOCKBREAK
123#define spin_is_contended(lock) ((lock)->break_lock) 109#define raw_spin_is_contended(lock) ((lock)->break_lock)
124#else 110#else
125 111
126#ifdef __raw_spin_is_contended 112#ifdef arch_spin_is_contended
127#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) 113#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
128#else 114#else
129#define spin_is_contended(lock) (((void)(lock), 0)) 115#define raw_spin_is_contended(lock) (((void)(lock), 0))
130#endif /*__raw_spin_is_contended*/ 116#endif /*arch_spin_is_contended*/
131#endif 117#endif
132 118
133/* The lock does not imply full memory barrier. */ 119/* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
136#endif 122#endif
137 123
138/** 124/**
139 * spin_unlock_wait - wait until the spinlock gets unlocked 125 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
140 * @lock: the spinlock in question. 126 * @lock: the spinlock in question.
141 */ 127 */
142#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) 128#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
143 129
144#ifdef CONFIG_DEBUG_SPINLOCK 130#ifdef CONFIG_DEBUG_SPINLOCK
145 extern void _raw_spin_lock(spinlock_t *lock); 131 extern void do_raw_spin_lock(raw_spinlock_t *lock);
146#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 132#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
147 extern int _raw_spin_trylock(spinlock_t *lock); 133 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
148 extern void _raw_spin_unlock(spinlock_t *lock); 134 extern void do_raw_spin_unlock(raw_spinlock_t *lock);
149 extern void _raw_read_lock(rwlock_t *lock);
150#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
151 extern int _raw_read_trylock(rwlock_t *lock);
152 extern void _raw_read_unlock(rwlock_t *lock);
153 extern void _raw_write_lock(rwlock_t *lock);
154#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
155 extern int _raw_write_trylock(rwlock_t *lock);
156 extern void _raw_write_unlock(rwlock_t *lock);
157#else 135#else
158# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) 136static inline void do_raw_spin_lock(raw_spinlock_t *lock)
159# define _raw_spin_lock_flags(lock, flags) \ 137{
160 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) 138 arch_spin_lock(&lock->raw_lock);
161# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) 139}
162# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) 140
163# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) 141static inline void
164# define _raw_read_lock_flags(lock, flags) \ 142do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
165 __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) 143{
166# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) 144 arch_spin_lock_flags(&lock->raw_lock, *flags);
167# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) 145}
168# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) 146
169# define _raw_write_lock_flags(lock, flags) \ 147static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
170 __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) 148{
171# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) 149 return arch_spin_trylock(&(lock)->raw_lock);
172# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) 150}
151
152static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
153{
154 arch_spin_unlock(&lock->raw_lock);
155}
173#endif 156#endif
174 157
175#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
176#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
177
178/* 158/*
179 * Define the various spin_lock and rw_lock methods. Note we define these 159 * Define the various spin_lock methods. Note we define these
180 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various 160 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
181 * methods are defined as nops in the case they are not required. 161 * various methods are defined as nops in the case they are not
162 * required.
182 */ 163 */
183#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) 164#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
184#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
185#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
186 165
187#define spin_lock(lock) _spin_lock(lock) 166#define raw_spin_lock(lock) _raw_spin_lock(lock)
188 167
189#ifdef CONFIG_DEBUG_LOCK_ALLOC 168#ifdef CONFIG_DEBUG_LOCK_ALLOC
190# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) 169# define raw_spin_lock_nested(lock, subclass) \
191# define spin_lock_nest_lock(lock, nest_lock) \ 170 _raw_spin_lock_nested(lock, subclass)
171
172# define raw_spin_lock_nest_lock(lock, nest_lock) \
192 do { \ 173 do { \
193 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ 174 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
194 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 175 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
195 } while (0) 176 } while (0)
196#else 177#else
197# define spin_lock_nested(lock, subclass) _spin_lock(lock) 178# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
198# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) 179# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
199#endif 180#endif
200 181
201#define write_lock(lock) _write_lock(lock)
202#define read_lock(lock) _read_lock(lock)
203
204#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 182#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
205 183
206#define spin_lock_irqsave(lock, flags) \ 184#define raw_spin_lock_irqsave(lock, flags) \
207 do { \ 185 do { \
208 typecheck(unsigned long, flags); \ 186 typecheck(unsigned long, flags); \
209 flags = _spin_lock_irqsave(lock); \ 187 flags = _raw_spin_lock_irqsave(lock); \
210 } while (0)
211#define read_lock_irqsave(lock, flags) \
212 do { \
213 typecheck(unsigned long, flags); \
214 flags = _read_lock_irqsave(lock); \
215 } while (0)
216#define write_lock_irqsave(lock, flags) \
217 do { \
218 typecheck(unsigned long, flags); \
219 flags = _write_lock_irqsave(lock); \
220 } while (0) 188 } while (0)
221 189
222#ifdef CONFIG_DEBUG_LOCK_ALLOC 190#ifdef CONFIG_DEBUG_LOCK_ALLOC
223#define spin_lock_irqsave_nested(lock, flags, subclass) \ 191#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
224 do { \ 192 do { \
225 typecheck(unsigned long, flags); \ 193 typecheck(unsigned long, flags); \
226 flags = _spin_lock_irqsave_nested(lock, subclass); \ 194 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
227 } while (0) 195 } while (0)
228#else 196#else
229#define spin_lock_irqsave_nested(lock, flags, subclass) \ 197#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
230 do { \ 198 do { \
231 typecheck(unsigned long, flags); \ 199 typecheck(unsigned long, flags); \
232 flags = _spin_lock_irqsave(lock); \ 200 flags = _raw_spin_lock_irqsave(lock); \
233 } while (0) 201 } while (0)
234#endif 202#endif
235 203
236#else 204#else
237 205
238#define spin_lock_irqsave(lock, flags) \ 206#define raw_spin_lock_irqsave(lock, flags) \
239 do { \
240 typecheck(unsigned long, flags); \
241 _spin_lock_irqsave(lock, flags); \
242 } while (0)
243#define read_lock_irqsave(lock, flags) \
244 do { \
245 typecheck(unsigned long, flags); \
246 _read_lock_irqsave(lock, flags); \
247 } while (0)
248#define write_lock_irqsave(lock, flags) \
249 do { \ 207 do { \
250 typecheck(unsigned long, flags); \ 208 typecheck(unsigned long, flags); \
251 _write_lock_irqsave(lock, flags); \ 209 _raw_spin_lock_irqsave(lock, flags); \
252 } while (0) 210 } while (0)
253#define spin_lock_irqsave_nested(lock, flags, subclass) \
254 spin_lock_irqsave(lock, flags)
255 211
256#endif 212#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
213 raw_spin_lock_irqsave(lock, flags)
257 214
258#define spin_lock_irq(lock) _spin_lock_irq(lock) 215#endif
259#define spin_lock_bh(lock) _spin_lock_bh(lock)
260#define read_lock_irq(lock) _read_lock_irq(lock)
261#define read_lock_bh(lock) _read_lock_bh(lock)
262#define write_lock_irq(lock) _write_lock_irq(lock)
263#define write_lock_bh(lock) _write_lock_bh(lock)
264#define spin_unlock(lock) _spin_unlock(lock)
265#define read_unlock(lock) _read_unlock(lock)
266#define write_unlock(lock) _write_unlock(lock)
267#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
268#define read_unlock_irq(lock) _read_unlock_irq(lock)
269#define write_unlock_irq(lock) _write_unlock_irq(lock)
270
271#define spin_unlock_irqrestore(lock, flags) \
272 do { \
273 typecheck(unsigned long, flags); \
274 _spin_unlock_irqrestore(lock, flags); \
275 } while (0)
276#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
277 216
278#define read_unlock_irqrestore(lock, flags) \ 217#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
279 do { \ 218#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
280 typecheck(unsigned long, flags); \ 219#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
281 _read_unlock_irqrestore(lock, flags); \ 220#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
282 } while (0)
283#define read_unlock_bh(lock) _read_unlock_bh(lock)
284 221
285#define write_unlock_irqrestore(lock, flags) \ 222#define raw_spin_unlock_irqrestore(lock, flags) \
286 do { \ 223 do { \
287 typecheck(unsigned long, flags); \ 224 typecheck(unsigned long, flags); \
288 _write_unlock_irqrestore(lock, flags); \ 225 _raw_spin_unlock_irqrestore(lock, flags); \
289 } while (0) 226 } while (0)
290#define write_unlock_bh(lock) _write_unlock_bh(lock) 227#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
291 228
292#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) 229#define raw_spin_trylock_bh(lock) \
230 __cond_lock(lock, _raw_spin_trylock_bh(lock))
293 231
294#define spin_trylock_irq(lock) \ 232#define raw_spin_trylock_irq(lock) \
295({ \ 233({ \
296 local_irq_disable(); \ 234 local_irq_disable(); \
297 spin_trylock(lock) ? \ 235 raw_spin_trylock(lock) ? \
298 1 : ({ local_irq_enable(); 0; }); \ 236 1 : ({ local_irq_enable(); 0; }); \
299}) 237})
300 238
301#define spin_trylock_irqsave(lock, flags) \ 239#define raw_spin_trylock_irqsave(lock, flags) \
302({ \ 240({ \
303 local_irq_save(flags); \ 241 local_irq_save(flags); \
304 spin_trylock(lock) ? \ 242 raw_spin_trylock(lock) ? \
305 1 : ({ local_irq_restore(flags); 0; }); \ 243 1 : ({ local_irq_restore(flags); 0; }); \
306}) 244})
307 245
308#define write_trylock_irqsave(lock, flags) \ 246/**
309({ \ 247 * raw_spin_can_lock - would raw_spin_trylock() succeed?
310 local_irq_save(flags); \ 248 * @lock: the spinlock in question.
311 write_trylock(lock) ? \ 249 */
312 1 : ({ local_irq_restore(flags); 0; }); \ 250#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
251
252/* Include rwlock functions */
253#include <linux/rwlock.h>
254
255/*
256 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
257 */
258#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
259# include <linux/spinlock_api_smp.h>
260#else
261# include <linux/spinlock_api_up.h>
262#endif
263
264/*
265 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
266 */
267
268static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
269{
270 return &lock->rlock;
271}
272
273#define spin_lock_init(_lock) \
274do { \
275 spinlock_check(_lock); \
276 raw_spin_lock_init(&(_lock)->rlock); \
277} while (0)
278
279static inline void spin_lock(spinlock_t *lock)
280{
281 raw_spin_lock(&lock->rlock);
282}
283
284static inline void spin_lock_bh(spinlock_t *lock)
285{
286 raw_spin_lock_bh(&lock->rlock);
287}
288
289static inline int spin_trylock(spinlock_t *lock)
290{
291 return raw_spin_trylock(&lock->rlock);
292}
293
294#define spin_lock_nested(lock, subclass) \
295do { \
296 raw_spin_lock_nested(spinlock_check(lock), subclass); \
297} while (0)
298
299#define spin_lock_nest_lock(lock, nest_lock) \
300do { \
301 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
302} while (0)
303
304static inline void spin_lock_irq(spinlock_t *lock)
305{
306 raw_spin_lock_irq(&lock->rlock);
307}
308
309#define spin_lock_irqsave(lock, flags) \
310do { \
311 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
312} while (0)
313
314#define spin_lock_irqsave_nested(lock, flags, subclass) \
315do { \
316 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
317} while (0)
318
319static inline void spin_unlock(spinlock_t *lock)
320{
321 raw_spin_unlock(&lock->rlock);
322}
323
324static inline void spin_unlock_bh(spinlock_t *lock)
325{
326 raw_spin_unlock_bh(&lock->rlock);
327}
328
329static inline void spin_unlock_irq(spinlock_t *lock)
330{
331 raw_spin_unlock_irq(&lock->rlock);
332}
333
334static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
335{
336 raw_spin_unlock_irqrestore(&lock->rlock, flags);
337}
338
339static inline int spin_trylock_bh(spinlock_t *lock)
340{
341 return raw_spin_trylock_bh(&lock->rlock);
342}
343
344static inline int spin_trylock_irq(spinlock_t *lock)
345{
346 return raw_spin_trylock_irq(&lock->rlock);
347}
348
349#define spin_trylock_irqsave(lock, flags) \
350({ \
351 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
313}) 352})
314 353
354static inline void spin_unlock_wait(spinlock_t *lock)
355{
356 raw_spin_unlock_wait(&lock->rlock);
357}
358
359static inline int spin_is_locked(spinlock_t *lock)
360{
361 return raw_spin_is_locked(&lock->rlock);
362}
363
364static inline int spin_is_contended(spinlock_t *lock)
365{
366 return raw_spin_is_contended(&lock->rlock);
367}
368
369static inline int spin_can_lock(spinlock_t *lock)
370{
371 return raw_spin_can_lock(&lock->rlock);
372}
373
374static inline void assert_spin_locked(spinlock_t *lock)
375{
376 assert_raw_spin_locked(&lock->rlock);
377}
378
315/* 379/*
316 * Pull the atomic_t declaration: 380 * Pull the atomic_t declaration:
317 * (asm-mips/atomic.h needs above definitions) 381 * (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
329#define atomic_dec_and_lock(atomic, lock) \ 393#define atomic_dec_and_lock(atomic, lock) \
330 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) 394 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
331 395
332/**
333 * spin_can_lock - would spin_trylock() succeed?
334 * @lock: the spinlock in question.
335 */
336#define spin_can_lock(lock) (!spin_is_locked(lock))
337
338/*
339 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
340 */
341#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
342# include <linux/spinlock_api_smp.h>
343#else
344# include <linux/spinlock_api_up.h>
345#endif
346
347#endif /* __LINUX_SPINLOCK_H */ 396#endif /* __LINUX_SPINLOCK_H */
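
A hedged sketch of the split the rewritten header establishes: ordinary code keeps using spinlock_t and the spin_*() wrappers, while code that must remain a true spinning lock (for example on preempt-rt, which this split prepares for) uses raw_spinlock_t directly; all names below are hypothetical:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(list_lock);	/* ordinary spinlock_t */
static DEFINE_RAW_SPINLOCK(hw_lock);	/* raw_spinlock_t, never substituted */

static void example_touch_list(void)
{
	spin_lock(&list_lock);		/* expands to raw_spin_lock(&list_lock.rlock) */
	/* ... manipulate data guarded by list_lock ... */
	spin_unlock(&list_lock);
}

static void example_touch_hw(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_lock, flags);	/* no spinlock_t wrapper involved */
	/* ... low-level state that must stay under a real spinlock ... */
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}
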
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459bc..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
17 17
18int in_lock_functions(unsigned long addr); 18int in_lock_functions(unsigned long addr);
19 19
20#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) 20#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
21 21
22void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); 22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) 23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock); 24 __acquires(lock);
25void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) 25void __lockfunc
26 __acquires(lock); 26_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); 27 __acquires(lock);
28void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); 28void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); 29void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); 30 __acquires(lock);
31void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); 31
32void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); 32unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); 33 __acquires(lock);
34void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); 34unsigned long __lockfunc
35unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) 35_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
36 __acquires(lock); 36 __acquires(lock);
37unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) 37int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
38 __acquires(lock); 38int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
39unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) 39void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
40 __acquires(lock); 40void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
41unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) 41void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
42 __acquires(lock); 42void __lockfunc
43int __lockfunc _spin_trylock(spinlock_t *lock); 43_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
44int __lockfunc _read_trylock(rwlock_t *lock); 44 __releases(lock);
45int __lockfunc _write_trylock(rwlock_t *lock);
46int __lockfunc _spin_trylock_bh(spinlock_t *lock);
47void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
48void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
49void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
50void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
51void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
52void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
53void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
54void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
55void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
56void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
57 __releases(lock);
58void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
59 __releases(lock);
60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 __releases(lock);
62 45
63#ifdef CONFIG_INLINE_SPIN_LOCK 46#ifdef CONFIG_INLINE_SPIN_LOCK
64#define _spin_lock(lock) __spin_lock(lock) 47#define _raw_spin_lock(lock) __raw_spin_lock(lock)
65#endif
66
67#ifdef CONFIG_INLINE_READ_LOCK
68#define _read_lock(lock) __read_lock(lock)
69#endif
70
71#ifdef CONFIG_INLINE_WRITE_LOCK
72#define _write_lock(lock) __write_lock(lock)
73#endif 48#endif
74 49
75#ifdef CONFIG_INLINE_SPIN_LOCK_BH 50#ifdef CONFIG_INLINE_SPIN_LOCK_BH
76#define _spin_lock_bh(lock) __spin_lock_bh(lock) 51#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
77#endif
78
79#ifdef CONFIG_INLINE_READ_LOCK_BH
80#define _read_lock_bh(lock) __read_lock_bh(lock)
81#endif
82
83#ifdef CONFIG_INLINE_WRITE_LOCK_BH
84#define _write_lock_bh(lock) __write_lock_bh(lock)
85#endif 52#endif
86 53
87#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ 54#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
88#define _spin_lock_irq(lock) __spin_lock_irq(lock) 55#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
89#endif
90
91#ifdef CONFIG_INLINE_READ_LOCK_IRQ
92#define _read_lock_irq(lock) __read_lock_irq(lock)
93#endif
94
95#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
96#define _write_lock_irq(lock) __write_lock_irq(lock)
97#endif 56#endif
98 57
99#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE 58#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
100#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) 59#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
101#endif
102
103#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
104#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
105#endif
106
107#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
108#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
109#endif 60#endif
110 61
111#ifdef CONFIG_INLINE_SPIN_TRYLOCK 62#ifdef CONFIG_INLINE_SPIN_TRYLOCK
112#define _spin_trylock(lock) __spin_trylock(lock) 63#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
113#endif
114
115#ifdef CONFIG_INLINE_READ_TRYLOCK
116#define _read_trylock(lock) __read_trylock(lock)
117#endif
118
119#ifdef CONFIG_INLINE_WRITE_TRYLOCK
120#define _write_trylock(lock) __write_trylock(lock)
121#endif 64#endif
122 65
123#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH 66#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
124#define _spin_trylock_bh(lock) __spin_trylock_bh(lock) 67#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
125#endif 68#endif
126 69
127#ifdef CONFIG_INLINE_SPIN_UNLOCK 70#ifdef CONFIG_INLINE_SPIN_UNLOCK
128#define _spin_unlock(lock) __spin_unlock(lock) 71#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
129#endif
130
131#ifdef CONFIG_INLINE_READ_UNLOCK
132#define _read_unlock(lock) __read_unlock(lock)
133#endif
134
135#ifdef CONFIG_INLINE_WRITE_UNLOCK
136#define _write_unlock(lock) __write_unlock(lock)
137#endif 72#endif
138 73
139#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH 74#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
140#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) 75#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
141#endif
142
143#ifdef CONFIG_INLINE_READ_UNLOCK_BH
144#define _read_unlock_bh(lock) __read_unlock_bh(lock)
145#endif
146
147#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
148#define _write_unlock_bh(lock) __write_unlock_bh(lock)
149#endif 76#endif
150 77
151#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ 78#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
152#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) 79#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
153#endif
154
155#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
156#define _read_unlock_irq(lock) __read_unlock_irq(lock)
157#endif
158
159#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
160#define _write_unlock_irq(lock) __write_unlock_irq(lock)
161#endif 80#endif
162 81
163#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE 82#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
164#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) 83#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
165#endif
166
167#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
168#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
169#endif
170
171#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
172#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
173#endif 84#endif
174 85
175static inline int __spin_trylock(spinlock_t *lock) 86static inline int __raw_spin_trylock(raw_spinlock_t *lock)
176{ 87{
177 preempt_disable(); 88 preempt_disable();
178 if (_raw_spin_trylock(lock)) { 89 if (do_raw_spin_trylock(lock)) {
179 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); 90 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
180 return 1; 91 return 1;
181 } 92 }
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
183 return 0; 94 return 0;
184} 95}
185 96
186static inline int __read_trylock(rwlock_t *lock)
187{
188 preempt_disable();
189 if (_raw_read_trylock(lock)) {
190 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
191 return 1;
192 }
193 preempt_enable();
194 return 0;
195}
196
197static inline int __write_trylock(rwlock_t *lock)
198{
199 preempt_disable();
200 if (_raw_write_trylock(lock)) {
201 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
202 return 1;
203 }
204 preempt_enable();
205 return 0;
206}
207
208/* 97/*
209 * If lockdep is enabled then we use the non-preemption spin-ops 98 * If lockdep is enabled then we use the non-preemption spin-ops
210 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are 99 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
212 */ 101 */
213#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 102#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
214 103
215static inline void __read_lock(rwlock_t *lock) 104static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
216{
217 preempt_disable();
218 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
219 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
220}
221
222static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
223{ 105{
224 unsigned long flags; 106 unsigned long flags;
225 107
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
228 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 110 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
229 /* 111 /*
 230 * On lockdep we don't want the hand-coded irq-enable of 112 * On lockdep we don't want the hand-coded irq-enable of
231 * _raw_spin_lock_flags() code, because lockdep assumes 113 * do_raw_spin_lock_flags() code, because lockdep assumes
232 * that interrupts are not re-enabled during lock-acquire: 114 * that interrupts are not re-enabled during lock-acquire:
233 */ 115 */
234#ifdef CONFIG_LOCKDEP 116#ifdef CONFIG_LOCKDEP
235 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 117 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
236#else 118#else
237 _raw_spin_lock_flags(lock, &flags); 119 do_raw_spin_lock_flags(lock, &flags);
238#endif 120#endif
239 return flags; 121 return flags;
240} 122}
241 123
242static inline void __spin_lock_irq(spinlock_t *lock) 124static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
243{ 125{
244 local_irq_disable(); 126 local_irq_disable();
245 preempt_disable(); 127 preempt_disable();
246 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 128 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
247 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 129 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
248} 130}
249 131
250static inline void __spin_lock_bh(spinlock_t *lock) 132static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
251{ 133{
252 local_bh_disable(); 134 local_bh_disable();
253 preempt_disable(); 135 preempt_disable();
254 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 136 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
255 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 137 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
256}
257
258static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
259{
260 unsigned long flags;
261
262 local_irq_save(flags);
263 preempt_disable();
264 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
265 LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
266 _raw_read_lock_flags, &flags);
267 return flags;
268}
269
270static inline void __read_lock_irq(rwlock_t *lock)
271{
272 local_irq_disable();
273 preempt_disable();
274 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
275 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
276}
277
278static inline void __read_lock_bh(rwlock_t *lock)
279{
280 local_bh_disable();
281 preempt_disable();
282 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
283 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
284}
285
286static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
287{
288 unsigned long flags;
289
290 local_irq_save(flags);
291 preempt_disable();
292 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
293 LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
294 _raw_write_lock_flags, &flags);
295 return flags;
296}
297
298static inline void __write_lock_irq(rwlock_t *lock)
299{
300 local_irq_disable();
301 preempt_disable();
302 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
303 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
304} 138}
305 139
306static inline void __write_lock_bh(rwlock_t *lock) 140static inline void __raw_spin_lock(raw_spinlock_t *lock)
307{
308 local_bh_disable();
309 preempt_disable();
310 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
311 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
312}
313
314static inline void __spin_lock(spinlock_t *lock)
315{ 141{
316 preempt_disable(); 142 preempt_disable();
317 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 143 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
318 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 144 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
319}
320
321static inline void __write_lock(rwlock_t *lock)
322{
323 preempt_disable();
324 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
325 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
326} 145}
327 146
328#endif /* CONFIG_PREEMPT */ 147#endif /* CONFIG_PREEMPT */
329 148
330static inline void __spin_unlock(spinlock_t *lock) 149static inline void __raw_spin_unlock(raw_spinlock_t *lock)
331{ 150{
332 spin_release(&lock->dep_map, 1, _RET_IP_); 151 spin_release(&lock->dep_map, 1, _RET_IP_);
333 _raw_spin_unlock(lock); 152 do_raw_spin_unlock(lock);
334 preempt_enable();
335}
336
337static inline void __write_unlock(rwlock_t *lock)
338{
339 rwlock_release(&lock->dep_map, 1, _RET_IP_);
340 _raw_write_unlock(lock);
341 preempt_enable();
342}
343
344static inline void __read_unlock(rwlock_t *lock)
345{
346 rwlock_release(&lock->dep_map, 1, _RET_IP_);
347 _raw_read_unlock(lock);
348 preempt_enable(); 153 preempt_enable();
349} 154}
350 155
351static inline void __spin_unlock_irqrestore(spinlock_t *lock, 156static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
352 unsigned long flags) 157 unsigned long flags)
353{ 158{
354 spin_release(&lock->dep_map, 1, _RET_IP_); 159 spin_release(&lock->dep_map, 1, _RET_IP_);
355 _raw_spin_unlock(lock); 160 do_raw_spin_unlock(lock);
356 local_irq_restore(flags); 161 local_irq_restore(flags);
357 preempt_enable(); 162 preempt_enable();
358} 163}
359 164
360static inline void __spin_unlock_irq(spinlock_t *lock) 165static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
361{ 166{
362 spin_release(&lock->dep_map, 1, _RET_IP_); 167 spin_release(&lock->dep_map, 1, _RET_IP_);
363 _raw_spin_unlock(lock); 168 do_raw_spin_unlock(lock);
364 local_irq_enable(); 169 local_irq_enable();
365 preempt_enable(); 170 preempt_enable();
366} 171}
367 172
368static inline void __spin_unlock_bh(spinlock_t *lock) 173static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
369{ 174{
370 spin_release(&lock->dep_map, 1, _RET_IP_); 175 spin_release(&lock->dep_map, 1, _RET_IP_);
371 _raw_spin_unlock(lock); 176 do_raw_spin_unlock(lock);
372 preempt_enable_no_resched();
373 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
374}
375
376static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
377{
378 rwlock_release(&lock->dep_map, 1, _RET_IP_);
379 _raw_read_unlock(lock);
380 local_irq_restore(flags);
381 preempt_enable();
382}
383
384static inline void __read_unlock_irq(rwlock_t *lock)
385{
386 rwlock_release(&lock->dep_map, 1, _RET_IP_);
387 _raw_read_unlock(lock);
388 local_irq_enable();
389 preempt_enable();
390}
391
392static inline void __read_unlock_bh(rwlock_t *lock)
393{
394 rwlock_release(&lock->dep_map, 1, _RET_IP_);
395 _raw_read_unlock(lock);
396 preempt_enable_no_resched(); 177 preempt_enable_no_resched();
397 local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 178 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
398} 179}
399 180
400static inline void __write_unlock_irqrestore(rwlock_t *lock, 181static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
401 unsigned long flags)
402{
403 rwlock_release(&lock->dep_map, 1, _RET_IP_);
404 _raw_write_unlock(lock);
405 local_irq_restore(flags);
406 preempt_enable();
407}
408
409static inline void __write_unlock_irq(rwlock_t *lock)
410{
411 rwlock_release(&lock->dep_map, 1, _RET_IP_);
412 _raw_write_unlock(lock);
413 local_irq_enable();
414 preempt_enable();
415}
416
417static inline void __write_unlock_bh(rwlock_t *lock)
418{
419 rwlock_release(&lock->dep_map, 1, _RET_IP_);
420 _raw_write_unlock(lock);
421 preempt_enable_no_resched();
422 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
423}
424
425static inline int __spin_trylock_bh(spinlock_t *lock)
426{ 182{
427 local_bh_disable(); 183 local_bh_disable();
428 preempt_disable(); 184 preempt_disable();
429 if (_raw_spin_trylock(lock)) { 185 if (do_raw_spin_trylock(lock)) {
430 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); 186 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
431 return 1; 187 return 1;
432 } 188 }
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
435 return 0; 191 return 0;
436} 192}
437 193
194#include <linux/rwlock_api_smp.h>
195
438#endif /* __LINUX_SPINLOCK_API_SMP_H */ 196#endif /* __LINUX_SPINLOCK_API_SMP_H */
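
The _raw_spin_trylock_bh() declared above backs spin_trylock_bh(); a hedged usage sketch for data shared with softirq context (the lock and helper are hypothetical):

#include <linux/types.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(queue_lock);	/* hypothetical, also taken from a tasklet */

/* Called from process context; must not block or spin against the softirq user. */
static bool try_flush_queue(void)
{
	if (!spin_trylock_bh(&queue_lock))	/* disables BHs, then trylocks */
		return false;

	/* ... drain the queue shared with softirq context ... */
	spin_unlock_bh(&queue_lock);

	return true;
}
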
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..af1f47229e70 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
16 16
17#define in_lock_functions(ADDR) 0 17#define in_lock_functions(ADDR) 0
18 18
19#define assert_spin_locked(lock) do { (void)(lock); } while (0) 19#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
20 20
21/* 21/*
22 * In the UP-nondebug case there's no real locking going on, so the 22 * In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
40 do { preempt_enable(); __release(lock); (void)(lock); } while (0) 40 do { preempt_enable(); __release(lock); (void)(lock); } while (0)
41 41
42#define __UNLOCK_BH(lock) \ 42#define __UNLOCK_BH(lock) \
43 do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) 43 do { preempt_enable_no_resched(); local_bh_enable(); \
44 __release(lock); (void)(lock); } while (0)
44 45
45#define __UNLOCK_IRQ(lock) \ 46#define __UNLOCK_IRQ(lock) \
46 do { local_irq_enable(); __UNLOCK(lock); } while (0) 47 do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
48#define __UNLOCK_IRQRESTORE(lock, flags) \ 49#define __UNLOCK_IRQRESTORE(lock, flags) \
49 do { local_irq_restore(flags); __UNLOCK(lock); } while (0) 50 do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
50 51
51#define _spin_lock(lock) __LOCK(lock) 52#define _raw_spin_lock(lock) __LOCK(lock)
52#define _spin_lock_nested(lock, subclass) __LOCK(lock) 53#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
53#define _read_lock(lock) __LOCK(lock) 54#define _raw_read_lock(lock) __LOCK(lock)
54#define _write_lock(lock) __LOCK(lock) 55#define _raw_write_lock(lock) __LOCK(lock)
55#define _spin_lock_bh(lock) __LOCK_BH(lock) 56#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
56#define _read_lock_bh(lock) __LOCK_BH(lock) 57#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
57#define _write_lock_bh(lock) __LOCK_BH(lock) 58#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
58#define _spin_lock_irq(lock) __LOCK_IRQ(lock) 59#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
59#define _read_lock_irq(lock) __LOCK_IRQ(lock) 60#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
60#define _write_lock_irq(lock) __LOCK_IRQ(lock) 61#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
61#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 62#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
62#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 63#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
63#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 64#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
64#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) 65#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
65#define _read_trylock(lock) ({ __LOCK(lock); 1; }) 66#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
66#define _write_trylock(lock) ({ __LOCK(lock); 1; }) 67#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
67#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) 68#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
68#define _spin_unlock(lock) __UNLOCK(lock) 69#define _raw_spin_unlock(lock) __UNLOCK(lock)
69#define _read_unlock(lock) __UNLOCK(lock) 70#define _raw_read_unlock(lock) __UNLOCK(lock)
70#define _write_unlock(lock) __UNLOCK(lock) 71#define _raw_write_unlock(lock) __UNLOCK(lock)
71#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) 72#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
72#define _write_unlock_bh(lock) __UNLOCK_BH(lock) 73#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
73#define _read_unlock_bh(lock) __UNLOCK_BH(lock) 74#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
74#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) 75#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
75#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) 76#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
76#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) 77#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
77#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 78#define _raw_spin_unlock_irqrestore(lock, flags) \
78#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 79 __UNLOCK_IRQRESTORE(lock, flags)
79#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 80#define _raw_read_unlock_irqrestore(lock, flags) \
81 __UNLOCK_IRQRESTORE(lock, flags)
82#define _raw_write_unlock_irqrestore(lock, flags) \
83 __UNLOCK_IRQRESTORE(lock, flags)
80 84
81#endif /* __LINUX_SPINLOCK_API_UP_H */ 85#endif /* __LINUX_SPINLOCK_API_UP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a2..851b7783720d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
17 17
18#include <linux/lockdep.h> 18#include <linux/lockdep.h>
19 19
20typedef struct { 20typedef struct raw_spinlock {
21 raw_spinlock_t raw_lock; 21 arch_spinlock_t raw_lock;
22#ifdef CONFIG_GENERIC_LOCKBREAK 22#ifdef CONFIG_GENERIC_LOCKBREAK
23 unsigned int break_lock; 23 unsigned int break_lock;
24#endif 24#endif
@@ -29,26 +29,10 @@ typedef struct {
29#ifdef CONFIG_DEBUG_LOCK_ALLOC 29#ifdef CONFIG_DEBUG_LOCK_ALLOC
30 struct lockdep_map dep_map; 30 struct lockdep_map dep_map;
31#endif 31#endif
32} spinlock_t; 32} raw_spinlock_t;
33 33
34#define SPINLOCK_MAGIC 0xdead4ead 34#define SPINLOCK_MAGIC 0xdead4ead
35 35
36typedef struct {
37 raw_rwlock_t raw_lock;
38#ifdef CONFIG_GENERIC_LOCKBREAK
39 unsigned int break_lock;
40#endif
41#ifdef CONFIG_DEBUG_SPINLOCK
42 unsigned int magic, owner_cpu;
43 void *owner;
44#endif
45#ifdef CONFIG_DEBUG_LOCK_ALLOC
46 struct lockdep_map dep_map;
47#endif
48} rwlock_t;
49
50#define RWLOCK_MAGIC 0xdeaf1eed
51
52#define SPINLOCK_OWNER_INIT ((void *)-1L) 36#define SPINLOCK_OWNER_INIT ((void *)-1L)
53 37
54#ifdef CONFIG_DEBUG_LOCK_ALLOC 38#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
57# define SPIN_DEP_MAP_INIT(lockname) 41# define SPIN_DEP_MAP_INIT(lockname)
58#endif 42#endif
59 43
60#ifdef CONFIG_DEBUG_LOCK_ALLOC 44#ifdef CONFIG_DEBUG_SPINLOCK
61# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } 45# define SPIN_DEBUG_INIT(lockname) \
46 .magic = SPINLOCK_MAGIC, \
47 .owner_cpu = -1, \
48 .owner = SPINLOCK_OWNER_INIT,
62#else 49#else
63# define RW_DEP_MAP_INIT(lockname) 50# define SPIN_DEBUG_INIT(lockname)
64#endif 51#endif
65 52
66#ifdef CONFIG_DEBUG_SPINLOCK 53#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
67# define __SPIN_LOCK_UNLOCKED(lockname) \ 54 { \
68 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 55 .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
69 .magic = SPINLOCK_MAGIC, \ 56 SPIN_DEBUG_INIT(lockname) \
70 .owner = SPINLOCK_OWNER_INIT, \ 57 SPIN_DEP_MAP_INIT(lockname) }
71 .owner_cpu = -1, \ 58
72 SPIN_DEP_MAP_INIT(lockname) } 59#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
73#define __RW_LOCK_UNLOCKED(lockname) \ 60 (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
74 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 61
75 .magic = RWLOCK_MAGIC, \ 62#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
76 .owner = SPINLOCK_OWNER_INIT, \ 63
77 .owner_cpu = -1, \ 64typedef struct spinlock {
78 RW_DEP_MAP_INIT(lockname) } 65 union {
79#else 66 struct raw_spinlock rlock;
80# define __SPIN_LOCK_UNLOCKED(lockname) \ 67
81 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 68#ifdef CONFIG_DEBUG_LOCK_ALLOC
82 SPIN_DEP_MAP_INIT(lockname) } 69# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
83#define __RW_LOCK_UNLOCKED(lockname) \ 70 struct {
84 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 71 u8 __padding[LOCK_PADSIZE];
85 RW_DEP_MAP_INIT(lockname) } 72 struct lockdep_map dep_map;
73 };
86#endif 74#endif
75 };
76} spinlock_t;
77
78#define __SPIN_LOCK_INITIALIZER(lockname) \
79 { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
80
81#define __SPIN_LOCK_UNLOCKED(lockname) \
82 (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
87 83
88/* 84/*
89 * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and 85 * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
90 * are hence deprecated. 86 * deprecated.
91 * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or 87 * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
92 * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. 88 * appropriate.
93 */ 89 */
94#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) 90#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
95#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
96 91
97#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) 92#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
98#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) 93
94#include <linux/rwlock_types.h>
99 95
100#endif /* __LINUX_SPINLOCK_TYPES_H */ 96#endif /* __LINUX_SPINLOCK_TYPES_H */
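The raw_spinlock_t/spinlock_t split above leaves ordinary drivers on spinlock_t while code that must always spin declares a raw lock. A minimal sketch of both flavours, assuming the matching raw_spin_* wrappers from spinlock.h and purely hypothetical lock names:

/* Sketch only: the two lock flavours introduced above, side by side. */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* raw_spinlock_t */
static DEFINE_SPINLOCK(example_lock);		/* spinlock_t, wraps struct raw_spinlock via .rlock */

static void example_use_locks(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_raw_lock, flags);
	/* ... low-level critical section ... */
	raw_spin_unlock_irqrestore(&example_raw_lock, flags);

	spin_lock(&example_lock);
	/* ... ordinary driver critical section ... */
	spin_unlock(&example_lock);
}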
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..c09b6407ae1b 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
16 16
17typedef struct { 17typedef struct {
18 volatile unsigned int slock; 18 volatile unsigned int slock;
19} raw_spinlock_t; 19} arch_spinlock_t;
20 20
21#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 21#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
22 22
23#else 23#else
24 24
25typedef struct { } raw_spinlock_t; 25typedef struct { } arch_spinlock_t;
26 26
27#define __RAW_SPIN_LOCK_UNLOCKED { } 27#define __ARCH_SPIN_LOCK_UNLOCKED { }
28 28
29#endif 29#endif
30 30
31typedef struct { 31typedef struct {
32 /* no debug version on UP */ 32 /* no debug version on UP */
33} raw_rwlock_t; 33} arch_rwlock_t;
34 34
35#define __RAW_RW_LOCK_UNLOCKED { } 35#define __ARCH_RW_LOCK_UNLOCKED { }
36 36
37#endif /* __LINUX_SPINLOCK_TYPES_UP_H */ 37#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215b..b14f6a91e19f 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
18 */ 18 */
19 19
20#ifdef CONFIG_DEBUG_SPINLOCK 20#ifdef CONFIG_DEBUG_SPINLOCK
21#define __raw_spin_is_locked(x) ((x)->slock == 0) 21#define arch_spin_is_locked(x) ((x)->slock == 0)
22 22
23static inline void __raw_spin_lock(raw_spinlock_t *lock) 23static inline void arch_spin_lock(arch_spinlock_t *lock)
24{ 24{
25 lock->slock = 0; 25 lock->slock = 0;
26} 26}
27 27
28static inline void 28static inline void
29__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 29arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
30{ 30{
31 local_irq_save(flags); 31 local_irq_save(flags);
32 lock->slock = 0; 32 lock->slock = 0;
33} 33}
34 34
35static inline int __raw_spin_trylock(raw_spinlock_t *lock) 35static inline int arch_spin_trylock(arch_spinlock_t *lock)
36{ 36{
37 char oldval = lock->slock; 37 char oldval = lock->slock;
38 38
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41 return oldval > 0; 41 return oldval > 0;
42} 42}
43 43
44static inline void __raw_spin_unlock(raw_spinlock_t *lock) 44static inline void arch_spin_unlock(arch_spinlock_t *lock)
45{ 45{
46 lock->slock = 1; 46 lock->slock = 1;
47} 47}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
49/* 49/*
50 * Read-write spinlocks. No debug version. 50 * Read-write spinlocks. No debug version.
51 */ 51 */
52#define __raw_read_lock(lock) do { (void)(lock); } while (0) 52#define arch_read_lock(lock) do { (void)(lock); } while (0)
53#define __raw_write_lock(lock) do { (void)(lock); } while (0) 53#define arch_write_lock(lock) do { (void)(lock); } while (0)
54#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) 54#define arch_read_trylock(lock) ({ (void)(lock); 1; })
55#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) 55#define arch_write_trylock(lock) ({ (void)(lock); 1; })
56#define __raw_read_unlock(lock) do { (void)(lock); } while (0) 56#define arch_read_unlock(lock) do { (void)(lock); } while (0)
57#define __raw_write_unlock(lock) do { (void)(lock); } while (0) 57#define arch_write_unlock(lock) do { (void)(lock); } while (0)
58 58
59#else /* DEBUG_SPINLOCK */ 59#else /* DEBUG_SPINLOCK */
60#define __raw_spin_is_locked(lock) ((void)(lock), 0) 60#define arch_spin_is_locked(lock) ((void)(lock), 0)
61/* for sched.c and kernel_lock.c: */ 61/* for sched.c and kernel_lock.c: */
62# define __raw_spin_lock(lock) do { (void)(lock); } while (0) 62# define arch_spin_lock(lock) do { (void)(lock); } while (0)
63# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 63# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
64# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) 64# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
65# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) 65# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
66#endif /* DEBUG_SPINLOCK */ 66#endif /* DEBUG_SPINLOCK */
67 67
68#define __raw_spin_is_contended(lock) (((void)(lock), 0)) 68#define arch_spin_is_contended(lock) (((void)(lock), 0))
69 69
70#define __raw_read_can_lock(lock) (((void)(lock), 1)) 70#define arch_read_can_lock(lock) (((void)(lock), 1))
71#define __raw_write_can_lock(lock) (((void)(lock), 1)) 71#define arch_write_can_lock(lock) (((void)(lock), 1))
72 72
73#define __raw_spin_unlock_wait(lock) \ 73#define arch_spin_unlock_wait(lock) \
74 do { cpu_relax(); } while (__raw_spin_is_locked(lock)) 74 do { cpu_relax(); } while (arch_spin_is_locked(lock))
75 75
76#endif /* __LINUX_SPINLOCK_UP_H */ 76#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/string.h b/include/linux/string.h
index b8508868d5ad..651839a2a755 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,7 +62,15 @@ extern char * strnchr(const char *, size_t, int);
62#ifndef __HAVE_ARCH_STRRCHR 62#ifndef __HAVE_ARCH_STRRCHR
63extern char * strrchr(const char *,int); 63extern char * strrchr(const char *,int);
64#endif 64#endif
65extern char * __must_check strstrip(char *); 65extern char * __must_check skip_spaces(const char *);
66
67extern char *strim(char *);
68
69static inline __must_check char *strstrip(char *str)
70{
71 return strim(str);
72}
73
66#ifndef __HAVE_ARCH_STRSTR 74#ifndef __HAVE_ARCH_STRSTR
67extern char * strstr(const char *,const char *); 75extern char * strstr(const char *,const char *);
68#endif 76#endif
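With skip_spaces() and strim() declared above and strstrip() reduced to an inline wrapper, trimming user-supplied text looks roughly like the following sketch (the buffer name is made up):

/* Sketch: trimming a NUL-terminated buffer with the helpers declared above. */
#include <linux/string.h>

static char *example_trim(char *cmd_buf)
{
	char *s;

	s = skip_spaces(cmd_buf);	/* step over leading whitespace */
	s = strim(s);			/* drop trailing whitespace in place */
	return s;			/* strstrip(s) would be equivalent to strim(s) */
}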
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 10709cbe96fd..c2786f20016f 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -28,9 +28,6 @@
28 28
29#ifdef __KERNEL__ 29#ifdef __KERNEL__
30 30
31#include <linux/timer.h>
32#include <linux/workqueue.h>
33
34/* 31/*
35 * Enable RPC debugging/profiling. 32 * Enable RPC debugging/profiling.
36 */ 33 */
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 87b895d5c786..b78f16b1dea3 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -40,6 +40,8 @@
40#ifndef _LINUX_SUNRPC_RPC_RDMA_H 40#ifndef _LINUX_SUNRPC_RPC_RDMA_H
41#define _LINUX_SUNRPC_RPC_RDMA_H 41#define _LINUX_SUNRPC_RPC_RDMA_H
42 42
43#include <linux/types.h>
44
43struct rpcrdma_segment { 45struct rpcrdma_segment {
44 __be32 rs_handle; /* Registered memory handle */ 46 __be32 rs_handle; /* Registered memory handle */
45 __be32 rs_length; /* Length of the chunk in bytes */ 47 __be32 rs_length; /* Length of the chunk in bytes */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 52e8cb0a7569..5a3085b9b394 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -29,7 +29,6 @@ struct svc_pool_stats {
29 unsigned long packets; 29 unsigned long packets;
30 unsigned long sockets_queued; 30 unsigned long sockets_queued;
31 unsigned long threads_woken; 31 unsigned long threads_woken;
32 unsigned long overloads_avoided;
33 unsigned long threads_timedout; 32 unsigned long threads_timedout;
34}; 33};
35 34
@@ -50,7 +49,6 @@ struct svc_pool {
50 struct list_head sp_sockets; /* pending sockets */ 49 struct list_head sp_sockets; /* pending sockets */
51 unsigned int sp_nrthreads; /* # of threads in pool */ 50 unsigned int sp_nrthreads; /* # of threads in pool */
52 struct list_head sp_all_threads; /* all server threads */ 51 struct list_head sp_all_threads; /* all server threads */
53 int sp_nwaking; /* number of threads woken but not yet active */
54 struct svc_pool_stats sp_stats; /* statistics on pool operation */ 52 struct svc_pool_stats sp_stats; /* statistics on pool operation */
55} ____cacheline_aligned_in_smp; 53} ____cacheline_aligned_in_smp;
56 54
@@ -275,16 +273,11 @@ struct svc_rqst {
275 struct auth_domain * rq_client; /* RPC peer info */ 273 struct auth_domain * rq_client; /* RPC peer info */
276 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ 274 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
277 struct svc_cacherep * rq_cacherep; /* cache info */ 275 struct svc_cacherep * rq_cacherep; /* cache info */
278 struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to
279 * determine what device number
280 * to report (real or virtual)
281 */
282 int rq_splice_ok; /* turned off in gss privacy 276 int rq_splice_ok; /* turned off in gss privacy
283 * to prevent encrypting page 277 * to prevent encrypting page
284 * cache pages */ 278 * cache pages */
285 wait_queue_head_t rq_wait; /* synchronization */ 279 wait_queue_head_t rq_wait; /* synchronization */
286 struct task_struct *rq_task; /* service thread */ 280 struct task_struct *rq_task; /* service thread */
287 int rq_waking; /* 1 if thread is being woken */
288}; 281};
289 282
290/* 283/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4ec90019c1a4..a2602a8207a6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -145,38 +145,43 @@ enum {
145 SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ 145 SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
146 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ 146 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
147 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ 147 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
148 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
148 /* add others here before... */ 149 /* add others here before... */
149 SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ 150 SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
150}; 151};
151 152
152#define SWAP_CLUSTER_MAX 32 153#define SWAP_CLUSTER_MAX 32
153 154
154#define SWAP_MAP_MAX 0x7ffe 155#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
155#define SWAP_MAP_BAD 0x7fff 156#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
156#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */ 157#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
157#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE) 158#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
159#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
160#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
161
158/* 162/*
159 * The in-memory structure used to track swap areas. 163 * The in-memory structure used to track swap areas.
160 */ 164 */
161struct swap_info_struct { 165struct swap_info_struct {
162 unsigned long flags; 166 unsigned long flags; /* SWP_USED etc: see above */
163 int prio; /* swap priority */ 167 signed short prio; /* swap priority of this type */
164 int next; /* next entry on swap list */ 168 signed char type; /* strange name for an index */
165 struct file *swap_file; 169 signed char next; /* next type on the swap list */
166 struct block_device *bdev; 170 unsigned int max; /* extent of the swap_map */
167 struct list_head extent_list; 171 unsigned char *swap_map; /* vmalloc'ed array of usage counts */
168 struct swap_extent *curr_swap_extent; 172 unsigned int lowest_bit; /* index of first free in swap_map */
169 unsigned short *swap_map; 173 unsigned int highest_bit; /* index of last free in swap_map */
170 unsigned int lowest_bit; 174 unsigned int pages; /* total of usable pages of swap */
171 unsigned int highest_bit; 175 unsigned int inuse_pages; /* number of those currently in use */
176 unsigned int cluster_next; /* likely index for next allocation */
177 unsigned int cluster_nr; /* countdown to next cluster search */
172 unsigned int lowest_alloc; /* while preparing discard cluster */ 178 unsigned int lowest_alloc; /* while preparing discard cluster */
173 unsigned int highest_alloc; /* while preparing discard cluster */ 179 unsigned int highest_alloc; /* while preparing discard cluster */
174 unsigned int cluster_next; 180 struct swap_extent *curr_swap_extent;
175 unsigned int cluster_nr; 181 struct swap_extent first_swap_extent;
176 unsigned int pages; 182 struct block_device *bdev; /* swap device or bdev of swap file */
177 unsigned int max; 183 struct file *swap_file; /* seldom referenced */
178 unsigned int inuse_pages; 184 unsigned int old_block_size; /* seldom referenced */
179 unsigned int old_block_size;
180}; 185};
181 186
182struct swap_list_t { 187struct swap_list_t {
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node);
273extern void scan_unevictable_unregister_node(struct node *node); 278extern void scan_unevictable_unregister_node(struct node *node);
274 279
275extern int kswapd_run(int nid); 280extern int kswapd_run(int nid);
281extern void kswapd_stop(int nid);
276 282
277#ifdef CONFIG_MMU 283#ifdef CONFIG_MMU
278/* linux/mm/shmem.c */ 284/* linux/mm/shmem.c */
@@ -309,17 +315,18 @@ extern long total_swap_pages;
309extern void si_swapinfo(struct sysinfo *); 315extern void si_swapinfo(struct sysinfo *);
310extern swp_entry_t get_swap_page(void); 316extern swp_entry_t get_swap_page(void);
311extern swp_entry_t get_swap_page_of_type(int); 317extern swp_entry_t get_swap_page_of_type(int);
312extern void swap_duplicate(swp_entry_t);
313extern int swapcache_prepare(swp_entry_t);
314extern int valid_swaphandles(swp_entry_t, unsigned long *); 318extern int valid_swaphandles(swp_entry_t, unsigned long *);
319extern int add_swap_count_continuation(swp_entry_t, gfp_t);
320extern void swap_shmem_alloc(swp_entry_t);
321extern int swap_duplicate(swp_entry_t);
322extern int swapcache_prepare(swp_entry_t);
315extern void swap_free(swp_entry_t); 323extern void swap_free(swp_entry_t);
316extern void swapcache_free(swp_entry_t, struct page *page); 324extern void swapcache_free(swp_entry_t, struct page *page);
317extern int free_swap_and_cache(swp_entry_t); 325extern int free_swap_and_cache(swp_entry_t);
318extern int swap_type_of(dev_t, sector_t, struct block_device **); 326extern int swap_type_of(dev_t, sector_t, struct block_device **);
319extern unsigned int count_swap_pages(int, int); 327extern unsigned int count_swap_pages(int, int);
320extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); 328extern sector_t map_swap_page(struct page *, struct block_device **);
321extern sector_t swapdev_block(int, pgoff_t); 329extern sector_t swapdev_block(int, pgoff_t);
322extern struct swap_info_struct *get_swap_info_struct(unsigned);
323extern int reuse_swap_page(struct page *); 330extern int reuse_swap_page(struct page *);
324extern int try_to_free_swap(struct page *); 331extern int try_to_free_swap(struct page *);
325struct backing_dev_info; 332struct backing_dev_info;
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void)
384#define free_swap_and_cache(swp) is_migration_entry(swp) 391#define free_swap_and_cache(swp) is_migration_entry(swp)
385#define swapcache_prepare(swp) is_migration_entry(swp) 392#define swapcache_prepare(swp) is_migration_entry(swp)
386 393
387static inline void swap_duplicate(swp_entry_t swp) 394static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
388{ 395{
396 return 0;
397}
398
399static inline void swap_shmem_alloc(swp_entry_t swp)
400{
401}
402
403static inline int swap_duplicate(swp_entry_t swp)
404{
405 return 0;
389} 406}
390 407
391static inline void swap_free(swp_entry_t swp) 408static inline void swap_free(swp_entry_t swp)
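Since swap_duplicate() now returns an error code, a caller is expected to allocate a count continuation and retry when the per-entry count overflows. A rough, simplified sketch of that pattern (the helper name and error handling are assumptions, not taken from mm/):

/* Sketch: retrying swap_duplicate() after allocating a count continuation. */
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_dup_swap(swp_entry_t entry)
{
	while (swap_duplicate(entry) == -ENOMEM) {
		if (add_swap_count_continuation(entry, GFP_KERNEL))
			return -ENOMEM;	/* continuation page could not be allocated */
	}
	return 0;
}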
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h
new file mode 100644
index 000000000000..ce456eaae861
--- /dev/null
+++ b/include/linux/timb_gpio.h
@@ -0,0 +1,37 @@
1/*
2 * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef _LINUX_TIMB_GPIO_H
20#define _LINUX_TIMB_GPIO_H
21
22/**
23 * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
24 * @gpio_base The number of the first GPIO pin, set to -1 for
25 * dynamic number allocation.
26 * @nr_pins Number of pins that are supported by the hardware (1-32)
27 * @irq_base If IRQs are supported by the hardware, this is the base
28 * IRQ number. One IRQ per pin will be used. Set to
29 * -1 if IRQs are not supported.
30 */
31struct timbgpio_platform_data {
32 int gpio_base;
33 int nr_pins;
34 int irq_base;
35};
36
37#endif
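For illustration, a board file might hand this platform data to the GPIO driver roughly as below; the platform device name and field values are assumptions, not taken from the driver sources:

/* Sketch: hypothetical board registration of the Timberdale GPIO block. */
#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

static struct timbgpio_platform_data example_timbgpio_pdata = {
	.gpio_base = -1,	/* let gpiolib pick the GPIO numbers */
	.nr_pins   = 32,
	.irq_base  = -1,	/* no per-pin interrupts wired up */
};

static struct platform_device example_timbgpio_device = {
	.name = "timb-gpio",	/* assumed driver name */
	.id   = -1,
	.dev  = {
		.platform_data = &example_timbgpio_pdata,
	},
};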
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 1eb44a924e56..10db0102a890 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -134,6 +134,13 @@ static inline __must_check int tracehook_report_syscall_entry(
134 */ 134 */
135static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) 135static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
136{ 136{
137 if (step) {
138 siginfo_t info;
139 user_single_step_siginfo(current, regs, &info);
140 force_sig_info(SIGTRAP, &info, current);
141 return;
142 }
143
137 ptrace_report_syscall(regs); 144 ptrace_report_syscall(regs);
138} 145}
139 146
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 405a9035fe40..ef3a2947b102 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *);
350 350
351extern struct ktermios tty_std_termios; 351extern struct ktermios tty_std_termios;
352 352
353extern int kmsg_redirect;
354
355extern void console_init(void); 353extern void console_init(void);
356extern int vcs_init(void); 354extern int vcs_init(void);
357 355
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 32b92298fd79..d4962a782b8a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -294,6 +294,7 @@ struct v4l2_pix_format {
294 294
295/* Grey formats */ 295/* Grey formats */
296#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ 296#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
297#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
297#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ 298#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
298 299
299/* Palette formats */ 300/* Palette formats */
@@ -329,7 +330,11 @@ struct v4l2_pix_format {
329#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ 330#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
330#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ 331#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
331#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ 332#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
332#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */ 333#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
334#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
335#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
336#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
337#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
333 /* 10bit raw bayer DPCM compressed to 8 bits */ 338 /* 10bit raw bayer DPCM compressed to 8 bits */
334#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0') 339#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
335 /* 340 /*
@@ -732,6 +737,99 @@ struct v4l2_standard {
732}; 737};
733 738
734/* 739/*
740 * V I D E O T I M I N G S D V P R E S E T
741 */
742struct v4l2_dv_preset {
743 __u32 preset;
744 __u32 reserved[4];
745};
746
747/*
748 * D V P R E S E T S E N U M E R A T I O N
749 */
750struct v4l2_dv_enum_preset {
751 __u32 index;
752 __u32 preset;
753 __u8 name[32]; /* Name of the preset timing */
754 __u32 width;
755 __u32 height;
756 __u32 reserved[4];
757};
758
759/*
760 * D V P R E S E T V A L U E S
761 */
762#define V4L2_DV_INVALID 0
763#define V4L2_DV_480P59_94 1 /* BT.1362 */
764#define V4L2_DV_576P50 2 /* BT.1362 */
765#define V4L2_DV_720P24 3 /* SMPTE 296M */
766#define V4L2_DV_720P25 4 /* SMPTE 296M */
767#define V4L2_DV_720P30 5 /* SMPTE 296M */
768#define V4L2_DV_720P50 6 /* SMPTE 296M */
769#define V4L2_DV_720P59_94 7 /* SMPTE 274M */
770#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
771#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
772#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
773#define V4L2_DV_1080I25 11 /* BT.1120 */
774#define V4L2_DV_1080I50 12 /* SMPTE 296M */
775#define V4L2_DV_1080I60 13 /* SMPTE 296M */
776#define V4L2_DV_1080P24 14 /* SMPTE 296M */
777#define V4L2_DV_1080P25 15 /* SMPTE 296M */
778#define V4L2_DV_1080P30 16 /* SMPTE 296M */
779#define V4L2_DV_1080P50 17 /* BT.1120 */
780#define V4L2_DV_1080P60 18 /* BT.1120 */
781
782/*
783 * D V B T T I M I N G S
784 */
785
786/* BT.656/BT.1120 timing data */
787struct v4l2_bt_timings {
788 __u32 width; /* width in pixels */
789 __u32 height; /* height in lines */
790 __u32 interlaced; /* Interlaced or progressive */
791 __u32 polarities; /* Positive or negative polarity */
792 __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz->74250000 */
793 __u32 hfrontporch; /* Horizontal front porch in pixels */
794 __u32 hsync; /* Horizontal Sync length in pixels */
795 __u32 hbackporch; /* Horizontal back porch in pixels */
796 __u32 vfrontporch; /* Vertical front porch in pixels */
797 __u32 vsync; /* Vertical Sync length in lines */
798 __u32 vbackporch; /* Vertical back porch in lines */
799 __u32 il_vfrontporch; /* Vertical front porch for bottom field of
800 * interlaced field formats
801 */
802 __u32 il_vsync; /* Vertical sync length for bottom field of
803 * interlaced field formats
804 */
805 __u32 il_vbackporch; /* Vertical back porch for bottom field of
806 * interlaced field formats
807 */
808 __u32 reserved[16];
809} __attribute__ ((packed));
810
811/* Interlaced or progressive format */
812#define V4L2_DV_PROGRESSIVE 0
813#define V4L2_DV_INTERLACED 1
814
815/* Polarities. If bit is not set, it is assumed to be negative polarity */
816#define V4L2_DV_VSYNC_POS_POL 0x00000001
817#define V4L2_DV_HSYNC_POS_POL 0x00000002
818
819
820/* DV timings */
821struct v4l2_dv_timings {
822 __u32 type;
823 union {
824 struct v4l2_bt_timings bt;
825 __u32 reserved[32];
826 };
827} __attribute__ ((packed));
828
829/* Values for the type field */
830#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
831
832/*
735 * V I D E O I N P U T S 833 * V I D E O I N P U T S
736 */ 834 */
737struct v4l2_input { 835struct v4l2_input {
@@ -742,7 +840,8 @@ struct v4l2_input {
742 __u32 tuner; /* Associated tuner */ 840 __u32 tuner; /* Associated tuner */
743 v4l2_std_id std; 841 v4l2_std_id std;
744 __u32 status; 842 __u32 status;
745 __u32 reserved[4]; 843 __u32 capabilities;
844 __u32 reserved[3];
746}; 845};
747 846
748/* Values for the 'type' field */ 847/* Values for the 'type' field */
@@ -773,6 +872,11 @@ struct v4l2_input {
773#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ 872#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
774#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ 873#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
775 874
875/* capabilities flags */
876#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
877#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
878#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
879
776/* 880/*
777 * V I D E O O U T P U T S 881 * V I D E O O U T P U T S
778 */ 882 */
@@ -783,13 +887,19 @@ struct v4l2_output {
783 __u32 audioset; /* Associated audios (bitfield) */ 887 __u32 audioset; /* Associated audios (bitfield) */
784 __u32 modulator; /* Associated modulator */ 888 __u32 modulator; /* Associated modulator */
785 v4l2_std_id std; 889 v4l2_std_id std;
786 __u32 reserved[4]; 890 __u32 capabilities;
891 __u32 reserved[3];
787}; 892};
788/* Values for the 'type' field */ 893/* Values for the 'type' field */
789#define V4L2_OUTPUT_TYPE_MODULATOR 1 894#define V4L2_OUTPUT_TYPE_MODULATOR 1
790#define V4L2_OUTPUT_TYPE_ANALOG 2 895#define V4L2_OUTPUT_TYPE_ANALOG 2
791#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 896#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
792 897
898/* capabilities flags */
899#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
900#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
901#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
902
793/* 903/*
794 * C O N T R O L S 904 * C O N T R O L S
795 */ 905 */
@@ -1624,6 +1734,13 @@ struct v4l2_dbg_chip_ident {
1624#endif 1734#endif
1625 1735
1626#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) 1736#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
1737#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset)
1738#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset)
1739#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset)
1740#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset)
1741#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
1742#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
1743
1627/* Reminder: when adding new ioctls please add support for them to 1744/* Reminder: when adding new ioctls please add support for them to
1628 drivers/media/video/v4l2-compat-ioctl32.c as well! */ 1745 drivers/media/video/v4l2-compat-ioctl32.c as well! */
1629 1746
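From user space the new DV preset ioctls are used much like the classic video standard calls: enumerate what the input supports, then select one. A hedged sketch (device node and preset choice are arbitrary):

/* Sketch: enumerating and setting a DV preset from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_enum_preset ep;
	struct v4l2_dv_preset p;
	int fd = open("/dev/video0", O_RDWR);	/* arbitrary device node */

	if (fd < 0)
		return 1;

	memset(&ep, 0, sizeof(ep));
	for (ep.index = 0; ioctl(fd, VIDIOC_ENUM_DV_PRESETS, &ep) == 0; ep.index++)
		printf("preset %u: %s (%ux%u)\n",
		       ep.preset, (char *)ep.name, ep.width, ep.height);

	memset(&p, 0, sizeof(p));
	p.preset = V4L2_DV_720P60;
	if (ioctl(fd, VIDIOC_S_DV_PRESET, &p) < 0)
		perror("VIDIOC_S_DV_PRESET");
	return 0;
}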
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d85889710f9b..ee03bba9c5df 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
40 PGSCAN_ZONE_RECLAIM_FAILED, 40 PGSCAN_ZONE_RECLAIM_FAILED,
41#endif 41#endif
42 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, 42 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
43 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
44 KSWAPD_SKIP_CONGESTION_WAIT,
43 PAGEOUTRUN, ALLOCSTALL, PGROTATED, 45 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
44#ifdef CONFIG_HUGETLB_PAGE 46#ifdef CONFIG_HUGETLB_PAGE
45 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, 47 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7ffa11f06232..3fb9944e50a6 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -84,4 +84,19 @@ struct vt_setactivate {
84 84
85#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ 85#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
86 86
87#ifdef CONFIG_VT_CONSOLE
88
89extern int vt_kmsg_redirect(int new);
90
91#else
92
93static inline int vt_kmsg_redirect(int new)
94{
95 return 0;
96}
97
98#endif
99
100#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
101
87#endif /* _LINUX_VT_H */ 102#endif /* _LINUX_VT_H */
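vt_kmsg_redirect() replaces the old kmsg_redirect global (removed from tty.h above): pass a console number to change the redirect, pass -1 to only read it. A short sketch with a made-up caller:

/* Sketch: querying and changing the kmsg redirect console. */
#include <linux/vt.h>

static void example_kmsg_redirect(void)
{
	int old;

	old = vt_get_kmsg_redirect();	/* same as vt_kmsg_redirect(-1): query only */
	vt_kmsg_redirect(3);		/* redirect kernel messages to VT 3 */
	vt_kmsg_redirect(old);		/* restore the previous setting */
}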
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index e41a99ee353e..2c6af24b905e 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -26,26 +26,7 @@
26#include <linux/input.h> 26#include <linux/input.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/spinlock.h> 29#include <media/ir-core.h>
30
31extern int media_ir_debug; /* media_ir_debug level (0,1,2) */
32#define IR_dprintk(level, fmt, arg...) if (media_ir_debug >= level) \
33 printk(KERN_DEBUG "%s: " fmt , __func__, ## arg)
34
35#define IR_TYPE_RC5 1
36#define IR_TYPE_PD 2 /* Pulse distance encoded IR */
37#define IR_TYPE_OTHER 99
38
39struct ir_scancode {
40 u16 scancode;
41 u32 keycode;
42};
43
44struct ir_scancode_table {
45 struct ir_scancode *scan;
46 int size;
47 spinlock_t lock;
48};
49 30
50#define RC5_START(x) (((x)>>12)&3) 31#define RC5_START(x) (((x)>>12)&3)
51#define RC5_TOGGLE(x) (((x)>>11)&1) 32#define RC5_TOGGLE(x) (((x)>>11)&1)
@@ -56,8 +37,6 @@ struct ir_input_state {
56 /* configuration */ 37 /* configuration */
57 int ir_type; 38 int ir_type;
58 39
59 struct ir_scancode_table keytable;
60
61 /* key info */ 40 /* key info */
62 u32 ir_key; /* ir scancode */ 41 u32 ir_key; /* ir scancode */
63 u32 keycode; /* linux key code */ 42 u32 keycode; /* linux key code */
@@ -105,7 +84,7 @@ struct card_ir {
105/* Routines from ir-functions.c */ 84/* Routines from ir-functions.c */
106 85
107int ir_input_init(struct input_dev *dev, struct ir_input_state *ir, 86int ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
108 int ir_type, struct ir_scancode_table *ir_codes); 87 int ir_type);
109void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir); 88void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir);
110void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir, 89void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir,
111 u32 ir_key); 90 u32 ir_key);
@@ -118,19 +97,6 @@ u32 ir_rc5_decode(unsigned int code);
118void ir_rc5_timer_end(unsigned long data); 97void ir_rc5_timer_end(unsigned long data);
119void ir_rc5_timer_keyup(unsigned long data); 98void ir_rc5_timer_keyup(unsigned long data);
120 99
121/* Routines from ir-keytable.c */
122
123u32 ir_g_keycode_from_table(struct input_dev *input_dev,
124 u32 scancode);
125
126int ir_set_keycode_table(struct input_dev *input_dev,
127 struct ir_scancode_table *rc_tab);
128
129int ir_roundup_tablesize(int n_elems);
130int ir_copy_table(struct ir_scancode_table *destin,
131 const struct ir_scancode_table *origin);
132void ir_input_free(struct input_dev *input_dev);
133
134/* scancode->keycode map tables from ir-keymaps.c */ 100/* scancode->keycode map tables from ir-keymaps.c */
135 101
136extern struct ir_scancode_table ir_codes_empty_table; 102extern struct ir_scancode_table ir_codes_empty_table;
@@ -195,4 +161,5 @@ extern struct ir_scancode_table ir_codes_evga_indtube_table;
195extern struct ir_scancode_table ir_codes_terratec_cinergy_xs_table; 161extern struct ir_scancode_table ir_codes_terratec_cinergy_xs_table;
196extern struct ir_scancode_table ir_codes_videomate_s350_table; 162extern struct ir_scancode_table ir_codes_videomate_s350_table;
197extern struct ir_scancode_table ir_codes_gadmei_rm008z_table; 163extern struct ir_scancode_table ir_codes_gadmei_rm008z_table;
164extern struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table;
198#endif 165#endif
diff --git a/include/media/ir-core.h b/include/media/ir-core.h
new file mode 100644
index 000000000000..299d201e1339
--- /dev/null
+++ b/include/media/ir-core.h
@@ -0,0 +1,62 @@
1/*
2 * Remote Controller core header
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation version 2 of the License.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _IR_CORE
15#define _IR_CORE
16
17#include <linux/input.h>
18#include <linux/spinlock.h>
19
20extern int ir_core_debug;
21#define IR_dprintk(level, fmt, arg...) if (ir_core_debug >= level) \
22 printk(KERN_DEBUG "%s: " fmt , __func__, ## arg)
23
24enum ir_type {
25 IR_TYPE_UNKNOWN = 0,
26 IR_TYPE_RC5 = 1,
27 IR_TYPE_PD = 2, /* Pulse distance encoded IR */
28 IR_TYPE_NEC = 3,
29 IR_TYPE_OTHER = 99,
30};
31
32struct ir_scancode {
33 u16 scancode;
34 u32 keycode;
35};
36
37struct ir_scancode_table {
38 struct ir_scancode *scan;
39 int size;
40 enum ir_type ir_type;
41 spinlock_t lock;
42};
43
44struct ir_input_dev {
45 struct input_dev *dev;
46 struct ir_scancode_table rc_tab;
47};
48
49/* Routines from ir-keytable.c */
50
51u32 ir_g_keycode_from_table(struct input_dev *input_dev,
52 u32 scancode);
53
54int ir_set_keycode_table(struct input_dev *input_dev,
55 struct ir_scancode_table *rc_tab);
56
57int ir_roundup_tablesize(int n_elems);
58int ir_input_register(struct input_dev *dev,
59 struct ir_scancode_table *ir_codes);
60void ir_input_unregister(struct input_dev *input_dev);
61
62#endif
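A driver now hands its scancode-to-keycode table to the IR core when registering its input device; a rough sketch with a hypothetical two-entry RC-5 keymap:

/* Sketch: registering a small keymap through the new IR core helpers. */
#include <linux/kernel.h>
#include <linux/input.h>
#include <media/ir-core.h>

static struct ir_scancode example_keys[] = {
	{ 0x0001, KEY_POWER },
	{ 0x0002, KEY_MUTE },
};

static struct ir_scancode_table example_keytable = {
	.scan    = example_keys,
	.size    = ARRAY_SIZE(example_keys),
	.ir_type = IR_TYPE_RC5,
};

static int example_register(struct input_dev *dev)
{
	return ir_input_register(dev, &example_keytable);
}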
diff --git a/include/media/mt9t112.h b/include/media/mt9t112.h
new file mode 100644
index 000000000000..a43c74ab05ec
--- /dev/null
+++ b/include/media/mt9t112.h
@@ -0,0 +1,30 @@
1/* mt9t112 Camera
2 *
3 * Copyright (C) 2009 Renesas Solutions Corp.
4 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __MT9T112_H__
12#define __MT9T112_H__
13
14#define MT9T112_FLAG_PCLK_RISING_EDGE (1 << 0)
15#define MT9T112_FLAG_DATAWIDTH_8 (1 << 1) /* default width is 10 */
16
17struct mt9t112_pll_divider {
18 u8 m, n;
19 u8 p1, p2, p3, p4, p5, p6, p7;
20};
21
22/*
23 * mt9t112 camera info
24 */
25struct mt9t112_camera_info {
26 u32 flags;
27 struct mt9t112_pll_divider divider;
28};
29
30#endif /* __MT9T112_H__ */
diff --git a/include/media/ov772x.h b/include/media/ov772x.h
index 30d9629198ef..14c77efd6a85 100644
--- a/include/media/ov772x.h
+++ b/include/media/ov772x.h
@@ -1,4 +1,5 @@
1/* ov772x Camera 1/*
2 * ov772x Camera
2 * 3 *
3 * Copyright (C) 2008 Renesas Solutions Corp. 4 * Copyright (C) 2008 Renesas Solutions Corp.
4 * Kuninori Morimoto <morimoto.kuninori@renesas.com> 5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
@@ -54,7 +55,6 @@ struct ov772x_edge_ctrl {
54struct ov772x_camera_info { 55struct ov772x_camera_info {
55 unsigned long buswidth; 56 unsigned long buswidth;
56 unsigned long flags; 57 unsigned long flags;
57 struct soc_camera_link link;
58 struct ov772x_edge_ctrl edgectrl; 58 struct ov772x_edge_ctrl edgectrl;
59}; 59};
60 60
diff --git a/include/media/rj54n1cb0c.h b/include/media/rj54n1cb0c.h
new file mode 100644
index 000000000000..8ae3288ae925
--- /dev/null
+++ b/include/media/rj54n1cb0c.h
@@ -0,0 +1,19 @@
1/*
2 * RJ54N1CB0C Private data
3 *
4 * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __RJ54N1CB0C_H__
12#define __RJ54N1CB0C_H__
13
14struct rj54n1_pdata {
15 unsigned int mclk_freq;
16 bool ioctl_high;
17};
18
19#endif
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index eed5fccc83f3..4aeff96ff7d8 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -108,8 +108,6 @@ struct saa7146_fh {
108 108
109struct saa7146_vv 109struct saa7146_vv
110{ 110{
111 int vbi_minor;
112
113 /* vbi capture */ 111 /* vbi capture */
114 struct saa7146_dmaqueue vbi_q; 112 struct saa7146_dmaqueue vbi_q;
115 /* vbi workaround interrupt queue */ 113 /* vbi workaround interrupt queue */
@@ -117,8 +115,6 @@ struct saa7146_vv
117 int vbi_fieldcount; 115 int vbi_fieldcount;
118 struct saa7146_fh *vbi_streaming; 116 struct saa7146_fh *vbi_streaming;
119 117
120 int video_minor;
121
122 int video_status; 118 int video_status;
123 struct saa7146_fh *video_fh; 119 struct saa7146_fh *video_fh;
124 120
diff --git a/include/media/sh_mobile_ceu.h b/include/media/sh_mobile_ceu.h
index 0f3524cff435..b67747836878 100644
--- a/include/media/sh_mobile_ceu.h
+++ b/include/media/sh_mobile_ceu.h
@@ -3,6 +3,8 @@
3 3
4#define SH_CEU_FLAG_USE_8BIT_BUS (1 << 0) /* use 8bit bus width */ 4#define SH_CEU_FLAG_USE_8BIT_BUS (1 << 0) /* use 8bit bus width */
5#define SH_CEU_FLAG_USE_16BIT_BUS (1 << 1) /* use 16bit bus width */ 5#define SH_CEU_FLAG_USE_16BIT_BUS (1 << 1) /* use 16bit bus width */
6#define SH_CEU_FLAG_HSYNC_LOW (1 << 2) /* default High if possible */
7#define SH_CEU_FLAG_VSYNC_LOW (1 << 3) /* default High if possible */
6 8
7struct sh_mobile_ceu_info { 9struct sh_mobile_ceu_info {
8 unsigned long flags; 10 unsigned long flags;
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 3d74e60032dd..dcc5b86bcb6c 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -24,18 +24,13 @@ struct soc_camera_device {
24 struct device *pdev; /* Platform device */ 24 struct device *pdev; /* Platform device */
25 s32 user_width; 25 s32 user_width;
26 s32 user_height; 26 s32 user_height;
27 unsigned short width_min; 27 enum v4l2_colorspace colorspace;
28 unsigned short height_min;
29 unsigned short y_skip_top; /* Lines to skip at the top */
30 unsigned char iface; /* Host number */ 28 unsigned char iface; /* Host number */
31 unsigned char devnum; /* Device number per host */ 29 unsigned char devnum; /* Device number per host */
32 unsigned char buswidth; /* See comment in .c */
33 struct soc_camera_sense *sense; /* See comment in struct definition */ 30 struct soc_camera_sense *sense; /* See comment in struct definition */
34 struct soc_camera_ops *ops; 31 struct soc_camera_ops *ops;
35 struct video_device *vdev; 32 struct video_device *vdev;
36 const struct soc_camera_data_format *current_fmt; 33 const struct soc_camera_format_xlate *current_fmt;
37 const struct soc_camera_data_format *formats;
38 int num_formats;
39 struct soc_camera_format_xlate *user_formats; 34 struct soc_camera_format_xlate *user_formats;
40 int num_user_formats; 35 int num_user_formats;
41 enum v4l2_field field; /* Preserve field over close() */ 36 enum v4l2_field field; /* Preserve field over close() */
@@ -107,6 +102,8 @@ struct soc_camera_link {
107 int i2c_adapter_id; 102 int i2c_adapter_id;
108 struct i2c_board_info *board_info; 103 struct i2c_board_info *board_info;
109 const char *module_name; 104 const char *module_name;
105 void *priv;
106
110 /* 107 /*
111 * For non-I2C devices platform platform has to provide methods to 108 * For non-I2C devices platform platform has to provide methods to
112 * add a device to the system and to remove 109 * add a device to the system and to remove
@@ -162,23 +159,13 @@ static inline struct v4l2_subdev *soc_camera_to_subdev(
162int soc_camera_host_register(struct soc_camera_host *ici); 159int soc_camera_host_register(struct soc_camera_host *ici);
163void soc_camera_host_unregister(struct soc_camera_host *ici); 160void soc_camera_host_unregister(struct soc_camera_host *ici);
164 161
165const struct soc_camera_data_format *soc_camera_format_by_fourcc(
166 struct soc_camera_device *icd, unsigned int fourcc);
167const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( 162const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
168 struct soc_camera_device *icd, unsigned int fourcc); 163 struct soc_camera_device *icd, unsigned int fourcc);
169 164
170struct soc_camera_data_format {
171 const char *name;
172 unsigned int depth;
173 __u32 fourcc;
174 enum v4l2_colorspace colorspace;
175};
176
177/** 165/**
178 * struct soc_camera_format_xlate - match between host and sensor formats 166 * struct soc_camera_format_xlate - match between host and sensor formats
179 * @cam_fmt: sensor format provided by the sensor 167 * @code: code of a sensor provided format
180 * @host_fmt: host format after host translation from cam_fmt 168 * @host_fmt: host format after host translation from code
181 * @buswidth: bus width for this format
182 * 169 *
183 * Host and sensor translation structure. Used in table of host and sensor 170 * Host and sensor translation structure. Used in table of host and sensor
184 * formats matchings in soc_camera_device. A host can override the generic list 171 * formats matchings in soc_camera_device. A host can override the generic list
@@ -186,9 +173,8 @@ struct soc_camera_data_format {
186 * format setup. 173 * format setup.
187 */ 174 */
188struct soc_camera_format_xlate { 175struct soc_camera_format_xlate {
189 const struct soc_camera_data_format *cam_fmt; 176 enum v4l2_mbus_pixelcode code;
190 const struct soc_camera_data_format *host_fmt; 177 const struct soc_mbus_pixelfmt *host_fmt;
191 unsigned char buswidth;
192}; 178};
193 179
194struct soc_camera_ops { 180struct soc_camera_ops {
diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h
index bb70401b8141..0ecefe227b76 100644
--- a/include/media/soc_camera_platform.h
+++ b/include/media/soc_camera_platform.h
@@ -19,11 +19,10 @@ struct device;
19struct soc_camera_platform_info { 19struct soc_camera_platform_info {
20 const char *format_name; 20 const char *format_name;
21 unsigned long format_depth; 21 unsigned long format_depth;
22 struct v4l2_pix_format format; 22 struct v4l2_mbus_framefmt format;
23 unsigned long bus_param; 23 unsigned long bus_param;
24 struct device *dev; 24 struct device *dev;
25 int (*set_capture)(struct soc_camera_platform_info *info, int enable); 25 int (*set_capture)(struct soc_camera_platform_info *info, int enable);
26 struct soc_camera_link link;
27}; 26};
28 27
29#endif /* __SOC_CAMERA_H__ */ 28#endif /* __SOC_CAMERA_H__ */
diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h
new file mode 100644
index 000000000000..037cd7be001e
--- /dev/null
+++ b/include/media/soc_mediabus.h
@@ -0,0 +1,65 @@
1/*
2 * SoC-camera Media Bus API extensions
3 *
4 * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef SOC_MEDIABUS_H
12#define SOC_MEDIABUS_H
13
14#include <linux/videodev2.h>
15
16#include <media/v4l2-mediabus.h>
17
18/**
19 * enum soc_mbus_packing - data packing types on the media-bus
20 * @SOC_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM
21 * @SOC_MBUS_PACKING_2X8_PADHI: 16 bits transferred in 2 8-bit samples, in the
22 * possibly incomplete byte high bits are padding
23 * @SOC_MBUS_PACKING_2X8_PADLO: as above, but low bits are padding
24 * @SOC_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended
25 * to 16 bits
26 */
27enum soc_mbus_packing {
28 SOC_MBUS_PACKING_NONE,
29 SOC_MBUS_PACKING_2X8_PADHI,
30 SOC_MBUS_PACKING_2X8_PADLO,
31 SOC_MBUS_PACKING_EXTEND16,
32};
33
34/**
35 * enum soc_mbus_order - sample order on the media bus
36 * @SOC_MBUS_ORDER_LE: least significant sample first
37 * @SOC_MBUS_ORDER_BE: most significant sample first
38 */
39enum soc_mbus_order {
40 SOC_MBUS_ORDER_LE,
41 SOC_MBUS_ORDER_BE,
42};
43
44/**
45 * struct soc_mbus_pixelfmt - Data format on the media bus
46 * @name: Name of the format
47 * @fourcc: Fourcc code, that will be obtained if the data is
48 * stored in memory in the following way:
49 * @packing: Type of sample-packing, that has to be used
50 * @order: Sample order when storing in memory
51 * @bits_per_sample: How many bits the bridge has to sample
52 */
53struct soc_mbus_pixelfmt {
54 const char *name;
55 u32 fourcc;
56 enum soc_mbus_packing packing;
57 enum soc_mbus_order order;
58 u8 bits_per_sample;
59};
60
61const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
62 enum v4l2_mbus_pixelcode code);
63s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf);
64
65#endif
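A camera host driver would typically translate a media-bus code into a memory format and derive the line stride from it; a minimal sketch using the two helpers declared above (the chosen code and width are arbitrary):

/* Sketch: looking up a bus format descriptor and computing bytes per line. */
#include <linux/errno.h>
#include <media/soc_mediabus.h>

static s32 example_bytes_per_line(u32 width)
{
	const struct soc_mbus_pixelfmt *fmt;

	fmt = soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8_LE);
	if (!fmt)
		return -EINVAL;

	/* YUYV in 2x8 packing is 2 bytes per pixel, so this yields width * 2 */
	return soc_mbus_bytes_per_line(width, fmt);
}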
diff --git a/include/media/tw9910.h b/include/media/tw9910.h
index 73231e7880d8..5e2895a05e6b 100644
--- a/include/media/tw9910.h
+++ b/include/media/tw9910.h
@@ -32,7 +32,6 @@ enum tw9910_mpout_pin {
32struct tw9910_video_info { 32struct tw9910_video_info {
33 unsigned long buswidth; 33 unsigned long buswidth;
34 enum tw9910_mpout_pin mpout; 34 enum tw9910_mpout_pin mpout;
35 struct soc_camera_link link;
36}; 35};
37 36
38 37
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 91942dbe64e3..6cc107d198a0 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -267,6 +267,8 @@ enum {
267 V4L2_IDENT_MT9V022IX7ATC = 45010, /* No way to detect "normal" I77ATx */ 267 V4L2_IDENT_MT9V022IX7ATC = 45010, /* No way to detect "normal" I77ATx */
268 V4L2_IDENT_MT9V022IX7ATM = 45015, /* and "lead free" IA7ATx chips */ 268 V4L2_IDENT_MT9V022IX7ATM = 45015, /* and "lead free" IA7ATx chips */
269 V4L2_IDENT_MT9T031 = 45020, 269 V4L2_IDENT_MT9T031 = 45020,
270 V4L2_IDENT_MT9T111 = 45021,
271 V4L2_IDENT_MT9T112 = 45022,
270 V4L2_IDENT_MT9V111 = 45031, 272 V4L2_IDENT_MT9V111 = 45031,
271 V4L2_IDENT_MT9V112 = 45032, 273 V4L2_IDENT_MT9V112 = 45032,
272 274
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 1c25b10da34b..1c7b259f341c 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -212,5 +212,5 @@ void v4l_bound_align_image(unsigned int *w, unsigned int wmin,
212 unsigned int *h, unsigned int hmin, 212 unsigned int *h, unsigned int hmin,
213 unsigned int hmax, unsigned int halign, 213 unsigned int hmax, unsigned int halign,
214 unsigned int salign); 214 unsigned int salign);
215 215int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info);
216#endif /* V4L2_COMMON_H_ */ 216#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 73c9867d744c..2dee93892ea2 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -28,10 +28,10 @@ struct v4l2_ioctl_callbacks;
28struct video_device; 28struct video_device;
29struct v4l2_device; 29struct v4l2_device;
30 30
31/* Flag to mark the video_device struct as unregistered. 31/* Flag to mark the video_device struct as registered.
32 Drivers can set this flag if they want to block all future 32 Drivers can clear this flag if they want to block all future
33 device access. It is set by video_unregister_device. */ 33 device access. It is cleared by video_unregister_device. */
34#define V4L2_FL_UNREGISTERED (0) 34#define V4L2_FL_REGISTERED (0)
35 35
36struct v4l2_file_operations { 36struct v4l2_file_operations {
37 struct module *owner; 37 struct module *owner;
@@ -96,9 +96,7 @@ struct video_device
96/* Register video devices. Note that if video_register_device fails, 96/* Register video devices. Note that if video_register_device fails,
97 the release() callback of the video_device structure is *not* called, so 97 the release() callback of the video_device structure is *not* called, so
98 the caller is responsible for freeing any data. Usually that means that 98 the caller is responsible for freeing any data. Usually that means that
99 you call video_device_release() on failure. 99 you call video_device_release() on failure. */
100
101 Also note that vdev->minor is set to -1 if the registration failed. */
102int __must_check video_register_device(struct video_device *vdev, int type, int nr); 100int __must_check video_register_device(struct video_device *vdev, int type, int nr);
103 101
104/* Same as video_register_device, but no warning is issued if the desired 102/* Same as video_register_device, but no warning is issued if the desired
@@ -106,7 +104,7 @@ int __must_check video_register_device(struct video_device *vdev, int type, int
106int __must_check video_register_device_no_warn(struct video_device *vdev, int type, int nr); 104int __must_check video_register_device_no_warn(struct video_device *vdev, int type, int nr);
107 105
108/* Unregister video devices. Will do nothing if vdev == NULL or 106/* Unregister video devices. Will do nothing if vdev == NULL or
109 vdev->minor < 0. */ 107 video_is_registered() returns false. */
110void video_unregister_device(struct video_device *vdev); 108void video_unregister_device(struct video_device *vdev);
111 109
112/* helper functions to alloc/release struct video_device, the 110/* helper functions to alloc/release struct video_device, the
@@ -141,9 +139,14 @@ static inline void *video_drvdata(struct file *file)
141 return video_get_drvdata(video_devdata(file)); 139 return video_get_drvdata(video_devdata(file));
142} 140}
143 141
144static inline int video_is_unregistered(struct video_device *vdev) 142static inline const char *video_device_node_name(struct video_device *vdev)
143{
144 return dev_name(&vdev->dev);
145}
146
147static inline int video_is_registered(struct video_device *vdev)
145{ 148{
146 return test_bit(V4L2_FL_UNREGISTERED, &vdev->flags); 149 return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
147} 150}
148 151
149#endif /* _V4L2_DEV_H */ 152#endif /* _V4L2_DEV_H */
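With the flag inverted, drivers test for a still-registered node instead of an unregistered one, and video_device_node_name() replaces hand-built "videoN" strings in messages. A brief sketch (the vdev pointer is assumed to come from the driver):

/* Sketch: using the registration test and node-name helper from above. */
#include <linux/kernel.h>
#include <media/v4l2-dev.h>

static void example_report(struct video_device *vdev)
{
	if (!video_is_registered(vdev))
		return;	/* device already went away, do not touch it */

	printk(KERN_INFO "using device node %s\n", video_device_node_name(vdev));
}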
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 7a4529defa88..e8ba0f2efbae 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -239,6 +239,21 @@ struct v4l2_ioctl_ops {
239 int (*vidioc_enum_frameintervals) (struct file *file, void *fh, 239 int (*vidioc_enum_frameintervals) (struct file *file, void *fh,
240 struct v4l2_frmivalenum *fival); 240 struct v4l2_frmivalenum *fival);
241 241
242 /* DV Timings IOCTLs */
243 int (*vidioc_enum_dv_presets) (struct file *file, void *fh,
244 struct v4l2_dv_enum_preset *preset);
245
246 int (*vidioc_s_dv_preset) (struct file *file, void *fh,
247 struct v4l2_dv_preset *preset);
248 int (*vidioc_g_dv_preset) (struct file *file, void *fh,
249 struct v4l2_dv_preset *preset);
250 int (*vidioc_query_dv_preset) (struct file *file, void *fh,
251 struct v4l2_dv_preset *qpreset);
252 int (*vidioc_s_dv_timings) (struct file *file, void *fh,
253 struct v4l2_dv_timings *timings);
254 int (*vidioc_g_dv_timings) (struct file *file, void *fh,
255 struct v4l2_dv_timings *timings);
256
242 /* For other private ioctls */ 257 /* For other private ioctls */
243 long (*vidioc_default) (struct file *file, void *fh, 258 long (*vidioc_default) (struct file *file, void *fh,
244 int cmd, void *arg); 259 int cmd, void *arg);
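A bridge driver that supports digital video presets would wire the new callbacks into its v4l2_ioctl_ops; a hedged sketch with placeholder handlers and a hard-coded preset:

/* Sketch: hooking DV preset callbacks into a driver's ioctl ops.
 * example_s_dv_preset()/example_query_dv_preset() are placeholders. */
#include <linux/errno.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

static int example_s_dv_preset(struct file *file, void *fh,
			       struct v4l2_dv_preset *preset)
{
	return preset->preset == V4L2_DV_720P60 ? 0 : -EINVAL;
}

static int example_query_dv_preset(struct file *file, void *fh,
				   struct v4l2_dv_preset *preset)
{
	preset->preset = V4L2_DV_720P60;	/* pretend we always detect 720p60 */
	return 0;
}

static const struct v4l2_ioctl_ops example_ioctl_ops = {
	.vidioc_s_dv_preset	= example_s_dv_preset,
	.vidioc_query_dv_preset	= example_query_dv_preset,
};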
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
new file mode 100644
index 000000000000..0dbe02ada259
--- /dev/null
+++ b/include/media/v4l2-mediabus.h
@@ -0,0 +1,61 @@
1/*
2 * Media Bus API header
3 *
4 * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef V4L2_MEDIABUS_H
12#define V4L2_MEDIABUS_H
13
14/*
15 * These pixel codes uniquely identify data formats on the media bus. Mostly
16 * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is
17 * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the
18 * data format is fixed. Additionally, "2X8" means that one pixel is transferred
19 * in two 8-bit samples, "BE" or "LE" specify in which order those samples are
20 * transferred over the bus: "LE" means that the least significant bits are
21 * transferred first, "BE" means that the most significant bits are transferred
22 * first, and "PADHI" and "PADLO" define which bits - low or high, in the
23 * incomplete high byte, are filled with padding bits.
24 */
25enum v4l2_mbus_pixelcode {
26 V4L2_MBUS_FMT_FIXED = 1,
27 V4L2_MBUS_FMT_YUYV8_2X8_LE,
28 V4L2_MBUS_FMT_YVYU8_2X8_LE,
29 V4L2_MBUS_FMT_YUYV8_2X8_BE,
30 V4L2_MBUS_FMT_YVYU8_2X8_BE,
31 V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
32 V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
33 V4L2_MBUS_FMT_RGB565_2X8_LE,
34 V4L2_MBUS_FMT_RGB565_2X8_BE,
35 V4L2_MBUS_FMT_SBGGR8_1X8,
36 V4L2_MBUS_FMT_SBGGR10_1X10,
37 V4L2_MBUS_FMT_GREY8_1X8,
38 V4L2_MBUS_FMT_Y10_1X10,
39 V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
40 V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
41 V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
42 V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
43};
44
45/**
46 * struct v4l2_mbus_framefmt - frame format on the media bus
47 * @width: frame width
48 * @height: frame height
49 * @code: data format code
50 * @field: used interlacing type
51 * @colorspace: colorspace of the data
52 */
53struct v4l2_mbus_framefmt {
54 __u32 width;
55 __u32 height;
56 enum v4l2_mbus_pixelcode code;
57 enum v4l2_field field;
58 enum v4l2_colorspace colorspace;
59};
60
61#endif
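
A sub-device's g_mbus_fmt handler (see the v4l2-subdev.h changes below) fills one of these structures with its current bus format; the concrete values here are purely illustrative of a 640x480 YUYV source:

	#include <linux/videodev2.h>
	#include <media/v4l2-mediabus.h>

	static void example_fill_mbus_fmt(struct v4l2_mbus_framefmt *mf)
	{
		mf->width      = 640;
		mf->height     = 480;
		mf->code       = V4L2_MBUS_FMT_YUYV8_2X8_BE; /* one pixel as two 8-bit samples */
		mf->field      = V4L2_FIELD_NONE;
		mf->colorspace = V4L2_COLORSPACE_JPEG;
	}
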
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 00bf17608453..9ba99cd39ee7 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -22,6 +22,7 @@
22#define _V4L2_SUBDEV_H 22#define _V4L2_SUBDEV_H
23 23
24#include <media/v4l2-common.h> 24#include <media/v4l2-common.h>
25#include <media/v4l2-mediabus.h>
25 26
26/* generic v4l2_device notify callback notification values */ 27/* generic v4l2_device notify callback notification values */
27#define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32) 28#define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32)
@@ -207,7 +208,7 @@ struct v4l2_subdev_audio_ops {
207 s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by 208 s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
208 video input devices. 209 video input devices.
209 210
210 s_crystal_freq: sets the frequency of the crystal used to generate the 211 s_crystal_freq: sets the frequency of the crystal used to generate the
211 clocks in Hz. An extra flags field allows device specific configuration 212 clocks in Hz. An extra flags field allows device specific configuration
212 regarding clock frequency dividers, etc. If not used, then set flags 213 regarding clock frequency dividers, etc. If not used, then set flags
213 to 0. If the frequency is not supported, then -EINVAL is returned. 214 to 0. If the frequency is not supported, then -EINVAL is returned.
@@ -217,6 +218,26 @@ struct v4l2_subdev_audio_ops {
217 218
218 s_routing: see s_routing in audio_ops, except this version is for video 219 s_routing: see s_routing in audio_ops, except this version is for video
219 devices. 220 devices.
221
222 s_dv_preset: set dv (Digital Video) preset in the sub device. Similar to
223 s_std()
224
225 query_dv_preset: query dv preset in the sub device. This is similar to
226 querystd()
227
228 s_dv_timings(): Set custom dv timings in the sub device. This is used
229 when sub device is capable of setting detailed timing information
230 in the hardware to generate/detect the video signal.
231
232 g_dv_timings(): Get custom dv timings in the sub device.
233
234 enum_mbus_fmt: enumerate pixel formats, provided by a video data source
235
236 g_mbus_fmt: get the current pixel format, provided by a video data source
237
238 try_mbus_fmt: try to set a pixel format on a video data source
239
240 s_mbus_fmt: set a pixel format on a video data source
220 */ 241 */
221struct v4l2_subdev_video_ops { 242struct v4l2_subdev_video_ops {
222 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); 243 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
@@ -240,6 +261,33 @@ struct v4l2_subdev_video_ops {
240 int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param); 261 int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
241 int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize); 262 int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
242 int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival); 263 int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival);
264 int (*s_dv_preset)(struct v4l2_subdev *sd,
265 struct v4l2_dv_preset *preset);
266 int (*query_dv_preset)(struct v4l2_subdev *sd,
267 struct v4l2_dv_preset *preset);
268 int (*s_dv_timings)(struct v4l2_subdev *sd,
269 struct v4l2_dv_timings *timings);
270 int (*g_dv_timings)(struct v4l2_subdev *sd,
271 struct v4l2_dv_timings *timings);
272 int (*enum_mbus_fmt)(struct v4l2_subdev *sd, int index,
273 enum v4l2_mbus_pixelcode *code);
274 int (*g_mbus_fmt)(struct v4l2_subdev *sd,
275 struct v4l2_mbus_framefmt *fmt);
276 int (*try_mbus_fmt)(struct v4l2_subdev *sd,
277 struct v4l2_mbus_framefmt *fmt);
278 int (*s_mbus_fmt)(struct v4l2_subdev *sd,
279 struct v4l2_mbus_framefmt *fmt);
280};
281
282/**
283 * struct v4l2_subdev_sensor_ops - v4l2-subdev sensor operations
284 * @g_skip_top_lines: number of lines at the top of the image to be skipped.
285 * This is needed for some sensors, which always corrupt
286 * several top lines of the output image, or which send their
287 * metadata in them.
288 */
289struct v4l2_subdev_sensor_ops {
290 int (*g_skip_top_lines)(struct v4l2_subdev *sd, u32 *lines);
243}; 291};
244 292
245/* 293/*
@@ -326,11 +374,12 @@ struct v4l2_subdev_ir_ops {
326}; 374};
327 375
328struct v4l2_subdev_ops { 376struct v4l2_subdev_ops {
329 const struct v4l2_subdev_core_ops *core; 377 const struct v4l2_subdev_core_ops *core;
330 const struct v4l2_subdev_tuner_ops *tuner; 378 const struct v4l2_subdev_tuner_ops *tuner;
331 const struct v4l2_subdev_audio_ops *audio; 379 const struct v4l2_subdev_audio_ops *audio;
332 const struct v4l2_subdev_video_ops *video; 380 const struct v4l2_subdev_video_ops *video;
333 const struct v4l2_subdev_ir_ops *ir; 381 const struct v4l2_subdev_ir_ops *ir;
382 const struct v4l2_subdev_sensor_ops *sensor;
334}; 383};
335 384
336#define V4L2_SUBDEV_NAME_SIZE 32 385#define V4L2_SUBDEV_NAME_SIZE 32
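
The new sensor ops group plugs into struct v4l2_subdev_ops next to the core/tuner/audio/video/ir groups. A hedged sketch of a sensor sub-device whose first output line is always corrupt (the mysensor_* names are invented):

	static int mysensor_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
	{
		*lines = 1;	/* skip the one corrupt line this sensor emits */
		return 0;
	}

	static const struct v4l2_subdev_sensor_ops mysensor_sensor_ops = {
		.g_skip_top_lines = mysensor_g_skip_top_lines,
	};

	static const struct v4l2_subdev_ops mysensor_ops = {
		/* .core and .video ops as before */
		.sensor = &mysensor_sensor_ops,
	};
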
diff --git a/include/net/dst.h b/include/net/dst.h
index 387cb3cfde7e..39c4a5963e12 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -113,7 +113,7 @@ dst_metric(const struct dst_entry *dst, int metric)
113static inline u32 113static inline u32
114dst_feature(const struct dst_entry *dst, u32 feature) 114dst_feature(const struct dst_entry *dst, u32 feature)
115{ 115{
116 return (dst ? dst_metric(dst, RTAX_FEATURES) & feature : 0); 116 return dst_metric(dst, RTAX_FEATURES) & feature;
117} 117}
118 118
119static inline u32 dst_mtu(const struct dst_entry *dst) 119static inline u32 dst_mtu(const struct dst_entry *dst)
diff --git a/include/net/ip.h b/include/net/ip.h
index e6b9d12d5f62..85108cfbb1ae 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -337,6 +337,7 @@ enum ip_defrag_users {
337 IP_DEFRAG_CALL_RA_CHAIN, 337 IP_DEFRAG_CALL_RA_CHAIN,
338 IP_DEFRAG_CONNTRACK_IN, 338 IP_DEFRAG_CONNTRACK_IN,
339 IP_DEFRAG_CONNTRACK_OUT, 339 IP_DEFRAG_CONNTRACK_OUT,
340 IP_DEFRAG_CONNTRACK_BRIDGE_IN,
340 IP_DEFRAG_VS_IN, 341 IP_DEFRAG_VS_IN,
341 IP_DEFRAG_VS_OUT, 342 IP_DEFRAG_VS_OUT,
342 IP_DEFRAG_VS_FWD 343 IP_DEFRAG_VS_FWD
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 92db8617d188..ccab5946c830 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -350,8 +350,16 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1,
350 350
351struct inet_frag_queue; 351struct inet_frag_queue;
352 352
353enum ip6_defrag_users {
354 IP6_DEFRAG_LOCAL_DELIVER,
355 IP6_DEFRAG_CONNTRACK_IN,
356 IP6_DEFRAG_CONNTRACK_OUT,
357 IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
358};
359
353struct ip6_create_arg { 360struct ip6_create_arg {
354 __be32 id; 361 __be32 id;
362 u32 user;
355 struct in6_addr *src; 363 struct in6_addr *src;
356 struct in6_addr *dst; 364 struct in6_addr *dst;
357}; 365};
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index abc55ad75c2b..1ee717eb5b09 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
9 9
10extern int nf_ct_frag6_init(void); 10extern int nf_ct_frag6_init(void);
11extern void nf_ct_frag6_cleanup(void); 11extern void nf_ct_frag6_cleanup(void);
12extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb); 12extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
13extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, 13extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
14 struct net_device *in, 14 struct net_device *in,
15 struct net_device *out, 15 struct net_device *out,
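
The extra u32 user argument and the IP6_DEFRAG_CONNTRACK_BRIDGE_IN user keep bridge-netfilter reassembly queues separate from ordinary conntrack input, so identical fragments seen on both paths no longer collide. A rough sketch of a caller, where from_bridge stands in for whatever test the hook actually uses:

	static unsigned int example_defrag6(struct sk_buff *skb, bool from_bridge)
	{
		u32 user = from_bridge ? IP6_DEFRAG_CONNTRACK_BRIDGE_IN
				       : IP6_DEFRAG_CONNTRACK_IN;
		struct sk_buff *reasm;

		reasm = nf_ct_frag6_gather(skb, user);
		if (!reasm)
			return NF_STOLEN;	/* fragment queued, waiting for the rest */
		/* reasm is the fully reassembled packet */
		return NF_ACCEPT;
	}
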
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1b6f7d348cee..34f5cc24d903 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -408,8 +408,7 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
408extern void tcp_parse_options(struct sk_buff *skb, 408extern void tcp_parse_options(struct sk_buff *skb,
409 struct tcp_options_received *opt_rx, 409 struct tcp_options_received *opt_rx,
410 u8 **hvpp, 410 u8 **hvpp,
411 int estab, 411 int estab);
412 struct dst_entry *dst);
413 412
414extern u8 *tcp_parse_md5sig_option(struct tcphdr *th); 413extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
415 414
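
With the dst argument gone, callers pass only the parsed-options struct, the cookie/MD5 hash location and the established flag. A call site now looks roughly like this (tmp_opt and hash_location are local illustration names):

	struct tcp_options_received tmp_opt;
	u8 *hash_location;

	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0 /* not yet established */);
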
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 483057b2f4b4..fa0d52b8e622 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -36,6 +36,7 @@
36 36
37#include <linux/in.h> 37#include <linux/in.h>
38#include <linux/in6.h> 38#include <linux/in6.h>
39#include <linux/if_arp.h>
39#include <linux/netdevice.h> 40#include <linux/netdevice.h>
40#include <linux/socket.h> 41#include <linux/socket.h>
41#include <rdma/ib_verbs.h> 42#include <rdma/ib_verbs.h>
@@ -60,8 +61,8 @@ struct rdma_dev_addr {
60 unsigned char src_dev_addr[MAX_ADDR_LEN]; 61 unsigned char src_dev_addr[MAX_ADDR_LEN];
61 unsigned char dst_dev_addr[MAX_ADDR_LEN]; 62 unsigned char dst_dev_addr[MAX_ADDR_LEN];
62 unsigned char broadcast[MAX_ADDR_LEN]; 63 unsigned char broadcast[MAX_ADDR_LEN];
63 enum rdma_node_type dev_type; 64 unsigned short dev_type;
64 struct net_device *src_dev; 65 int bound_dev_if;
65}; 66};
66 67
67/** 68/**
@@ -121,40 +122,29 @@ static inline void ib_addr_get_mgid(struct rdma_dev_addr *dev_addr,
121 memcpy(gid, dev_addr->broadcast + 4, sizeof *gid); 122 memcpy(gid, dev_addr->broadcast + 4, sizeof *gid);
122} 123}
123 124
124static inline void ib_addr_get_sgid(struct rdma_dev_addr *dev_addr, 125static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
125 union ib_gid *gid)
126{ 126{
127 memcpy(gid, dev_addr->src_dev_addr + 4, sizeof *gid); 127 return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0;
128} 128}
129 129
130static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr, 130static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
131 union ib_gid *gid)
132{ 131{
133 memcpy(dev_addr->src_dev_addr + 4, gid, sizeof *gid); 132 memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid);
134} 133}
135 134
136static inline void ib_addr_get_dgid(struct rdma_dev_addr *dev_addr, 135static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
137 union ib_gid *gid)
138{ 136{
139 memcpy(gid, dev_addr->dst_dev_addr + 4, sizeof *gid); 137 memcpy(dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid);
140} 138}
141 139
142static inline void ib_addr_set_dgid(struct rdma_dev_addr *dev_addr, 140static inline void rdma_addr_get_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
143 union ib_gid *gid)
144{ 141{
145 memcpy(dev_addr->dst_dev_addr + 4, gid, sizeof *gid); 142 memcpy(gid, dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid);
146} 143}
147 144
148static inline void iw_addr_get_sgid(struct rdma_dev_addr *dev_addr, 145static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
149 union ib_gid *gid)
150{
151 memcpy(gid, dev_addr->src_dev_addr, sizeof *gid);
152}
153
154static inline void iw_addr_get_dgid(struct rdma_dev_addr *dev_addr,
155 union ib_gid *gid)
156{ 146{
157 memcpy(gid, dev_addr->dst_dev_addr, sizeof *gid); 147 memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid);
158} 148}
159 149
160#endif /* IB_ADDR_H */ 150#endif /* IB_ADDR_H */
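
The rdma_addr_* helpers replace the separate ib_/iw_ variants by hiding the 4-byte GID offset that only ARPHRD_INFINIBAND devices need, so one call works for IB and iWARP/Ethernet addresses alike:

	union ib_gid sgid, dgid;

	/* offset is 4 for dev_type == ARPHRD_INFINIBAND, 0 otherwise */
	rdma_addr_get_sgid(dev_addr, &sgid);
	rdma_addr_get_dgid(dev_addr, &dgid);
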
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 3841c1aff692..1082afaed158 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -379,4 +379,10 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
379 struct ib_sa_path_rec *rec, 379 struct ib_sa_path_rec *rec,
380 struct ib_ah_attr *ah_attr); 380 struct ib_ah_attr *ah_attr);
381 381
382/**
383 * ib_sa_unpack_path - Convert a path record from MAD format to struct
384 * ib_sa_path_rec.
385 */
386void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec);
387
382#endif /* IB_SA_H */ 388#endif /* IB_SA_H */
diff --git a/include/rdma/ib_user_sa.h b/include/rdma/ib_user_sa.h
index 659120157e14..cfc7c9ba781e 100644
--- a/include/rdma/ib_user_sa.h
+++ b/include/rdma/ib_user_sa.h
@@ -35,6 +35,22 @@
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37 37
38enum {
39 IB_PATH_GMP = 1,
40 IB_PATH_PRIMARY = (1<<1),
41 IB_PATH_ALTERNATE = (1<<2),
42 IB_PATH_OUTBOUND = (1<<3),
43 IB_PATH_INBOUND = (1<<4),
44 IB_PATH_INBOUND_REVERSE = (1<<5),
45 IB_PATH_BIDIRECTIONAL = IB_PATH_OUTBOUND | IB_PATH_INBOUND_REVERSE
46};
47
48struct ib_path_rec_data {
49 __u32 flags;
50 __u32 reserved;
51 __u32 path_rec[16];
52};
53
38struct ib_user_path_rec { 54struct ib_user_path_rec {
39 __u8 dgid[16]; 55 __u8 dgid[16];
40 __u8 sgid[16]; 56 __u8 sgid[16];
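
Combined with ib_sa_unpack_path() added to ib_sa.h above, the new ib_path_rec_data array lets userspace hand the kernel MAD-formatted path records tagged with the IB_PATH_* flags. A hedged sketch of picking the primary outbound entry (everything except the two new symbols is illustrative):

	static int pick_primary_path(struct ib_path_rec_data *recs, int nrecs,
				     struct ib_sa_path_rec *rec)
	{
		int i;

		for (i = 0; i < nrecs; i++) {
			u32 want = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;

			if ((recs[i].flags & want) != want)
				continue;
			/* convert from on-the-wire MAD layout to struct ib_sa_path_rec */
			ib_sa_unpack_path(recs[i].path_rec, rec);
			return 0;
		}
		return -EINVAL;
	}
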
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c179318edd92..09509edb1c5f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1425,6 +1425,11 @@ int ib_destroy_qp(struct ib_qp *qp);
1425 * @send_wr: A list of work requests to post on the send queue. 1425 * @send_wr: A list of work requests to post on the send queue.
1426 * @bad_send_wr: On an immediate failure, this parameter will reference 1426 * @bad_send_wr: On an immediate failure, this parameter will reference
1427 * the work request that failed to be posted on the QP. 1427 * the work request that failed to be posted on the QP.
1428 *
1429 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1430 * error is returned, the QP state shall not be affected,
1431 * ib_post_send() will return an immediate error after queueing any
1432 * earlier work requests in the list.
1428 */ 1433 */
1429static inline int ib_post_send(struct ib_qp *qp, 1434static inline int ib_post_send(struct ib_qp *qp,
1430 struct ib_send_wr *send_wr, 1435 struct ib_send_wr *send_wr,
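
The new comment deserves spelling out: even when ib_post_send() fails immediately, work requests ahead of *bad_send_wr may already be queued, so callers must resume from bad_send_wr rather than assume nothing was posted. Roughly:

	struct ib_send_wr *bad_wr;
	int ret;

	ret = ib_post_send(qp, wr_list, &bad_wr);
	if (ret) {
		/*
		 * Entries before bad_wr were queued despite the error; only
		 * bad_wr and the remainder of the chain still need handling.
		 * handle_unposted() is a hypothetical helper, not a real API.
		 */
		handle_unposted(bad_wr);
	}
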
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
index c55705460b87..1d165022c02d 100644
--- a/include/rdma/rdma_user_cm.h
+++ b/include/rdma/rdma_user_cm.h
@@ -215,12 +215,14 @@ struct rdma_ucm_event_resp {
215 215
216/* Option levels */ 216/* Option levels */
217enum { 217enum {
218 RDMA_OPTION_ID = 0 218 RDMA_OPTION_ID = 0,
219 RDMA_OPTION_IB = 1
219}; 220};
220 221
221/* Option details */ 222/* Option details */
222enum { 223enum {
223 RDMA_OPTION_ID_TOS = 0 224 RDMA_OPTION_ID_TOS = 0,
225 RDMA_OPTION_IB_PATH = 1
224}; 226};
225 227
226struct rdma_ucm_set_option { 228struct rdma_ucm_set_option {
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index c051a50ed528..89d43b3d4cb9 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -38,6 +38,7 @@ struct da8xx_lcdc_platform_data {
38 const char manu_name[10]; 38 const char manu_name[10];
39 void *controller_data; 39 void *controller_data;
40 const char type[25]; 40 const char type[25];
41 void (*panel_power_ctrl)(int);
41}; 42};
42 43
43struct lcd_ctrl_config { 44struct lcd_ctrl_config {
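
Board code can now hand the da8xx LCDC driver a panel power callback through its platform data. A sketch, with the board name, GPIO and panel strings made up for illustration:

	static void myboard_panel_power(int on)
	{
		gpio_set_value(MYBOARD_GPIO_LCD_PWR, on);	/* hypothetical GPIO */
	}

	static struct da8xx_lcdc_platform_data myboard_lcdc_pdata = {
		.manu_name        = "sharp",
		.type             = "Sharp_LK043T1DG01",	/* illustrative panel id */
		.panel_power_ctrl = myboard_panel_power,
	};
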
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 25144ab22b95..288205457713 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -50,6 +50,8 @@ struct sh_mobile_lcdc_board_cfg {
50 void *board_data; 50 void *board_data;
51 int (*setup_sys)(void *board_data, void *sys_ops_handle, 51 int (*setup_sys)(void *board_data, void *sys_ops_handle,
52 struct sh_mobile_lcdc_sys_bus_ops *sys_ops); 52 struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
53 void (*start_transfer)(void *board_data, void *sys_ops_handle,
54 struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
53 void (*display_on)(void *board_data); 55 void (*display_on)(void *board_data);
54 void (*display_off)(void *board_data); 56 void (*display_off)(void *board_data);
55}; 57};
diff --git a/init/Kconfig b/init/Kconfig
index 54c655ce9c04..a23da9f01803 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1079,6 +1079,28 @@ config SLOB
1079 1079
1080endchoice 1080endchoice
1081 1081
1082config MMAP_ALLOW_UNINITIALIZED
1083 bool "Allow mmapped anonymous memory to be uninitialized"
1084 depends on EMBEDDED && !MMU
1085 default n
1086 help
1087 Normally, and according to the Linux spec, anonymous memory obtained
1088	  from mmap() has its contents cleared before it is passed to
1089 userspace. Enabling this config option allows you to request that
1090	  mmap() skip that if it is given a MAP_UNINITIALIZED flag, thus
1091 providing a huge performance boost. If this option is not enabled,
1092 then the flag will be ignored.
1093
1094 This is taken advantage of by uClibc's malloc(), and also by
1095 ELF-FDPIC binfmt's brk and stack allocator.
1096
1097 Because of the obvious security issues, this option should only be
1098 enabled on embedded devices where you control what is run in
1099 userspace. Since that isn't generally a problem on no-MMU systems,
1100 it is normally safe to say Y here.
1101
1102 See Documentation/nommu-mmap.txt for more information.
1103
1082config PROFILING 1104config PROFILING
1083 bool "Profiling support (EXPERIMENTAL)" 1105 bool "Profiling support (EXPERIMENTAL)"
1084 help 1106 help
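
On a no-MMU kernel built with this option, userspace opts out of the zero-fill by adding MAP_UNINITIALIZED to its mmap() flags; if the option is disabled the flag is silently ignored and the mapping is cleared as usual. A userspace sketch (the fallback #define mirrors the asm-generic value, but treat it as an assumption):

	#include <stddef.h>
	#include <sys/mman.h>

	#ifndef MAP_UNINITIALIZED
	#define MAP_UNINITIALIZED 0x4000000	/* assumed asm-generic value */
	#endif

	void *fast_anon_alloc(size_t len)
	{
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
		return p == MAP_FAILED ? NULL : p;	/* contents are NOT zeroed */
	}
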
diff --git a/init/main.c b/init/main.c
index 4051d75dd2d6..c3db4a98b369 100644
--- a/init/main.c
+++ b/init/main.c
@@ -691,10 +691,10 @@ asmlinkage void __init start_kernel(void)
691static void __init do_ctors(void) 691static void __init do_ctors(void)
692{ 692{
693#ifdef CONFIG_CONSTRUCTORS 693#ifdef CONFIG_CONSTRUCTORS
694 ctor_fn_t *call = (ctor_fn_t *) __ctors_start; 694 ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
695 695
696 for (; call < (ctor_fn_t *) __ctors_end; call++) 696 for (; fn < (ctor_fn_t *) __ctors_end; fn++)
697 (*call)(); 697 (*fn)();
698#endif 698#endif
699} 699}
700 700
@@ -755,10 +755,10 @@ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
755 755
756static void __init do_initcalls(void) 756static void __init do_initcalls(void)
757{ 757{
758 initcall_t *call; 758 initcall_t *fn;
759 759
760 for (call = __early_initcall_end; call < __initcall_end; call++) 760 for (fn = __early_initcall_end; fn < __initcall_end; fn++)
761 do_one_initcall(*call); 761 do_one_initcall(*fn);
762 762
763 /* Make sure there is no pending stuff from the initcall sequence */ 763 /* Make sure there is no pending stuff from the initcall sequence */
764 flush_scheduled_work(); 764 flush_scheduled_work();
@@ -785,10 +785,10 @@ static void __init do_basic_setup(void)
785 785
786static void __init do_pre_smp_initcalls(void) 786static void __init do_pre_smp_initcalls(void)
787{ 787{
788 initcall_t *call; 788 initcall_t *fn;
789 789
790 for (call = __initcall_start; call < __early_initcall_end; call++) 790 for (fn = __initcall_start; fn < __early_initcall_end; fn++)
791 do_one_initcall(*call); 791 do_one_initcall(*fn);
792} 792}
793 793
794static void run_init_process(char *init_filename) 794static void run_init_process(char *init_filename)
diff --git a/ipc/msg.c b/ipc/msg.c
index 085bd58f2f07..af42ef8900a6 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -125,6 +125,7 @@ void msg_init_ns(struct ipc_namespace *ns)
125void msg_exit_ns(struct ipc_namespace *ns) 125void msg_exit_ns(struct ipc_namespace *ns)
126{ 126{
127 free_ipcs(ns, &msg_ids(ns), freeque); 127 free_ipcs(ns, &msg_ids(ns), freeque);
128 idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
128} 129}
129#endif 130#endif
130 131
diff --git a/ipc/sem.c b/ipc/sem.c
index 87c2b641fd7b..dbef95b15941 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -129,6 +129,7 @@ void sem_init_ns(struct ipc_namespace *ns)
129void sem_exit_ns(struct ipc_namespace *ns) 129void sem_exit_ns(struct ipc_namespace *ns)
130{ 130{
131 free_ipcs(ns, &sem_ids(ns), freeary); 131 free_ipcs(ns, &sem_ids(ns), freeary);
132 idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
132} 133}
133#endif 134#endif
134 135
@@ -240,6 +241,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
240 key_t key = params->key; 241 key_t key = params->key;
241 int nsems = params->u.nsems; 242 int nsems = params->u.nsems;
242 int semflg = params->flg; 243 int semflg = params->flg;
244 int i;
243 245
244 if (!nsems) 246 if (!nsems)
245 return -EINVAL; 247 return -EINVAL;
@@ -272,6 +274,11 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
272 ns->used_sems += nsems; 274 ns->used_sems += nsems;
273 275
274 sma->sem_base = (struct sem *) &sma[1]; 276 sma->sem_base = (struct sem *) &sma[1];
277
278 for (i = 0; i < nsems; i++)
279 INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
280
281 sma->complex_count = 0;
275 INIT_LIST_HEAD(&sma->sem_pending); 282 INIT_LIST_HEAD(&sma->sem_pending);
276 INIT_LIST_HEAD(&sma->list_id); 283 INIT_LIST_HEAD(&sma->list_id);
277 sma->sem_nsems = nsems; 284 sma->sem_nsems = nsems;
@@ -397,63 +404,109 @@ undo:
397 return result; 404 return result;
398} 405}
399 406
400/* Go through the pending queue for the indicated semaphore 407/*
401 * looking for tasks that can be completed. 408 * Wake up a process waiting on the sem queue with a given error.
409 * The queue is invalid (may not be accessed) after the function returns.
402 */ 410 */
403static void update_queue (struct sem_array * sma) 411static void wake_up_sem_queue(struct sem_queue *q, int error)
404{ 412{
405 int error; 413 /*
406 struct sem_queue * q; 414 * Hold preempt off so that we don't get preempted and have the
415 * wakee busy-wait until we're scheduled back on. We're holding
416 * locks here so it may not strictly be needed, however if the
417 * locks become preemptible then this prevents such a problem.
418 */
419 preempt_disable();
420 q->status = IN_WAKEUP;
421 wake_up_process(q->sleeper);
422 /* hands-off: q can disappear immediately after writing q->status. */
423 smp_wmb();
424 q->status = error;
425 preempt_enable();
426}
427
428static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
429{
430 list_del(&q->list);
431 if (q->nsops == 1)
432 list_del(&q->simple_list);
433 else
434 sma->complex_count--;
435}
436
437
438/**
439 * update_queue(sma, semnum): Look for tasks that can be completed.
440 * @sma: semaphore array.
441 * @semnum: semaphore that was modified.
442 *
443 * update_queue must be called after a semaphore in a semaphore array
444	 * was modified. If multiple semaphores were modified, then @semnum
445 * must be set to -1.
446 */
447static void update_queue(struct sem_array *sma, int semnum)
448{
449 struct sem_queue *q;
450 struct list_head *walk;
451 struct list_head *pending_list;
452 int offset;
453
454 /* if there are complex operations around, then knowing the semaphore
455 * that was modified doesn't help us. Assume that multiple semaphores
456 * were modified.
457 */
458 if (sma->complex_count)
459 semnum = -1;
460
461 if (semnum == -1) {
462 pending_list = &sma->sem_pending;
463 offset = offsetof(struct sem_queue, list);
464 } else {
465 pending_list = &sma->sem_base[semnum].sem_pending;
466 offset = offsetof(struct sem_queue, simple_list);
467 }
468
469again:
470 walk = pending_list->next;
471 while (walk != pending_list) {
472 int error, alter;
473
474 q = (struct sem_queue *)((char *)walk - offset);
475 walk = walk->next;
476
477 /* If we are scanning the single sop, per-semaphore list of
478 * one semaphore and that semaphore is 0, then it is not
479 * necessary to scan the "alter" entries: simple increments
480 * that affect only one entry succeed immediately and cannot
481 * be in the per semaphore pending queue, and decrements
482 * cannot be successful if the value is already 0.
483 */
484 if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
485 q->alter)
486 break;
407 487
408 q = list_entry(sma->sem_pending.next, struct sem_queue, list);
409 while (&q->list != &sma->sem_pending) {
410 error = try_atomic_semop(sma, q->sops, q->nsops, 488 error = try_atomic_semop(sma, q->sops, q->nsops,
411 q->undo, q->pid); 489 q->undo, q->pid);
412 490
413 /* Does q->sleeper still need to sleep? */ 491 /* Does q->sleeper still need to sleep? */
414 if (error <= 0) { 492 if (error > 0)
415 struct sem_queue *n; 493 continue;
416
417 /*
418 * Continue scanning. The next operation
419 * that must be checked depends on the type of the
420 * completed operation:
421 * - if the operation modified the array, then
422 * restart from the head of the queue and
423 * check for threads that might be waiting
424 * for semaphore values to become 0.
425 * - if the operation didn't modify the array,
426 * then just continue.
427 * The order of list_del() and reading ->next
428 * is crucial: In the former case, the list_del()
429 * must be done first [because we might be the
430 * first entry in ->sem_pending], in the latter
431 * case the list_del() must be done last
432 * [because the list is invalid after the list_del()]
433 */
434 if (q->alter) {
435 list_del(&q->list);
436 n = list_entry(sma->sem_pending.next,
437 struct sem_queue, list);
438 } else {
439 n = list_entry(q->list.next, struct sem_queue,
440 list);
441 list_del(&q->list);
442 }
443
444 /* wake up the waiting thread */
445 q->status = IN_WAKEUP;
446 494
447 wake_up_process(q->sleeper); 495 unlink_queue(sma, q);
448 /* hands-off: q will disappear immediately after 496
449 * writing q->status. 497 /*
450 */ 498 * The next operation that must be checked depends on the type
451 smp_wmb(); 499 * of the completed operation:
452 q->status = error; 500 * - if the operation modified the array, then restart from the
453 q = n; 501 * head of the queue and check for threads that might be
454 } else { 502 * waiting for the new semaphore values.
455 q = list_entry(q->list.next, struct sem_queue, list); 503 * - if the operation didn't modify the array, then just
456 } 504 * continue.
505 */
506 alter = q->alter;
507 wake_up_sem_queue(q, error);
508 if (alter && !error)
509 goto again;
457 } 510 }
458} 511}
459 512
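
The single scan loop above walks either the array-wide sem_pending list or one semaphore's simple_list by subtracting the right list_head offset from the walk pointer; that arithmetic is an open-coded container_of, i.e. the q = (struct sem_queue *)((char *)walk - offset) line is equivalent to:

	if (semnum == -1)
		q = container_of(walk, struct sem_queue, list);
	else
		q = container_of(walk, struct sem_queue, simple_list);
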
@@ -533,12 +586,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
533 586
534 /* Wake up all pending processes and let them fail with EIDRM. */ 587 /* Wake up all pending processes and let them fail with EIDRM. */
535 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { 588 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
536 list_del(&q->list); 589 unlink_queue(sma, q);
537 590 wake_up_sem_queue(q, -EIDRM);
538 q->status = IN_WAKEUP;
539 wake_up_process(q->sleeper); /* doesn't sleep */
540 smp_wmb();
541 q->status = -EIDRM; /* hands-off q */
542 } 591 }
543 592
544 /* Remove the semaphore set from the IDR */ 593 /* Remove the semaphore set from the IDR */
@@ -575,7 +624,7 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
575static int semctl_nolock(struct ipc_namespace *ns, int semid, 624static int semctl_nolock(struct ipc_namespace *ns, int semid,
576 int cmd, int version, union semun arg) 625 int cmd, int version, union semun arg)
577{ 626{
578 int err = -EINVAL; 627 int err;
579 struct sem_array *sma; 628 struct sem_array *sma;
580 629
581 switch(cmd) { 630 switch(cmd) {
@@ -652,7 +701,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
652 default: 701 default:
653 return -EINVAL; 702 return -EINVAL;
654 } 703 }
655 return err;
656out_unlock: 704out_unlock:
657 sem_unlock(sma); 705 sem_unlock(sma);
658 return err; 706 return err;
@@ -759,7 +807,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
759 } 807 }
760 sma->sem_ctime = get_seconds(); 808 sma->sem_ctime = get_seconds();
761 /* maybe some queued-up processes were waiting for this */ 809 /* maybe some queued-up processes were waiting for this */
762 update_queue(sma); 810 update_queue(sma, -1);
763 err = 0; 811 err = 0;
764 goto out_unlock; 812 goto out_unlock;
765 } 813 }
@@ -801,7 +849,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
801 curr->sempid = task_tgid_vnr(current); 849 curr->sempid = task_tgid_vnr(current);
802 sma->sem_ctime = get_seconds(); 850 sma->sem_ctime = get_seconds();
803 /* maybe some queued-up processes were waiting for this */ 851 /* maybe some queued-up processes were waiting for this */
804 update_queue(sma); 852 update_queue(sma, semnum);
805 err = 0; 853 err = 0;
806 goto out_unlock; 854 goto out_unlock;
807 } 855 }
@@ -961,17 +1009,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
961 return 0; 1009 return 0;
962} 1010}
963 1011
964static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) 1012static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
965{ 1013{
966 struct sem_undo *walk; 1014 struct sem_undo *un;
967 1015
968 list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) { 1016 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
969 if (walk->semid == semid) 1017 if (un->semid == semid)
970 return walk; 1018 return un;
971 } 1019 }
972 return NULL; 1020 return NULL;
973} 1021}
974 1022
1023static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1024{
1025 struct sem_undo *un;
1026
1027 assert_spin_locked(&ulp->lock);
1028
1029 un = __lookup_undo(ulp, semid);
1030 if (un) {
1031 list_del_rcu(&un->list_proc);
1032 list_add_rcu(&un->list_proc, &ulp->list_proc);
1033 }
1034 return un;
1035}
1036
975/** 1037/**
976 * find_alloc_undo - Lookup (and if not present create) undo array 1038 * find_alloc_undo - Lookup (and if not present create) undo array
977 * @ns: namespace 1039 * @ns: namespace
@@ -1163,7 +1225,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1163 error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current)); 1225 error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
1164 if (error <= 0) { 1226 if (error <= 0) {
1165 if (alter && error == 0) 1227 if (alter && error == 0)
1166 update_queue (sma); 1228 update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
1229
1167 goto out_unlock_free; 1230 goto out_unlock_free;
1168 } 1231 }
1169 1232
@@ -1181,6 +1244,19 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1181 else 1244 else
1182 list_add(&queue.list, &sma->sem_pending); 1245 list_add(&queue.list, &sma->sem_pending);
1183 1246
1247 if (nsops == 1) {
1248 struct sem *curr;
1249 curr = &sma->sem_base[sops->sem_num];
1250
1251 if (alter)
1252 list_add_tail(&queue.simple_list, &curr->sem_pending);
1253 else
1254 list_add(&queue.simple_list, &curr->sem_pending);
1255 } else {
1256 INIT_LIST_HEAD(&queue.simple_list);
1257 sma->complex_count++;
1258 }
1259
1184 queue.status = -EINTR; 1260 queue.status = -EINTR;
1185 queue.sleeper = current; 1261 queue.sleeper = current;
1186 current->state = TASK_INTERRUPTIBLE; 1262 current->state = TASK_INTERRUPTIBLE;
@@ -1222,7 +1298,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1222 */ 1298 */
1223 if (timeout && jiffies_left == 0) 1299 if (timeout && jiffies_left == 0)
1224 error = -EAGAIN; 1300 error = -EAGAIN;
1225 list_del(&queue.list); 1301 unlink_queue(sma, &queue);
1226 1302
1227out_unlock_free: 1303out_unlock_free:
1228 sem_unlock(sma); 1304 sem_unlock(sma);
@@ -1307,7 +1383,7 @@ void exit_sem(struct task_struct *tsk)
1307 if (IS_ERR(sma)) 1383 if (IS_ERR(sma))
1308 continue; 1384 continue;
1309 1385
1310 un = lookup_undo(ulp, semid); 1386 un = __lookup_undo(ulp, semid);
1311 if (un == NULL) { 1387 if (un == NULL) {
1312 /* exit_sem raced with IPC_RMID+semget() that created 1388 /* exit_sem raced with IPC_RMID+semget() that created
1313 * exactly the same semid. Nothing to do. 1389 * exactly the same semid. Nothing to do.
@@ -1351,7 +1427,7 @@ void exit_sem(struct task_struct *tsk)
1351 } 1427 }
1352 sma->sem_otime = get_seconds(); 1428 sma->sem_otime = get_seconds();
1353 /* maybe some queued-up processes were waiting for this */ 1429 /* maybe some queued-up processes were waiting for this */
1354 update_queue(sma); 1430 update_queue(sma, -1);
1355 sem_unlock(sma); 1431 sem_unlock(sma);
1356 1432
1357 call_rcu(&un->rcu, free_un); 1433 call_rcu(&un->rcu, free_un);
@@ -1365,7 +1441,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1365 struct sem_array *sma = it; 1441 struct sem_array *sma = it;
1366 1442
1367 return seq_printf(s, 1443 return seq_printf(s,
1368 "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n", 1444 "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
1369 sma->sem_perm.key, 1445 sma->sem_perm.key,
1370 sma->sem_perm.id, 1446 sma->sem_perm.id,
1371 sma->sem_perm.mode, 1447 sma->sem_perm.mode,
diff --git a/ipc/shm.c b/ipc/shm.c
index 11bec626c228..e9b039f74129 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -101,6 +101,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
101void shm_exit_ns(struct ipc_namespace *ns) 101void shm_exit_ns(struct ipc_namespace *ns)
102{ 102{
103 free_ipcs(ns, &shm_ids(ns), do_shm_rmid); 103 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
104 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
104} 105}
105#endif 106#endif
106 107
diff --git a/kernel/acct.c b/kernel/acct.c
index 9a4715a2f6bf..a6605ca921b6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -536,7 +536,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
536 do_div(elapsed, AHZ); 536 do_div(elapsed, AHZ);
537 ac.ac_btime = get_seconds() - elapsed; 537 ac.ac_btime = get_seconds() - elapsed;
538 /* we really need to bite the bullet and change layout */ 538 /* we really need to bite the bullet and change layout */
539 current_uid_gid(&ac.ac_uid, &ac.ac_gid); 539 ac.ac_uid = orig_cred->uid;
540 ac.ac_gid = orig_cred->gid;
540#if ACCT_VERSION==2 541#if ACCT_VERSION==2
541 ac.ac_ahz = AHZ; 542 ac.ac_ahz = AHZ;
542#endif 543#endif
diff --git a/kernel/exit.c b/kernel/exit.c
index 6f50ef55a6f3..5962d7ccf243 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code)
933 * an exiting task cleaning up the robust pi futexes. 933 * an exiting task cleaning up the robust pi futexes.
934 */ 934 */
935 smp_mb(); 935 smp_mb();
936 spin_unlock_wait(&tsk->pi_lock); 936 raw_spin_unlock_wait(&tsk->pi_lock);
937 937
938 if (unlikely(in_atomic())) 938 if (unlikely(in_atomic()))
939 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", 939 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
diff --git a/kernel/fork.c b/kernel/fork.c
index 1415dc4598ae..202a0ba63d3c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
939 939
940static void rt_mutex_init_task(struct task_struct *p) 940static void rt_mutex_init_task(struct task_struct *p)
941{ 941{
942 spin_lock_init(&p->pi_lock); 942 raw_spin_lock_init(&p->pi_lock);
943#ifdef CONFIG_RT_MUTEXES 943#ifdef CONFIG_RT_MUTEXES
944 plist_head_init(&p->pi_waiters, &p->pi_lock); 944 plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
945 p->pi_blocked_on = NULL; 945 p->pi_blocked_on = NULL;
946#endif 946#endif
947} 947}
@@ -1127,6 +1127,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1127#ifdef CONFIG_DEBUG_MUTEXES 1127#ifdef CONFIG_DEBUG_MUTEXES
1128 p->blocked_on = NULL; /* not blocked yet */ 1128 p->blocked_on = NULL; /* not blocked yet */
1129#endif 1129#endif
1130#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1131 p->memcg_batch.do_batch = 0;
1132 p->memcg_batch.memcg = NULL;
1133#endif
1130 1134
1131 p->bts = NULL; 1135 p->bts = NULL;
1132 1136
@@ -1206,9 +1210,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1206 p->sas_ss_sp = p->sas_ss_size = 0; 1210 p->sas_ss_sp = p->sas_ss_size = 0;
1207 1211
1208 /* 1212 /*
1209 * Syscall tracing should be turned off in the child regardless 1213 * Syscall tracing and stepping should be turned off in the
1210 * of CLONE_PTRACE. 1214 * child regardless of CLONE_PTRACE.
1211 */ 1215 */
1216 user_disable_single_step(p);
1212 clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); 1217 clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1213#ifdef TIF_SYSCALL_EMU 1218#ifdef TIF_SYSCALL_EMU
1214 clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); 1219 clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
diff --git a/kernel/futex.c b/kernel/futex.c
index d73ef1f3e55d..8e3c3ffe1b9a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
403 * and has cleaned up the pi_state already 403 * and has cleaned up the pi_state already
404 */ 404 */
405 if (pi_state->owner) { 405 if (pi_state->owner) {
406 spin_lock_irq(&pi_state->owner->pi_lock); 406 raw_spin_lock_irq(&pi_state->owner->pi_lock);
407 list_del_init(&pi_state->list); 407 list_del_init(&pi_state->list);
408 spin_unlock_irq(&pi_state->owner->pi_lock); 408 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
409 409
410 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); 410 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
411 } 411 }
@@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr)
470 * pi_state_list anymore, but we have to be careful 470 * pi_state_list anymore, but we have to be careful
471 * versus waiters unqueueing themselves: 471 * versus waiters unqueueing themselves:
472 */ 472 */
473 spin_lock_irq(&curr->pi_lock); 473 raw_spin_lock_irq(&curr->pi_lock);
474 while (!list_empty(head)) { 474 while (!list_empty(head)) {
475 475
476 next = head->next; 476 next = head->next;
477 pi_state = list_entry(next, struct futex_pi_state, list); 477 pi_state = list_entry(next, struct futex_pi_state, list);
478 key = pi_state->key; 478 key = pi_state->key;
479 hb = hash_futex(&key); 479 hb = hash_futex(&key);
480 spin_unlock_irq(&curr->pi_lock); 480 raw_spin_unlock_irq(&curr->pi_lock);
481 481
482 spin_lock(&hb->lock); 482 spin_lock(&hb->lock);
483 483
484 spin_lock_irq(&curr->pi_lock); 484 raw_spin_lock_irq(&curr->pi_lock);
485 /* 485 /*
486 * We dropped the pi-lock, so re-check whether this 486 * We dropped the pi-lock, so re-check whether this
487 * task still owns the PI-state: 487 * task still owns the PI-state:
@@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr)
495 WARN_ON(list_empty(&pi_state->list)); 495 WARN_ON(list_empty(&pi_state->list));
496 list_del_init(&pi_state->list); 496 list_del_init(&pi_state->list);
497 pi_state->owner = NULL; 497 pi_state->owner = NULL;
498 spin_unlock_irq(&curr->pi_lock); 498 raw_spin_unlock_irq(&curr->pi_lock);
499 499
500 rt_mutex_unlock(&pi_state->pi_mutex); 500 rt_mutex_unlock(&pi_state->pi_mutex);
501 501
502 spin_unlock(&hb->lock); 502 spin_unlock(&hb->lock);
503 503
504 spin_lock_irq(&curr->pi_lock); 504 raw_spin_lock_irq(&curr->pi_lock);
505 } 505 }
506 spin_unlock_irq(&curr->pi_lock); 506 raw_spin_unlock_irq(&curr->pi_lock);
507} 507}
508 508
509static int 509static int
@@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
558 * change of the task flags, we do this protected by 558 * change of the task flags, we do this protected by
559 * p->pi_lock: 559 * p->pi_lock:
560 */ 560 */
561 spin_lock_irq(&p->pi_lock); 561 raw_spin_lock_irq(&p->pi_lock);
562 if (unlikely(p->flags & PF_EXITING)) { 562 if (unlikely(p->flags & PF_EXITING)) {
563 /* 563 /*
564 * The task is on the way out. When PF_EXITPIDONE is 564 * The task is on the way out. When PF_EXITPIDONE is
@@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
567 */ 567 */
568 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; 568 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
569 569
570 spin_unlock_irq(&p->pi_lock); 570 raw_spin_unlock_irq(&p->pi_lock);
571 put_task_struct(p); 571 put_task_struct(p);
572 return ret; 572 return ret;
573 } 573 }
@@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
586 WARN_ON(!list_empty(&pi_state->list)); 586 WARN_ON(!list_empty(&pi_state->list));
587 list_add(&pi_state->list, &p->pi_state_list); 587 list_add(&pi_state->list, &p->pi_state_list);
588 pi_state->owner = p; 588 pi_state->owner = p;
589 spin_unlock_irq(&p->pi_lock); 589 raw_spin_unlock_irq(&p->pi_lock);
590 590
591 put_task_struct(p); 591 put_task_struct(p);
592 592
@@ -760,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
760 if (!pi_state) 760 if (!pi_state)
761 return -EINVAL; 761 return -EINVAL;
762 762
763 spin_lock(&pi_state->pi_mutex.wait_lock); 763 raw_spin_lock(&pi_state->pi_mutex.wait_lock);
764 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); 764 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
765 765
766 /* 766 /*
@@ -789,23 +789,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
789 else if (curval != uval) 789 else if (curval != uval)
790 ret = -EINVAL; 790 ret = -EINVAL;
791 if (ret) { 791 if (ret) {
792 spin_unlock(&pi_state->pi_mutex.wait_lock); 792 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
793 return ret; 793 return ret;
794 } 794 }
795 } 795 }
796 796
797 spin_lock_irq(&pi_state->owner->pi_lock); 797 raw_spin_lock_irq(&pi_state->owner->pi_lock);
798 WARN_ON(list_empty(&pi_state->list)); 798 WARN_ON(list_empty(&pi_state->list));
799 list_del_init(&pi_state->list); 799 list_del_init(&pi_state->list);
800 spin_unlock_irq(&pi_state->owner->pi_lock); 800 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
801 801
802 spin_lock_irq(&new_owner->pi_lock); 802 raw_spin_lock_irq(&new_owner->pi_lock);
803 WARN_ON(!list_empty(&pi_state->list)); 803 WARN_ON(!list_empty(&pi_state->list));
804 list_add(&pi_state->list, &new_owner->pi_state_list); 804 list_add(&pi_state->list, &new_owner->pi_state_list);
805 pi_state->owner = new_owner; 805 pi_state->owner = new_owner;
806 spin_unlock_irq(&new_owner->pi_lock); 806 raw_spin_unlock_irq(&new_owner->pi_lock);
807 807
808 spin_unlock(&pi_state->pi_mutex.wait_lock); 808 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
809 rt_mutex_unlock(&pi_state->pi_mutex); 809 rt_mutex_unlock(&pi_state->pi_mutex);
810 810
811 return 0; 811 return 0;
@@ -1010,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1010 plist_add(&q->list, &hb2->chain); 1010 plist_add(&q->list, &hb2->chain);
1011 q->lock_ptr = &hb2->lock; 1011 q->lock_ptr = &hb2->lock;
1012#ifdef CONFIG_DEBUG_PI_LIST 1012#ifdef CONFIG_DEBUG_PI_LIST
1013 q->list.plist.lock = &hb2->lock; 1013 q->list.plist.spinlock = &hb2->lock;
1014#endif 1014#endif
1015 } 1015 }
1016 get_futex_key_refs(key2); 1016 get_futex_key_refs(key2);
@@ -1046,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1046 1046
1047 q->lock_ptr = &hb->lock; 1047 q->lock_ptr = &hb->lock;
1048#ifdef CONFIG_DEBUG_PI_LIST 1048#ifdef CONFIG_DEBUG_PI_LIST
1049 q->list.plist.lock = &hb->lock; 1049 q->list.plist.spinlock = &hb->lock;
1050#endif 1050#endif
1051 1051
1052 wake_up_state(q->task, TASK_NORMAL); 1052 wake_up_state(q->task, TASK_NORMAL);
@@ -1394,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1394 1394
1395 plist_node_init(&q->list, prio); 1395 plist_node_init(&q->list, prio);
1396#ifdef CONFIG_DEBUG_PI_LIST 1396#ifdef CONFIG_DEBUG_PI_LIST
1397 q->list.plist.lock = &hb->lock; 1397 q->list.plist.spinlock = &hb->lock;
1398#endif 1398#endif
1399 plist_add(&q->list, &hb->chain); 1399 plist_add(&q->list, &hb->chain);
1400 q->task = current; 1400 q->task = current;
@@ -1529,18 +1529,18 @@ retry:
1529 * itself. 1529 * itself.
1530 */ 1530 */
1531 if (pi_state->owner != NULL) { 1531 if (pi_state->owner != NULL) {
1532 spin_lock_irq(&pi_state->owner->pi_lock); 1532 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1533 WARN_ON(list_empty(&pi_state->list)); 1533 WARN_ON(list_empty(&pi_state->list));
1534 list_del_init(&pi_state->list); 1534 list_del_init(&pi_state->list);
1535 spin_unlock_irq(&pi_state->owner->pi_lock); 1535 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1536 } 1536 }
1537 1537
1538 pi_state->owner = newowner; 1538 pi_state->owner = newowner;
1539 1539
1540 spin_lock_irq(&newowner->pi_lock); 1540 raw_spin_lock_irq(&newowner->pi_lock);
1541 WARN_ON(!list_empty(&pi_state->list)); 1541 WARN_ON(!list_empty(&pi_state->list));
1542 list_add(&pi_state->list, &newowner->pi_state_list); 1542 list_add(&pi_state->list, &newowner->pi_state_list);
1543 spin_unlock_irq(&newowner->pi_lock); 1543 raw_spin_unlock_irq(&newowner->pi_lock);
1544 return 0; 1544 return 0;
1545 1545
1546 /* 1546 /*
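
The mechanical part of this futex change is the lock-type swap: pi_lock and the rt_mutex wait_lock become raw_spinlock_t, and every spin_* call gains a raw_ prefix. In the preempt-rt tree an ordinary spinlock_t can turn into a sleeping lock, while the raw_ variants stay true spinning locks, which these never-sleep paths require. The pattern in isolation:

	/* before: spinlock_t pi_lock; spin_lock_irq(&pi_lock); ... */
	raw_spinlock_t pi_lock;

	raw_spin_lock_init(&pi_lock);
	raw_spin_lock_irq(&pi_lock);
	/* critical section that must never sleep, even with preempt-rt */
	raw_spin_unlock_irq(&pi_lock);
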
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d2f9239dc6ba..0086628b6e97 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
127 for (;;) { 127 for (;;) {
128 base = timer->base; 128 base = timer->base;
129 if (likely(base != NULL)) { 129 if (likely(base != NULL)) {
130 spin_lock_irqsave(&base->cpu_base->lock, *flags); 130 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
131 if (likely(base == timer->base)) 131 if (likely(base == timer->base))
132 return base; 132 return base;
133 /* The timer has migrated to another CPU: */ 133 /* The timer has migrated to another CPU: */
134 spin_unlock_irqrestore(&base->cpu_base->lock, *flags); 134 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
135 } 135 }
136 cpu_relax(); 136 cpu_relax();
137 } 137 }
@@ -208,13 +208,13 @@ again:
208 208
209 /* See the comment in lock_timer_base() */ 209 /* See the comment in lock_timer_base() */
210 timer->base = NULL; 210 timer->base = NULL;
211 spin_unlock(&base->cpu_base->lock); 211 raw_spin_unlock(&base->cpu_base->lock);
212 spin_lock(&new_base->cpu_base->lock); 212 raw_spin_lock(&new_base->cpu_base->lock);
213 213
214 if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { 214 if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
215 cpu = this_cpu; 215 cpu = this_cpu;
216 spin_unlock(&new_base->cpu_base->lock); 216 raw_spin_unlock(&new_base->cpu_base->lock);
217 spin_lock(&base->cpu_base->lock); 217 raw_spin_lock(&base->cpu_base->lock);
218 timer->base = base; 218 timer->base = base;
219 goto again; 219 goto again;
220 } 220 }
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
230{ 230{
231 struct hrtimer_clock_base *base = timer->base; 231 struct hrtimer_clock_base *base = timer->base;
232 232
233 spin_lock_irqsave(&base->cpu_base->lock, *flags); 233 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
234 234
235 return base; 235 return base;
236} 236}
@@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg)
628 base = &__get_cpu_var(hrtimer_bases); 628 base = &__get_cpu_var(hrtimer_bases);
629 629
630 /* Adjust CLOCK_REALTIME offset */ 630 /* Adjust CLOCK_REALTIME offset */
631 spin_lock(&base->lock); 631 raw_spin_lock(&base->lock);
632 base->clock_base[CLOCK_REALTIME].offset = 632 base->clock_base[CLOCK_REALTIME].offset =
633 timespec_to_ktime(realtime_offset); 633 timespec_to_ktime(realtime_offset);
634 634
635 hrtimer_force_reprogram(base, 0); 635 hrtimer_force_reprogram(base, 0);
636 spin_unlock(&base->lock); 636 raw_spin_unlock(&base->lock);
637} 637}
638 638
639/* 639/*
@@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
694{ 694{
695 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { 695 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
696 if (wakeup) { 696 if (wakeup) {
697 spin_unlock(&base->cpu_base->lock); 697 raw_spin_unlock(&base->cpu_base->lock);
698 raise_softirq_irqoff(HRTIMER_SOFTIRQ); 698 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
699 spin_lock(&base->cpu_base->lock); 699 raw_spin_lock(&base->cpu_base->lock);
700 } else 700 } else
701 __raise_softirq_irqoff(HRTIMER_SOFTIRQ); 701 __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
702 702
@@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
790static inline 790static inline
791void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) 791void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
792{ 792{
793 spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); 793 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
794} 794}
795 795
796/** 796/**
@@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
1123 unsigned long flags; 1123 unsigned long flags;
1124 int i; 1124 int i;
1125 1125
1126 spin_lock_irqsave(&cpu_base->lock, flags); 1126 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1127 1127
1128 if (!hrtimer_hres_active()) { 1128 if (!hrtimer_hres_active()) {
1129 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { 1129 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
1140 } 1140 }
1141 } 1141 }
1142 1142
1143 spin_unlock_irqrestore(&cpu_base->lock, flags); 1143 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1144 1144
1145 if (mindelta.tv64 < 0) 1145 if (mindelta.tv64 < 0)
1146 mindelta.tv64 = 0; 1146 mindelta.tv64 = 0;
@@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
1222	 * they get migrated to another cpu, therefore it's safe to unlock	1222	 * they get migrated to another cpu, therefore it's safe to unlock
1223 * the timer base. 1223 * the timer base.
1224 */ 1224 */
1225 spin_unlock(&cpu_base->lock); 1225 raw_spin_unlock(&cpu_base->lock);
1226 trace_hrtimer_expire_entry(timer, now); 1226 trace_hrtimer_expire_entry(timer, now);
1227 restart = fn(timer); 1227 restart = fn(timer);
1228 trace_hrtimer_expire_exit(timer); 1228 trace_hrtimer_expire_exit(timer);
1229 spin_lock(&cpu_base->lock); 1229 raw_spin_lock(&cpu_base->lock);
1230 1230
1231 /* 1231 /*
1232 * Note: We clear the CALLBACK bit after enqueue_hrtimer and 1232 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1261retry: 1261retry:
1262 expires_next.tv64 = KTIME_MAX; 1262 expires_next.tv64 = KTIME_MAX;
1263 1263
1264 spin_lock(&cpu_base->lock); 1264 raw_spin_lock(&cpu_base->lock);
1265 /* 1265 /*
1266 * We set expires_next to KTIME_MAX here with cpu_base->lock 1266 * We set expires_next to KTIME_MAX here with cpu_base->lock
1267 * held to prevent that a timer is enqueued in our queue via 1267 * held to prevent that a timer is enqueued in our queue via
@@ -1317,7 +1317,7 @@ retry:
1317 * against it. 1317 * against it.
1318 */ 1318 */
1319 cpu_base->expires_next = expires_next; 1319 cpu_base->expires_next = expires_next;
1320 spin_unlock(&cpu_base->lock); 1320 raw_spin_unlock(&cpu_base->lock);
1321 1321
1322 /* Reprogramming necessary ? */ 1322 /* Reprogramming necessary ? */
1323 if (expires_next.tv64 == KTIME_MAX || 1323 if (expires_next.tv64 == KTIME_MAX ||
@@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void)
1457 gettime = 0; 1457 gettime = 0;
1458 } 1458 }
1459 1459
1460 spin_lock(&cpu_base->lock); 1460 raw_spin_lock(&cpu_base->lock);
1461 1461
1462 while ((node = base->first)) { 1462 while ((node = base->first)) {
1463 struct hrtimer *timer; 1463 struct hrtimer *timer;
@@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void)
1469 1469
1470 __run_hrtimer(timer, &base->softirq_time); 1470 __run_hrtimer(timer, &base->softirq_time);
1471 } 1471 }
1472 spin_unlock(&cpu_base->lock); 1472 raw_spin_unlock(&cpu_base->lock);
1473 } 1473 }
1474} 1474}
1475 1475
@@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
1625 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); 1625 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1626 int i; 1626 int i;
1627 1627
1628 spin_lock_init(&cpu_base->lock); 1628 raw_spin_lock_init(&cpu_base->lock);
1629 1629
1630 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) 1630 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1631 cpu_base->clock_base[i].cpu_base = cpu_base; 1631 cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
1683 * The caller is globally serialized and nobody else 1683 * The caller is globally serialized and nobody else
1684 * takes two locks at once, deadlock is not possible. 1684 * takes two locks at once, deadlock is not possible.
1685 */ 1685 */
1686 spin_lock(&new_base->lock); 1686 raw_spin_lock(&new_base->lock);
1687 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1687 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1688 1688
1689 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 1689 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1690 migrate_hrtimer_list(&old_base->clock_base[i], 1690 migrate_hrtimer_list(&old_base->clock_base[i],
1691 &new_base->clock_base[i]); 1691 &new_base->clock_base[i]);
1692 } 1692 }
1693 1693
1694 spin_unlock(&old_base->lock); 1694 raw_spin_unlock(&old_base->lock);
1695 spin_unlock(&new_base->lock); 1695 raw_spin_unlock(&new_base->lock);
1696 1696
1697 /* Check, if we got expired work to do */ 1697 /* Check, if we got expired work to do */
1698 __hrtimer_peek_ahead_timers(); 1698 __hrtimer_peek_ahead_timers();
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 366eedf949c0..dbcbf6a33a08 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk)
96 96
97 list = &ctx->event_list; 97 list = &ctx->event_list;
98 98
99 spin_lock_irqsave(&ctx->lock, flags); 99 raw_spin_lock_irqsave(&ctx->lock, flags);
100 100
101 /* 101 /*
102 * The current breakpoint counter is not included in the list 102 * The current breakpoint counter is not included in the list
@@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk)
107 count++; 107 count++;
108 } 108 }
109 109
110 spin_unlock_irqrestore(&ctx->lock, flags); 110 raw_spin_unlock_irqrestore(&ctx->lock, flags);
111 111
112 return count; 112 return count;
113} 113}
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 1de9700f416e..2295a31ef110 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
45 * flush such a longstanding irq before considering it as spurious. 45 * flush such a longstanding irq before considering it as spurious.
46 */ 46 */
47 for_each_irq_desc_reverse(i, desc) { 47 for_each_irq_desc_reverse(i, desc) {
48 spin_lock_irq(&desc->lock); 48 raw_spin_lock_irq(&desc->lock);
49 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 49 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
50 /* 50 /*
51 * An old-style architecture might still have 51 * An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
61 desc->chip->set_type(i, IRQ_TYPE_PROBE); 61 desc->chip->set_type(i, IRQ_TYPE_PROBE);
62 desc->chip->startup(i); 62 desc->chip->startup(i);
63 } 63 }
64 spin_unlock_irq(&desc->lock); 64 raw_spin_unlock_irq(&desc->lock);
65 } 65 }
66 66
67 /* Wait for longstanding interrupts to trigger. */ 67 /* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
73 * happened in the previous stage, it may have masked itself) 73 * happened in the previous stage, it may have masked itself)
74 */ 74 */
75 for_each_irq_desc_reverse(i, desc) { 75 for_each_irq_desc_reverse(i, desc) {
76 spin_lock_irq(&desc->lock); 76 raw_spin_lock_irq(&desc->lock);
77 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 77 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
78 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 78 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
79 if (desc->chip->startup(i)) 79 if (desc->chip->startup(i))
80 desc->status |= IRQ_PENDING; 80 desc->status |= IRQ_PENDING;
81 } 81 }
82 spin_unlock_irq(&desc->lock); 82 raw_spin_unlock_irq(&desc->lock);
83 } 83 }
84 84
85 /* 85 /*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
91 * Now filter out any obviously spurious interrupts 91 * Now filter out any obviously spurious interrupts
92 */ 92 */
93 for_each_irq_desc(i, desc) { 93 for_each_irq_desc(i, desc) {
94 spin_lock_irq(&desc->lock); 94 raw_spin_lock_irq(&desc->lock);
95 status = desc->status; 95 status = desc->status;
96 96
97 if (status & IRQ_AUTODETECT) { 97 if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
103 if (i < 32) 103 if (i < 32)
104 mask |= 1 << i; 104 mask |= 1 << i;
105 } 105 }
106 spin_unlock_irq(&desc->lock); 106 raw_spin_unlock_irq(&desc->lock);
107 } 107 }
108 108
109 return mask; 109 return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
129 int i; 129 int i;
130 130
131 for_each_irq_desc(i, desc) { 131 for_each_irq_desc(i, desc) {
132 spin_lock_irq(&desc->lock); 132 raw_spin_lock_irq(&desc->lock);
133 status = desc->status; 133 status = desc->status;
134 134
135 if (status & IRQ_AUTODETECT) { 135 if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
139 desc->status = status & ~IRQ_AUTODETECT; 139 desc->status = status & ~IRQ_AUTODETECT;
140 desc->chip->shutdown(i); 140 desc->chip->shutdown(i);
141 } 141 }
142 spin_unlock_irq(&desc->lock); 142 raw_spin_unlock_irq(&desc->lock);
143 } 143 }
144 mutex_unlock(&probing_active); 144 mutex_unlock(&probing_active);
145 145
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
171 unsigned int status; 171 unsigned int status;
172 172
173 for_each_irq_desc(i, desc) { 173 for_each_irq_desc(i, desc) {
174 spin_lock_irq(&desc->lock); 174 raw_spin_lock_irq(&desc->lock);
175 status = desc->status; 175 status = desc->status;
176 176
177 if (status & IRQ_AUTODETECT) { 177 if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
183 desc->status = status & ~IRQ_AUTODETECT; 183 desc->status = status & ~IRQ_AUTODETECT;
184 desc->chip->shutdown(i); 184 desc->chip->shutdown(i);
185 } 185 }
186 spin_unlock_irq(&desc->lock); 186 raw_spin_unlock_irq(&desc->lock);
187 } 187 }
188 mutex_unlock(&probing_active); 188 mutex_unlock(&probing_active);
189 189
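The autoprobe code above is the backend of the classic probe_irq_on()/probe_irq_off() pair. A hedged sketch of how a legacy (typically ISA) driver consumes it; my_isa_dev_fire_test_irq() is a hypothetical device-specific helper, not part of any kernel API:

	#include <linux/interrupt.h>
	#include <linux/delay.h>

	static int my_isa_probe_irq(struct my_isa_dev *dev)
	{
		unsigned long mask;
		int irq;

		mask = probe_irq_on();
		my_isa_dev_fire_test_irq(dev);	/* make the card raise exactly one interrupt */
		udelay(100);			/* give it time to arrive */
		irq = probe_irq_off(mask);	/* >0: probed IRQ, 0: none fired, <0: several fired */

		return irq;
	}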
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ba566c261adc..ecc3fa28f666 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
34 } 34 }
35 35
36 /* Ensure we don't have left over values from a previous use of this irq */ 36 /* Ensure we don't have left over values from a previous use of this irq */
37 spin_lock_irqsave(&desc->lock, flags); 37 raw_spin_lock_irqsave(&desc->lock, flags);
38 desc->status = IRQ_DISABLED; 38 desc->status = IRQ_DISABLED;
39 desc->chip = &no_irq_chip; 39 desc->chip = &no_irq_chip;
40 desc->handle_irq = handle_bad_irq; 40 desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
51 cpumask_clear(desc->pending_mask); 51 cpumask_clear(desc->pending_mask);
52#endif 52#endif
53#endif 53#endif
54 spin_unlock_irqrestore(&desc->lock, flags); 54 raw_spin_unlock_irqrestore(&desc->lock, flags);
55} 55}
56 56
57/** 57/**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq)
68 return; 68 return;
69 } 69 }
70 70
71 spin_lock_irqsave(&desc->lock, flags); 71 raw_spin_lock_irqsave(&desc->lock, flags);
72 if (desc->action) { 72 if (desc->action) {
73 spin_unlock_irqrestore(&desc->lock, flags); 73 raw_spin_unlock_irqrestore(&desc->lock, flags);
74 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", 74 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
75 irq); 75 irq);
76 return; 76 return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq)
82 desc->chip = &no_irq_chip; 82 desc->chip = &no_irq_chip;
83 desc->name = NULL; 83 desc->name = NULL;
84 clear_kstat_irqs(desc); 84 clear_kstat_irqs(desc);
85 spin_unlock_irqrestore(&desc->lock, flags); 85 raw_spin_unlock_irqrestore(&desc->lock, flags);
86} 86}
87 87
88 88
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
104 if (!chip) 104 if (!chip)
105 chip = &no_irq_chip; 105 chip = &no_irq_chip;
106 106
107 spin_lock_irqsave(&desc->lock, flags); 107 raw_spin_lock_irqsave(&desc->lock, flags);
108 irq_chip_set_defaults(chip); 108 irq_chip_set_defaults(chip);
109 desc->chip = chip; 109 desc->chip = chip;
110 spin_unlock_irqrestore(&desc->lock, flags); 110 raw_spin_unlock_irqrestore(&desc->lock, flags);
111 111
112 return 0; 112 return 0;
113} 113}
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
133 if (type == IRQ_TYPE_NONE) 133 if (type == IRQ_TYPE_NONE)
134 return 0; 134 return 0;
135 135
136 spin_lock_irqsave(&desc->lock, flags); 136 raw_spin_lock_irqsave(&desc->lock, flags);
137 ret = __irq_set_trigger(desc, irq, type); 137 ret = __irq_set_trigger(desc, irq, type);
138 spin_unlock_irqrestore(&desc->lock, flags); 138 raw_spin_unlock_irqrestore(&desc->lock, flags);
139 return ret; 139 return ret;
140} 140}
141EXPORT_SYMBOL(set_irq_type); 141EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data)
158 return -EINVAL; 158 return -EINVAL;
159 } 159 }
160 160
161 spin_lock_irqsave(&desc->lock, flags); 161 raw_spin_lock_irqsave(&desc->lock, flags);
162 desc->handler_data = data; 162 desc->handler_data = data;
163 spin_unlock_irqrestore(&desc->lock, flags); 163 raw_spin_unlock_irqrestore(&desc->lock, flags);
164 return 0; 164 return 0;
165} 165}
166EXPORT_SYMBOL(set_irq_data); 166EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
183 return -EINVAL; 183 return -EINVAL;
184 } 184 }
185 185
186 spin_lock_irqsave(&desc->lock, flags); 186 raw_spin_lock_irqsave(&desc->lock, flags);
187 desc->msi_desc = entry; 187 desc->msi_desc = entry;
188 if (entry) 188 if (entry)
189 entry->irq = irq; 189 entry->irq = irq;
190 spin_unlock_irqrestore(&desc->lock, flags); 190 raw_spin_unlock_irqrestore(&desc->lock, flags);
191 return 0; 191 return 0;
192} 192}
193 193
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
214 return -EINVAL; 214 return -EINVAL;
215 } 215 }
216 216
217 spin_lock_irqsave(&desc->lock, flags); 217 raw_spin_lock_irqsave(&desc->lock, flags);
218 desc->chip_data = data; 218 desc->chip_data = data;
219 spin_unlock_irqrestore(&desc->lock, flags); 219 raw_spin_unlock_irqrestore(&desc->lock, flags);
220 220
221 return 0; 221 return 0;
222} 222}
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
241 if (!desc) 241 if (!desc)
242 return; 242 return;
243 243
244 spin_lock_irqsave(&desc->lock, flags); 244 raw_spin_lock_irqsave(&desc->lock, flags);
245 if (nest) 245 if (nest)
246 desc->status |= IRQ_NESTED_THREAD; 246 desc->status |= IRQ_NESTED_THREAD;
247 else 247 else
248 desc->status &= ~IRQ_NESTED_THREAD; 248 desc->status &= ~IRQ_NESTED_THREAD;
249 spin_unlock_irqrestore(&desc->lock, flags); 249 raw_spin_unlock_irqrestore(&desc->lock, flags);
250} 250}
251EXPORT_SYMBOL_GPL(set_irq_nested_thread); 251EXPORT_SYMBOL_GPL(set_irq_nested_thread);
252 252
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
343 343
344 might_sleep(); 344 might_sleep();
345 345
346 spin_lock_irq(&desc->lock); 346 raw_spin_lock_irq(&desc->lock);
347 347
348 kstat_incr_irqs_this_cpu(irq, desc); 348 kstat_incr_irqs_this_cpu(irq, desc);
349 349
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
352 goto out_unlock; 352 goto out_unlock;
353 353
354 desc->status |= IRQ_INPROGRESS; 354 desc->status |= IRQ_INPROGRESS;
355 spin_unlock_irq(&desc->lock); 355 raw_spin_unlock_irq(&desc->lock);
356 356
357 action_ret = action->thread_fn(action->irq, action->dev_id); 357 action_ret = action->thread_fn(action->irq, action->dev_id);
358 if (!noirqdebug) 358 if (!noirqdebug)
359 note_interrupt(irq, desc, action_ret); 359 note_interrupt(irq, desc, action_ret);
360 360
361 spin_lock_irq(&desc->lock); 361 raw_spin_lock_irq(&desc->lock);
362 desc->status &= ~IRQ_INPROGRESS; 362 desc->status &= ~IRQ_INPROGRESS;
363 363
364out_unlock: 364out_unlock:
365 spin_unlock_irq(&desc->lock); 365 raw_spin_unlock_irq(&desc->lock);
366} 366}
367EXPORT_SYMBOL_GPL(handle_nested_irq); 367EXPORT_SYMBOL_GPL(handle_nested_irq);
368 368
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
384 struct irqaction *action; 384 struct irqaction *action;
385 irqreturn_t action_ret; 385 irqreturn_t action_ret;
386 386
387 spin_lock(&desc->lock); 387 raw_spin_lock(&desc->lock);
388 388
389 if (unlikely(desc->status & IRQ_INPROGRESS)) 389 if (unlikely(desc->status & IRQ_INPROGRESS))
390 goto out_unlock; 390 goto out_unlock;
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
396 goto out_unlock; 396 goto out_unlock;
397 397
398 desc->status |= IRQ_INPROGRESS; 398 desc->status |= IRQ_INPROGRESS;
399 spin_unlock(&desc->lock); 399 raw_spin_unlock(&desc->lock);
400 400
401 action_ret = handle_IRQ_event(irq, action); 401 action_ret = handle_IRQ_event(irq, action);
402 if (!noirqdebug) 402 if (!noirqdebug)
403 note_interrupt(irq, desc, action_ret); 403 note_interrupt(irq, desc, action_ret);
404 404
405 spin_lock(&desc->lock); 405 raw_spin_lock(&desc->lock);
406 desc->status &= ~IRQ_INPROGRESS; 406 desc->status &= ~IRQ_INPROGRESS;
407out_unlock: 407out_unlock:
408 spin_unlock(&desc->lock); 408 raw_spin_unlock(&desc->lock);
409} 409}
410 410
411/** 411/**
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
424 struct irqaction *action; 424 struct irqaction *action;
425 irqreturn_t action_ret; 425 irqreturn_t action_ret;
426 426
427 spin_lock(&desc->lock); 427 raw_spin_lock(&desc->lock);
428 mask_ack_irq(desc, irq); 428 mask_ack_irq(desc, irq);
429 429
430 if (unlikely(desc->status & IRQ_INPROGRESS)) 430 if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
441 goto out_unlock; 441 goto out_unlock;
442 442
443 desc->status |= IRQ_INPROGRESS; 443 desc->status |= IRQ_INPROGRESS;
444 spin_unlock(&desc->lock); 444 raw_spin_unlock(&desc->lock);
445 445
446 action_ret = handle_IRQ_event(irq, action); 446 action_ret = handle_IRQ_event(irq, action);
447 if (!noirqdebug) 447 if (!noirqdebug)
448 note_interrupt(irq, desc, action_ret); 448 note_interrupt(irq, desc, action_ret);
449 449
450 spin_lock(&desc->lock); 450 raw_spin_lock(&desc->lock);
451 desc->status &= ~IRQ_INPROGRESS; 451 desc->status &= ~IRQ_INPROGRESS;
452 452
453 if (unlikely(desc->status & IRQ_ONESHOT)) 453 if (unlikely(desc->status & IRQ_ONESHOT))
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
455 else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) 455 else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
456 desc->chip->unmask(irq); 456 desc->chip->unmask(irq);
457out_unlock: 457out_unlock:
458 spin_unlock(&desc->lock); 458 raw_spin_unlock(&desc->lock);
459} 459}
460EXPORT_SYMBOL_GPL(handle_level_irq); 460EXPORT_SYMBOL_GPL(handle_level_irq);
461 461
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
475 struct irqaction *action; 475 struct irqaction *action;
476 irqreturn_t action_ret; 476 irqreturn_t action_ret;
477 477
478 spin_lock(&desc->lock); 478 raw_spin_lock(&desc->lock);
479 479
480 if (unlikely(desc->status & IRQ_INPROGRESS)) 480 if (unlikely(desc->status & IRQ_INPROGRESS))
481 goto out; 481 goto out;
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
497 497
498 desc->status |= IRQ_INPROGRESS; 498 desc->status |= IRQ_INPROGRESS;
499 desc->status &= ~IRQ_PENDING; 499 desc->status &= ~IRQ_PENDING;
500 spin_unlock(&desc->lock); 500 raw_spin_unlock(&desc->lock);
501 501
502 action_ret = handle_IRQ_event(irq, action); 502 action_ret = handle_IRQ_event(irq, action);
503 if (!noirqdebug) 503 if (!noirqdebug)
504 note_interrupt(irq, desc, action_ret); 504 note_interrupt(irq, desc, action_ret);
505 505
506 spin_lock(&desc->lock); 506 raw_spin_lock(&desc->lock);
507 desc->status &= ~IRQ_INPROGRESS; 507 desc->status &= ~IRQ_INPROGRESS;
508out: 508out:
509 desc->chip->eoi(irq); 509 desc->chip->eoi(irq);
510 510
511 spin_unlock(&desc->lock); 511 raw_spin_unlock(&desc->lock);
512} 512}
513 513
514/** 514/**
@@ -530,7 +530,7 @@ out:
530void 530void
531handle_edge_irq(unsigned int irq, struct irq_desc *desc) 531handle_edge_irq(unsigned int irq, struct irq_desc *desc)
532{ 532{
533 spin_lock(&desc->lock); 533 raw_spin_lock(&desc->lock);
534 534
535 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 535 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
536 536
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
576 } 576 }
577 577
578 desc->status &= ~IRQ_PENDING; 578 desc->status &= ~IRQ_PENDING;
579 spin_unlock(&desc->lock); 579 raw_spin_unlock(&desc->lock);
580 action_ret = handle_IRQ_event(irq, action); 580 action_ret = handle_IRQ_event(irq, action);
581 if (!noirqdebug) 581 if (!noirqdebug)
582 note_interrupt(irq, desc, action_ret); 582 note_interrupt(irq, desc, action_ret);
583 spin_lock(&desc->lock); 583 raw_spin_lock(&desc->lock);
584 584
585 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); 585 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
586 586
587 desc->status &= ~IRQ_INPROGRESS; 587 desc->status &= ~IRQ_INPROGRESS;
588out_unlock: 588out_unlock:
589 spin_unlock(&desc->lock); 589 raw_spin_unlock(&desc->lock);
590} 590}
591 591
592/** 592/**
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
643 } 643 }
644 644
645 chip_bus_lock(irq, desc); 645 chip_bus_lock(irq, desc);
646 spin_lock_irqsave(&desc->lock, flags); 646 raw_spin_lock_irqsave(&desc->lock, flags);
647 647
648 /* Uninstall? */ 648 /* Uninstall? */
649 if (handle == handle_bad_irq) { 649 if (handle == handle_bad_irq) {
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
661 desc->depth = 0; 661 desc->depth = 0;
662 desc->chip->startup(irq); 662 desc->chip->startup(irq);
663 } 663 }
664 spin_unlock_irqrestore(&desc->lock, flags); 664 raw_spin_unlock_irqrestore(&desc->lock, flags);
665 chip_bus_sync_unlock(irq, desc); 665 chip_bus_sync_unlock(irq, desc);
666} 666}
667EXPORT_SYMBOL_GPL(__set_irq_handler); 667EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq)
692 return; 692 return;
693 } 693 }
694 694
695 spin_lock_irqsave(&desc->lock, flags); 695 raw_spin_lock_irqsave(&desc->lock, flags);
696 desc->status |= IRQ_NOPROBE; 696 desc->status |= IRQ_NOPROBE;
697 spin_unlock_irqrestore(&desc->lock, flags); 697 raw_spin_unlock_irqrestore(&desc->lock, flags);
698} 698}
699 699
700void __init set_irq_probe(unsigned int irq) 700void __init set_irq_probe(unsigned int irq)
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq)
707 return; 707 return;
708 } 708 }
709 709
710 spin_lock_irqsave(&desc->lock, flags); 710 raw_spin_lock_irqsave(&desc->lock, flags);
711 desc->status &= ~IRQ_NOPROBE; 711 desc->status &= ~IRQ_NOPROBE;
712 spin_unlock_irqrestore(&desc->lock, flags); 712 raw_spin_unlock_irqrestore(&desc->lock, flags);
713} 713}
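All of the flow handlers converted above share one locking pattern that the raw-lock change preserves: desc->lock protects only the status bookkeeping and is dropped while the device handlers actually run, with IRQ_INPROGRESS guarding against re-entry. A condensed sketch of that pattern (not a real flow handler; field names follow the 2.6.33 struct irq_desc):

	static void handle_sketch_irq(unsigned int irq, struct irq_desc *desc)
	{
		struct irqaction *action;

		raw_spin_lock(&desc->lock);

		action = desc->action;
		if (unlikely(!action || (desc->status & IRQ_INPROGRESS)))
			goto out_unlock;

		desc->status |= IRQ_INPROGRESS;
		raw_spin_unlock(&desc->lock);	/* drop the lock around the handlers */

		handle_IRQ_event(irq, action);

		raw_spin_lock(&desc->lock);
		desc->status &= ~IRQ_INPROGRESS;
	out_unlock:
		raw_spin_unlock(&desc->lock);
	}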
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 17c71bb565c6..814940e7f485 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
80 .chip = &no_irq_chip, 80 .chip = &no_irq_chip,
81 .handle_irq = handle_bad_irq, 81 .handle_irq = handle_bad_irq,
82 .depth = 1, 82 .depth = 1,
83 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 83 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
84}; 84};
85 85
86void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) 86void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
108{ 108{
109 memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); 109 memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
110 110
111 spin_lock_init(&desc->lock); 111 raw_spin_lock_init(&desc->lock);
112 desc->irq = irq; 112 desc->irq = irq;
113#ifdef CONFIG_SMP 113#ifdef CONFIG_SMP
114 desc->node = node; 114 desc->node = node;
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
130/* 130/*
131 * Protect the sparse_irqs: 131 * Protect the sparse_irqs:
132 */ 132 */
133DEFINE_SPINLOCK(sparse_irq_lock); 133DEFINE_RAW_SPINLOCK(sparse_irq_lock);
134 134
135struct irq_desc **irq_desc_ptrs __read_mostly; 135struct irq_desc **irq_desc_ptrs __read_mostly;
136 136
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
141 .chip = &no_irq_chip, 141 .chip = &no_irq_chip,
142 .handle_irq = handle_bad_irq, 142 .handle_irq = handle_bad_irq,
143 .depth = 1, 143 .depth = 1,
144 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 144 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
145 } 145 }
146}; 146};
147 147
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
212 if (desc) 212 if (desc)
213 return desc; 213 return desc;
214 214
215 spin_lock_irqsave(&sparse_irq_lock, flags); 215 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
216 216
217 /* We have to check it to avoid races with another CPU */ 217 /* We have to check it to avoid races with another CPU */
218 desc = irq_desc_ptrs[irq]; 218 desc = irq_desc_ptrs[irq];
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
234 irq_desc_ptrs[irq] = desc; 234 irq_desc_ptrs[irq] = desc;
235 235
236out_unlock: 236out_unlock:
237 spin_unlock_irqrestore(&sparse_irq_lock, flags); 237 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
238 238
239 return desc; 239 return desc;
240} 240}
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
247 .chip = &no_irq_chip, 247 .chip = &no_irq_chip,
248 .handle_irq = handle_bad_irq, 248 .handle_irq = handle_bad_irq,
249 .depth = 1, 249 .depth = 1,
250 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), 250 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
251 } 251 }
252}; 252};
253 253
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
473 return 1; 473 return 1;
474 } 474 }
475 475
476 spin_lock(&desc->lock); 476 raw_spin_lock(&desc->lock);
477 if (desc->chip->ack) 477 if (desc->chip->ack)
478 desc->chip->ack(irq); 478 desc->chip->ack(irq);
479 /* 479 /*
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
517 for (;;) { 517 for (;;) {
518 irqreturn_t action_ret; 518 irqreturn_t action_ret;
519 519
520 spin_unlock(&desc->lock); 520 raw_spin_unlock(&desc->lock);
521 521
522 action_ret = handle_IRQ_event(irq, action); 522 action_ret = handle_IRQ_event(irq, action);
523 if (!noirqdebug) 523 if (!noirqdebug)
524 note_interrupt(irq, desc, action_ret); 524 note_interrupt(irq, desc, action_ret);
525 525
526 spin_lock(&desc->lock); 526 raw_spin_lock(&desc->lock);
527 if (likely(!(desc->status & IRQ_PENDING))) 527 if (likely(!(desc->status & IRQ_PENDING)))
528 break; 528 break;
529 desc->status &= ~IRQ_PENDING; 529 desc->status &= ~IRQ_PENDING;
@@ -536,7 +536,7 @@ out:
536 * disabled while the handler was running. 536 * disabled while the handler was running.
537 */ 537 */
538 desc->chip->end(irq); 538 desc->chip->end(irq);
539 spin_unlock(&desc->lock); 539 raw_spin_unlock(&desc->lock);
540 540
541 return 1; 541 return 1;
542} 542}
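The handle.c hunks also show the three ways a raw spinlock gets initialized: a file-scope definition, a struct initializer, and a runtime init after a memcpy from a template. A short sketch of all three, assuming the same 2.6.33 API; the names are illustrative:

	static DEFINE_RAW_SPINLOCK(my_static_lock);		/* file-scope lock */

	static raw_spinlock_t my_embedded_lock =
		__RAW_SPIN_LOCK_UNLOCKED(my_embedded_lock);	/* lock inside an initializer */

	static void my_init_copied_desc(struct irq_desc *desc)
	{
		/* a lock copied by memcpy must be re-initialized,
		 * as init_one_irq_desc() does above */
		raw_spin_lock_init(&desc->lock);
	}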
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 1b5d742c6a77..b2821f070a3d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
18extern struct lock_class_key irq_desc_lock_class; 18extern struct lock_class_key irq_desc_lock_class;
19extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 19extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
20extern void clear_kstat_irqs(struct irq_desc *desc); 20extern void clear_kstat_irqs(struct irq_desc *desc);
21extern spinlock_t sparse_irq_lock; 21extern raw_spinlock_t sparse_irq_lock;
22 22
23#ifdef CONFIG_SPARSE_IRQ 23#ifdef CONFIG_SPARSE_IRQ
24/* irq_desc_ptrs allocated at boot time */ 24/* irq_desc_ptrs allocated at boot time */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7305b297d1eb..eb6078ca60c7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
46 cpu_relax(); 46 cpu_relax();
47 47
48 /* Ok, that indicated we're done: double-check carefully. */ 48 /* Ok, that indicated we're done: double-check carefully. */
49 spin_lock_irqsave(&desc->lock, flags); 49 raw_spin_lock_irqsave(&desc->lock, flags);
50 status = desc->status; 50 status = desc->status;
51 spin_unlock_irqrestore(&desc->lock, flags); 51 raw_spin_unlock_irqrestore(&desc->lock, flags);
52 52
53 /* Oops, that failed? */ 53 /* Oops, that failed? */
54 } while (status & IRQ_INPROGRESS); 54 } while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
114 if (!desc->chip->set_affinity) 114 if (!desc->chip->set_affinity)
115 return -EINVAL; 115 return -EINVAL;
116 116
117 spin_lock_irqsave(&desc->lock, flags); 117 raw_spin_lock_irqsave(&desc->lock, flags);
118 118
119#ifdef CONFIG_GENERIC_PENDING_IRQ 119#ifdef CONFIG_GENERIC_PENDING_IRQ
120 if (desc->status & IRQ_MOVE_PCNTXT) { 120 if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
134 } 134 }
135#endif 135#endif
136 desc->status |= IRQ_AFFINITY_SET; 136 desc->status |= IRQ_AFFINITY_SET;
137 spin_unlock_irqrestore(&desc->lock, flags); 137 raw_spin_unlock_irqrestore(&desc->lock, flags);
138 return 0; 138 return 0;
139} 139}
140 140
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
181 unsigned long flags; 181 unsigned long flags;
182 int ret; 182 int ret;
183 183
184 spin_lock_irqsave(&desc->lock, flags); 184 raw_spin_lock_irqsave(&desc->lock, flags);
185 ret = setup_affinity(irq, desc); 185 ret = setup_affinity(irq, desc);
186 if (!ret) 186 if (!ret)
187 irq_set_thread_affinity(desc); 187 irq_set_thread_affinity(desc);
188 spin_unlock_irqrestore(&desc->lock, flags); 188 raw_spin_unlock_irqrestore(&desc->lock, flags);
189 189
190 return ret; 190 return ret;
191} 191}
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq)
231 return; 231 return;
232 232
233 chip_bus_lock(irq, desc); 233 chip_bus_lock(irq, desc);
234 spin_lock_irqsave(&desc->lock, flags); 234 raw_spin_lock_irqsave(&desc->lock, flags);
235 __disable_irq(desc, irq, false); 235 __disable_irq(desc, irq, false);
236 spin_unlock_irqrestore(&desc->lock, flags); 236 raw_spin_unlock_irqrestore(&desc->lock, flags);
237 chip_bus_sync_unlock(irq, desc); 237 chip_bus_sync_unlock(irq, desc);
238} 238}
239EXPORT_SYMBOL(disable_irq_nosync); 239EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
308 return; 308 return;
309 309
310 chip_bus_lock(irq, desc); 310 chip_bus_lock(irq, desc);
311 spin_lock_irqsave(&desc->lock, flags); 311 raw_spin_lock_irqsave(&desc->lock, flags);
312 __enable_irq(desc, irq, false); 312 __enable_irq(desc, irq, false);
313 spin_unlock_irqrestore(&desc->lock, flags); 313 raw_spin_unlock_irqrestore(&desc->lock, flags);
314 chip_bus_sync_unlock(irq, desc); 314 chip_bus_sync_unlock(irq, desc);
315} 315}
316EXPORT_SYMBOL(enable_irq); 316EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
347 /* wakeup-capable irqs can be shared between drivers that 347 /* wakeup-capable irqs can be shared between drivers that
348 * don't need to have the same sleep mode behaviors. 348 * don't need to have the same sleep mode behaviors.
349 */ 349 */
350 spin_lock_irqsave(&desc->lock, flags); 350 raw_spin_lock_irqsave(&desc->lock, flags);
351 if (on) { 351 if (on) {
352 if (desc->wake_depth++ == 0) { 352 if (desc->wake_depth++ == 0) {
353 ret = set_irq_wake_real(irq, on); 353 ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
368 } 368 }
369 } 369 }
370 370
371 spin_unlock_irqrestore(&desc->lock, flags); 371 raw_spin_unlock_irqrestore(&desc->lock, flags);
372 return ret; 372 return ret;
373} 373}
374EXPORT_SYMBOL(set_irq_wake); 374EXPORT_SYMBOL(set_irq_wake);
@@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct irqaction *action)
484static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 484static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
485{ 485{
486 chip_bus_lock(irq, desc); 486 chip_bus_lock(irq, desc);
487 spin_lock_irq(&desc->lock); 487 raw_spin_lock_irq(&desc->lock);
488 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 488 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
489 desc->status &= ~IRQ_MASKED; 489 desc->status &= ~IRQ_MASKED;
490 desc->chip->unmask(irq); 490 desc->chip->unmask(irq);
491 } 491 }
492 spin_unlock_irq(&desc->lock); 492 raw_spin_unlock_irq(&desc->lock);
493 chip_bus_sync_unlock(irq, desc); 493 chip_bus_sync_unlock(irq, desc);
494} 494}
495 495
@@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
514 return; 514 return;
515 } 515 }
516 516
517 spin_lock_irq(&desc->lock); 517 raw_spin_lock_irq(&desc->lock);
518 cpumask_copy(mask, desc->affinity); 518 cpumask_copy(mask, desc->affinity);
519 spin_unlock_irq(&desc->lock); 519 raw_spin_unlock_irq(&desc->lock);
520 520
521 set_cpus_allowed_ptr(current, mask); 521 set_cpus_allowed_ptr(current, mask);
522 free_cpumask_var(mask); 522 free_cpumask_var(mask);
@@ -545,7 +545,7 @@ static int irq_thread(void *data)
545 545
546 atomic_inc(&desc->threads_active); 546 atomic_inc(&desc->threads_active);
547 547
548 spin_lock_irq(&desc->lock); 548 raw_spin_lock_irq(&desc->lock);
549 if (unlikely(desc->status & IRQ_DISABLED)) { 549 if (unlikely(desc->status & IRQ_DISABLED)) {
550 /* 550 /*
551 * CHECKME: We might need a dedicated 551 * CHECKME: We might need a dedicated
@@ -555,9 +555,9 @@ static int irq_thread(void *data)
555 * retriggers the interrupt itself --- tglx 555 * retriggers the interrupt itself --- tglx
556 */ 556 */
557 desc->status |= IRQ_PENDING; 557 desc->status |= IRQ_PENDING;
558 spin_unlock_irq(&desc->lock); 558 raw_spin_unlock_irq(&desc->lock);
559 } else { 559 } else {
560 spin_unlock_irq(&desc->lock); 560 raw_spin_unlock_irq(&desc->lock);
561 561
562 action->thread_fn(action->irq, action->dev_id); 562 action->thread_fn(action->irq, action->dev_id);
563 563
@@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
679 /* 679 /*
680 * The following block of code has to be executed atomically 680 * The following block of code has to be executed atomically
681 */ 681 */
682 spin_lock_irqsave(&desc->lock, flags); 682 raw_spin_lock_irqsave(&desc->lock, flags);
683 old_ptr = &desc->action; 683 old_ptr = &desc->action;
684 old = *old_ptr; 684 old = *old_ptr;
685 if (old) { 685 if (old) {
@@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
775 __enable_irq(desc, irq, false); 775 __enable_irq(desc, irq, false);
776 } 776 }
777 777
778 spin_unlock_irqrestore(&desc->lock, flags); 778 raw_spin_unlock_irqrestore(&desc->lock, flags);
779 779
780 /* 780 /*
781 * Strictly no need to wake it up, but hung_task complains 781 * Strictly no need to wake it up, but hung_task complains
@@ -802,7 +802,7 @@ mismatch:
802 ret = -EBUSY; 802 ret = -EBUSY;
803 803
804out_thread: 804out_thread:
805 spin_unlock_irqrestore(&desc->lock, flags); 805 raw_spin_unlock_irqrestore(&desc->lock, flags);
806 if (new->thread) { 806 if (new->thread) {
807 struct task_struct *t = new->thread; 807 struct task_struct *t = new->thread;
808 808
@@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
844 if (!desc) 844 if (!desc)
845 return NULL; 845 return NULL;
846 846
847 spin_lock_irqsave(&desc->lock, flags); 847 raw_spin_lock_irqsave(&desc->lock, flags);
848 848
849 /* 849 /*
850 * There can be multiple actions per IRQ descriptor, find the right 850 * There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
856 856
857 if (!action) { 857 if (!action) {
858 WARN(1, "Trying to free already-free IRQ %d\n", irq); 858 WARN(1, "Trying to free already-free IRQ %d\n", irq);
859 spin_unlock_irqrestore(&desc->lock, flags); 859 raw_spin_unlock_irqrestore(&desc->lock, flags);
860 860
861 return NULL; 861 return NULL;
862 } 862 }
@@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
884 desc->chip->disable(irq); 884 desc->chip->disable(irq);
885 } 885 }
886 886
887 spin_unlock_irqrestore(&desc->lock, flags); 887 raw_spin_unlock_irqrestore(&desc->lock, flags);
888 888
889 unregister_handler_proc(irq, action); 889 unregister_handler_proc(irq, action);
890 890
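__setup_irq() and __free_irq() above are the lock-holding backends of the driver-facing request/free API, including the threaded-interrupt path whose irq_thread() is shown. A hedged sketch of the driver side that exercises it; my_dev_irq_pending() is a hypothetical register check, not a kernel helper:

	static irqreturn_t my_hardirq(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		if (!my_dev_irq_pending(dev))
			return IRQ_NONE;
		return IRQ_WAKE_THREAD;		/* defer the heavy work to the irq thread */
	}

	static irqreturn_t my_thread_fn(int irq, void *dev_id)
	{
		/* runs in the per-irq kernel thread, process context, may sleep */
		return IRQ_HANDLED;
	}

	static int my_dev_setup_irq(struct my_dev *dev, unsigned int irq)
	{
		/* __setup_irq() above runs under desc->lock on behalf of this call */
		return request_threaded_irq(irq, my_hardirq, my_thread_fn,
					    0, "my_dev", dev);
	}

Teardown goes through free_irq(irq, dev), which ends up in __free_irq() shown above.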
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index fcb6c96f2627..241962280836 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
27 if (!desc->chip->set_affinity) 27 if (!desc->chip->set_affinity)
28 return; 28 return;
29 29
30 assert_spin_locked(&desc->lock); 30 assert_raw_spin_locked(&desc->lock);
31 31
32 /* 32 /*
33 * If there was a valid mask to work with, please 33 * If there was a valid mask to work with, please
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 3fd30197da2e..26bac9d8f860 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
42 "for migration.\n", irq); 42 "for migration.\n", irq);
43 return false; 43 return false;
44 } 44 }
45 spin_lock_init(&desc->lock); 45 raw_spin_lock_init(&desc->lock);
46 desc->node = node; 46 desc->node = node;
47 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 47 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
48 init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); 48 init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
67 67
68 irq = old_desc->irq; 68 irq = old_desc->irq;
69 69
70 spin_lock_irqsave(&sparse_irq_lock, flags); 70 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
71 71
72 /* We have to check it to avoid races with another CPU */ 72 /* We have to check it to avoid races with another CPU */
73 desc = irq_desc_ptrs[irq]; 73 desc = irq_desc_ptrs[irq];
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
91 } 91 }
92 92
93 irq_desc_ptrs[irq] = desc; 93 irq_desc_ptrs[irq] = desc;
94 spin_unlock_irqrestore(&sparse_irq_lock, flags); 94 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
95 95
96 /* free the old one */ 96 /* free the old one */
97 free_one_irq_desc(old_desc, desc); 97 free_one_irq_desc(old_desc, desc);
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
100 return desc; 100 return desc;
101 101
102out_unlock: 102out_unlock:
103 spin_unlock_irqrestore(&sparse_irq_lock, flags); 103 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
104 104
105 return desc; 105 return desc;
106} 106}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index a0bb09e79867..0d4005d85b03 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
28 for_each_irq_desc(irq, desc) { 28 for_each_irq_desc(irq, desc) {
29 unsigned long flags; 29 unsigned long flags;
30 30
31 spin_lock_irqsave(&desc->lock, flags); 31 raw_spin_lock_irqsave(&desc->lock, flags);
32 __disable_irq(desc, irq, true); 32 __disable_irq(desc, irq, true);
33 spin_unlock_irqrestore(&desc->lock, flags); 33 raw_spin_unlock_irqrestore(&desc->lock, flags);
34 } 34 }
35 35
36 for_each_irq_desc(irq, desc) 36 for_each_irq_desc(irq, desc)
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
56 if (!(desc->status & IRQ_SUSPENDED)) 56 if (!(desc->status & IRQ_SUSPENDED))
57 continue; 57 continue;
58 58
59 spin_lock_irqsave(&desc->lock, flags); 59 raw_spin_lock_irqsave(&desc->lock, flags);
60 __enable_irq(desc, irq, true); 60 __enable_irq(desc, irq, true);
61 spin_unlock_irqrestore(&desc->lock, flags); 61 raw_spin_unlock_irqrestore(&desc->lock, flags);
62 } 62 }
63} 63}
64EXPORT_SYMBOL_GPL(resume_device_irqs); 64EXPORT_SYMBOL_GPL(resume_device_irqs);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 0832145fea97..6f50eccc79c0 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -179,7 +179,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
179 unsigned long flags; 179 unsigned long flags;
180 int ret = 1; 180 int ret = 1;
181 181
182 spin_lock_irqsave(&desc->lock, flags); 182 raw_spin_lock_irqsave(&desc->lock, flags);
183 for (action = desc->action ; action; action = action->next) { 183 for (action = desc->action ; action; action = action->next) {
184 if ((action != new_action) && action->name && 184 if ((action != new_action) && action->name &&
185 !strcmp(new_action->name, action->name)) { 185 !strcmp(new_action->name, action->name)) {
@@ -187,7 +187,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
187 break; 187 break;
188 } 188 }
189 } 189 }
190 spin_unlock_irqrestore(&desc->lock, flags); 190 raw_spin_unlock_irqrestore(&desc->lock, flags);
191 return ret; 191 return ret;
192} 192}
193 193
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index e49ea1c5232d..89fb90ae534f 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
28 struct irqaction *action; 28 struct irqaction *action;
29 int ok = 0, work = 0; 29 int ok = 0, work = 0;
30 30
31 spin_lock(&desc->lock); 31 raw_spin_lock(&desc->lock);
32 /* Already running on another processor */ 32 /* Already running on another processor */
33 if (desc->status & IRQ_INPROGRESS) { 33 if (desc->status & IRQ_INPROGRESS) {
34 /* 34 /*
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc)
37 */ 37 */
38 if (desc->action && (desc->action->flags & IRQF_SHARED)) 38 if (desc->action && (desc->action->flags & IRQF_SHARED))
39 desc->status |= IRQ_PENDING; 39 desc->status |= IRQ_PENDING;
40 spin_unlock(&desc->lock); 40 raw_spin_unlock(&desc->lock);
41 return ok; 41 return ok;
42 } 42 }
43 /* Honour the normal IRQ locking */ 43 /* Honour the normal IRQ locking */
44 desc->status |= IRQ_INPROGRESS; 44 desc->status |= IRQ_INPROGRESS;
45 action = desc->action; 45 action = desc->action;
46 spin_unlock(&desc->lock); 46 raw_spin_unlock(&desc->lock);
47 47
48 while (action) { 48 while (action) {
49 /* Only shared IRQ handlers are safe to call */ 49 /* Only shared IRQ handlers are safe to call */
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
56 } 56 }
57 local_irq_disable(); 57 local_irq_disable();
58 /* Now clean up the flags */ 58 /* Now clean up the flags */
59 spin_lock(&desc->lock); 59 raw_spin_lock(&desc->lock);
60 action = desc->action; 60 action = desc->action;
61 61
62 /* 62 /*
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc)
68 * Perform real IRQ processing for the IRQ we deferred 68 * Perform real IRQ processing for the IRQ we deferred
69 */ 69 */
70 work = 1; 70 work = 1;
71 spin_unlock(&desc->lock); 71 raw_spin_unlock(&desc->lock);
72 handle_IRQ_event(irq, action); 72 handle_IRQ_event(irq, action);
73 spin_lock(&desc->lock); 73 raw_spin_lock(&desc->lock);
74 desc->status &= ~IRQ_PENDING; 74 desc->status &= ~IRQ_PENDING;
75 } 75 }
76 desc->status &= ~IRQ_INPROGRESS; 76 desc->status &= ~IRQ_INPROGRESS;
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
80 */ 80 */
81 if (work && desc->chip && desc->chip->end) 81 if (work && desc->chip && desc->chip->end)
82 desc->chip->end(irq); 82 desc->chip->end(irq);
83 spin_unlock(&desc->lock); 83 raw_spin_unlock(&desc->lock);
84 84
85 return ok; 85 return ok;
86} 86}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index f336e2107f98..433e9fcc1fc5 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -31,6 +31,7 @@
31#include <linux/cpu.h> 31#include <linux/cpu.h>
32#include <linux/console.h> 32#include <linux/console.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/swap.h>
34 35
35#include <asm/page.h> 36#include <asm/page.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
@@ -1082,6 +1083,64 @@ void crash_kexec(struct pt_regs *regs)
1082 } 1083 }
1083} 1084}
1084 1085
1086size_t crash_get_memory_size(void)
1087{
1088 size_t size;
1089 mutex_lock(&kexec_mutex);
1090 size = crashk_res.end - crashk_res.start + 1;
1091 mutex_unlock(&kexec_mutex);
1092 return size;
1093}
1094
1095static void free_reserved_phys_range(unsigned long begin, unsigned long end)
1096{
1097 unsigned long addr;
1098
1099 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1100 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1101 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1102 free_page((unsigned long)__va(addr));
1103 totalram_pages++;
1104 }
1105}
1106
1107int crash_shrink_memory(unsigned long new_size)
1108{
1109 int ret = 0;
1110 unsigned long start, end;
1111
1112 mutex_lock(&kexec_mutex);
1113
1114 if (kexec_crash_image) {
1115 ret = -ENOENT;
1116 goto unlock;
1117 }
1118 start = crashk_res.start;
1119 end = crashk_res.end;
1120
1121 if (new_size >= end - start + 1) {
1122 ret = -EINVAL;
1123 if (new_size == end - start + 1)
1124 ret = 0;
1125 goto unlock;
1126 }
1127
1128 start = roundup(start, PAGE_SIZE);
1129 end = roundup(start + new_size, PAGE_SIZE);
1130
1131 free_reserved_phys_range(end, crashk_res.end);
1132
1133 if (start == end) {
1134 crashk_res.end = end;
1135 release_resource(&crashk_res);
1136 } else
1137 crashk_res.end = end - 1;
1138
1139unlock:
1140 mutex_unlock(&kexec_mutex);
1141 return ret;
1142}
1143
1085static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, 1144static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1086 size_t data_len) 1145 size_t data_len)
1087{ 1146{
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 528dd78e7e7e..3feaf5a74514 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -100,6 +100,26 @@ static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
100} 100}
101KERNEL_ATTR_RO(kexec_crash_loaded); 101KERNEL_ATTR_RO(kexec_crash_loaded);
102 102
103static ssize_t kexec_crash_size_show(struct kobject *kobj,
104 struct kobj_attribute *attr, char *buf)
105{
106 return sprintf(buf, "%zu\n", crash_get_memory_size());
107}
108static ssize_t kexec_crash_size_store(struct kobject *kobj,
109 struct kobj_attribute *attr,
110 const char *buf, size_t count)
111{
112 unsigned long cnt;
113 int ret;
114
115 if (strict_strtoul(buf, 0, &cnt))
116 return -EINVAL;
117
118 ret = crash_shrink_memory(cnt);
119 return ret < 0 ? ret : count;
120}
121KERNEL_ATTR_RW(kexec_crash_size);
122
103static ssize_t vmcoreinfo_show(struct kobject *kobj, 123static ssize_t vmcoreinfo_show(struct kobject *kobj,
104 struct kobj_attribute *attr, char *buf) 124 struct kobj_attribute *attr, char *buf)
105{ 125{
@@ -147,6 +167,7 @@ static struct attribute * kernel_attrs[] = {
147#ifdef CONFIG_KEXEC 167#ifdef CONFIG_KEXEC
148 &kexec_loaded_attr.attr, 168 &kexec_loaded_attr.attr,
149 &kexec_crash_loaded_attr.attr, 169 &kexec_crash_loaded_attr.attr,
170 &kexec_crash_size_attr.attr,
150 &vmcoreinfo_attr.attr, 171 &vmcoreinfo_attr.attr,
151#endif 172#endif
152 NULL 173 NULL
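The ksysfs.c hunk exposes the new crash_get_memory_size()/crash_shrink_memory() pair as /sys/kernel/kexec_crash_size. A rough userspace sketch of reading and shrinking the crash kernel reservation (error handling trimmed; needs root, and per the kexec.c hunk the store is refused while a crash kernel image is loaded and the size can only shrink, never grow):

	#include <stdio.h>

	int main(void)
	{
		unsigned long size;
		FILE *f;

		f = fopen("/sys/kernel/kexec_crash_size", "r");
		fscanf(f, "%lu", &size);
		fclose(f);
		printf("crashkernel reservation: %lu bytes\n", size);

		f = fopen("/sys/kernel/kexec_crash_size", "w");
		fprintf(f, "%lu\n", 64UL << 20);	/* shrink to 64 MiB */
		fclose(f);
		return 0;
	}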
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 429540c70d3f..5feaddcdbe49 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
73 * to use a raw spinlock - we really dont want the spinlock 73 * to use a raw spinlock - we really dont want the spinlock
74 * code to recurse back into the lockdep code... 74 * code to recurse back into the lockdep code...
75 */ 75 */
76static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 76static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
77 77
78static int graph_lock(void) 78static int graph_lock(void)
79{ 79{
80 __raw_spin_lock(&lockdep_lock); 80 arch_spin_lock(&lockdep_lock);
81 /* 81 /*
82 * Make sure that if another CPU detected a bug while 82 * Make sure that if another CPU detected a bug while
83 * walking the graph we dont change it (while the other 83 * walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
85 * dropped already) 85 * dropped already)
86 */ 86 */
87 if (!debug_locks) { 87 if (!debug_locks) {
88 __raw_spin_unlock(&lockdep_lock); 88 arch_spin_unlock(&lockdep_lock);
89 return 0; 89 return 0;
90 } 90 }
91 /* prevent any recursions within lockdep from causing deadlocks */ 91 /* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
95 95
96static inline int graph_unlock(void) 96static inline int graph_unlock(void)
97{ 97{
98 if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) 98 if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
99 return DEBUG_LOCKS_WARN_ON(1); 99 return DEBUG_LOCKS_WARN_ON(1);
100 100
101 current->lockdep_recursion--; 101 current->lockdep_recursion--;
102 __raw_spin_unlock(&lockdep_lock); 102 arch_spin_unlock(&lockdep_lock);
103 return 0; 103 return 0;
104} 104}
105 105
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
111{ 111{
112 int ret = debug_locks_off(); 112 int ret = debug_locks_off();
113 113
114 __raw_spin_unlock(&lockdep_lock); 114 arch_spin_unlock(&lockdep_lock);
115 115
116 return ret; 116 return ret;
117} 117}
@@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
1170 this.class = class; 1170 this.class = class;
1171 1171
1172 local_irq_save(flags); 1172 local_irq_save(flags);
1173 __raw_spin_lock(&lockdep_lock); 1173 arch_spin_lock(&lockdep_lock);
1174 ret = __lockdep_count_forward_deps(&this); 1174 ret = __lockdep_count_forward_deps(&this);
1175 __raw_spin_unlock(&lockdep_lock); 1175 arch_spin_unlock(&lockdep_lock);
1176 local_irq_restore(flags); 1176 local_irq_restore(flags);
1177 1177
1178 return ret; 1178 return ret;
@@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
1197 this.class = class; 1197 this.class = class;
1198 1198
1199 local_irq_save(flags); 1199 local_irq_save(flags);
1200 __raw_spin_lock(&lockdep_lock); 1200 arch_spin_lock(&lockdep_lock);
1201 ret = __lockdep_count_backward_deps(&this); 1201 ret = __lockdep_count_backward_deps(&this);
1202 __raw_spin_unlock(&lockdep_lock); 1202 arch_spin_unlock(&lockdep_lock);
1203 local_irq_restore(flags); 1203 local_irq_restore(flags);
1204 1204
1205 return ret; 1205 return ret;
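The lockdep.c hunk is the other half of the rename: the old __raw_spin_* helpers on the architecture lock word become arch_spin_*, freeing the raw_spin_* names for raw_spinlock_t. Lockdep keeps using the lowest-level arch lock precisely so that taking it can never recurse back into lockdep. A sketch mirroring the lockdep_lock usage above; the lock name is illustrative:

	static arch_spinlock_t my_low_level_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	static void my_low_level_op(void)
	{
		/* arch_spin_* does no lockdep tracking and no preemption
		 * accounting; the caller handles irqs/preemption itself */
		local_irq_disable();
		arch_spin_lock(&my_low_level_lock);
		/* ... critical section that must not re-enter lockdep ... */
		arch_spin_unlock(&my_low_level_lock);
		local_irq_enable();
	}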
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 6b2d735846a5..57d527a16f9d 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
43 \ 43 \
44 DEBUG_LOCKS_WARN_ON(in_interrupt()); \ 44 DEBUG_LOCKS_WARN_ON(in_interrupt()); \
45 local_irq_save(flags); \ 45 local_irq_save(flags); \
46 __raw_spin_lock(&(lock)->raw_lock); \ 46 arch_spin_lock(&(lock)->rlock.raw_lock);\
47 DEBUG_LOCKS_WARN_ON(l->magic != l); \ 47 DEBUG_LOCKS_WARN_ON(l->magic != l); \
48 } while (0) 48 } while (0)
49 49
50#define spin_unlock_mutex(lock, flags) \ 50#define spin_unlock_mutex(lock, flags) \
51 do { \ 51 do { \
52 __raw_spin_unlock(&(lock)->raw_lock); \ 52 arch_spin_unlock(&(lock)->rlock.raw_lock); \
53 local_irq_restore(flags); \ 53 local_irq_restore(flags); \
54 preempt_check_resched(); \ 54 preempt_check_resched(); \
55 } while (0) 55 } while (0)
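The mutex-debug macros now reach through two layers, which explains the new &(lock)->rlock.raw_lock expression: spinlock_t wraps a raw_spinlock, which in turn wraps the architecture lock word. A condensed sketch of the 2.6.33 layering (debug and lockdep members omitted, so this is not the literal spinlock_types.h definition):

	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;	/* the architecture's lock word */
		/* lockdep/debug fields omitted */
	} raw_spinlock_t;

	typedef struct spinlock {
		union {
			struct raw_spinlock rlock;	/* so &lock->rlock.raw_lock is the arch lock */
		};
	} spinlock_t;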
diff --git a/kernel/panic.c b/kernel/panic.c
index 96b45d0b4ba5..5827f7b97254 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -10,6 +10,7 @@
10 */ 10 */
11#include <linux/debug_locks.h> 11#include <linux/debug_locks.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/kmsg_dump.h>
13#include <linux/kallsyms.h> 14#include <linux/kallsyms.h>
14#include <linux/notifier.h> 15#include <linux/notifier.h>
15#include <linux/module.h> 16#include <linux/module.h>
@@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...)
74 dump_stack(); 75 dump_stack();
75#endif 76#endif
76 77
78 kmsg_dump(KMSG_DUMP_PANIC);
77 /* 79 /*
78 * If we have crashed and we have a crash kernel loaded let it handle 80 * If we have crashed and we have a crash kernel loaded let it handle
79 * everything else. 81 * everything else.
@@ -339,6 +341,7 @@ void oops_exit(void)
339{ 341{
340 do_oops_enter_exit(); 342 do_oops_enter_exit();
341 print_oops_end_marker(); 343 print_oops_end_marker();
344 kmsg_dump(KMSG_DUMP_OOPS);
342} 345}
343 346
344#ifdef WANT_WARN_ON_SLOWPATH 347#ifdef WANT_WARN_ON_SLOWPATH
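The panic.c hunk wires kmsg_dump() into the panic and oops paths so registered dumpers can save the printk buffer to persistent storage. A hedged sketch of a dumper client, assuming the callback signature as introduced with this interface (later kernels changed it to a line-based API):

	#include <linux/kmsg_dump.h>

	static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
			    const char *s1, unsigned long l1,
			    const char *s2, unsigned long l2)
	{
		/* s1/l1 and s2/l2 are the two halves of the printk ring buffer;
		 * push them to flash, MTD, a serial port, etc. Must not sleep. */
	}

	static struct kmsg_dumper my_dumper = {
		.dump = my_dump,
	};

	static int __init my_dumper_init(void)
	{
		return kmsg_dump_register(&my_dumper);
	}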
diff --git a/kernel/params.c b/kernel/params.c
index d656c276508d..cf1b69183127 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/ctype.h> 26#include <linux/ctype.h>
27#include <linux/string.h>
27 28
28#if 0 29#if 0
29#define DEBUGP printk 30#define DEBUGP printk
@@ -122,9 +123,7 @@ static char *next_arg(char *args, char **param, char **val)
122 next = args + i; 123 next = args + i;
123 124
124 /* Chew up trailing spaces. */ 125 /* Chew up trailing spaces. */
125 while (isspace(*next)) 126 return skip_spaces(next);
126 next++;
127 return next;
128} 127}
129 128
130/* Args looks like "foo=bar,bar2 baz=fuz wiz". */ 129/* Args looks like "foo=bar,bar2 baz=fuz wiz". */
@@ -139,8 +138,7 @@ int parse_args(const char *name,
139 DEBUGP("Parsing ARGS: %s\n", args); 138 DEBUGP("Parsing ARGS: %s\n", args);
140 139
141 /* Chew leading spaces */ 140 /* Chew leading spaces */
142 while (isspace(*args)) 141 args = skip_spaces(args);
143 args++;
144 142
145 while (*args) { 143 while (*args) {
146 int ret; 144 int ret;
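The params.c hunk replaces two open-coded isspace() loops with the skip_spaces() helper added to lib/string.c in the same series. For reference, the helper is essentially:

	char *skip_spaces(const char *str)
	{
		while (isspace(*str))
			++str;
		return (char *)str;
	}

so "args = skip_spaces(args);" is a direct drop-in for the removed while loops.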
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e73e53c7582f..9052d6c8c9fd 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
203 * if so. If we locked the right context, then it 203 * if so. If we locked the right context, then it
204 * can't get swapped on us any more. 204 * can't get swapped on us any more.
205 */ 205 */
206 spin_lock_irqsave(&ctx->lock, *flags); 206 raw_spin_lock_irqsave(&ctx->lock, *flags);
207 if (ctx != rcu_dereference(task->perf_event_ctxp)) { 207 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
208 spin_unlock_irqrestore(&ctx->lock, *flags); 208 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
209 goto retry; 209 goto retry;
210 } 210 }
211 211
212 if (!atomic_inc_not_zero(&ctx->refcount)) { 212 if (!atomic_inc_not_zero(&ctx->refcount)) {
213 spin_unlock_irqrestore(&ctx->lock, *flags); 213 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
214 ctx = NULL; 214 ctx = NULL;
215 } 215 }
216 } 216 }
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
231 ctx = perf_lock_task_context(task, &flags); 231 ctx = perf_lock_task_context(task, &flags);
232 if (ctx) { 232 if (ctx) {
233 ++ctx->pin_count; 233 ++ctx->pin_count;
234 spin_unlock_irqrestore(&ctx->lock, flags); 234 raw_spin_unlock_irqrestore(&ctx->lock, flags);
235 } 235 }
236 return ctx; 236 return ctx;
237} 237}
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
240{ 240{
241 unsigned long flags; 241 unsigned long flags;
242 242
243 spin_lock_irqsave(&ctx->lock, flags); 243 raw_spin_lock_irqsave(&ctx->lock, flags);
244 --ctx->pin_count; 244 --ctx->pin_count;
245 spin_unlock_irqrestore(&ctx->lock, flags); 245 raw_spin_unlock_irqrestore(&ctx->lock, flags);
246 put_ctx(ctx); 246 put_ctx(ctx);
247} 247}
248 248
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
427 if (ctx->task && cpuctx->task_ctx != ctx) 427 if (ctx->task && cpuctx->task_ctx != ctx)
428 return; 428 return;
429 429
430 spin_lock(&ctx->lock); 430 raw_spin_lock(&ctx->lock);
431 /* 431 /*
432 * Protect the list operation against NMI by disabling the 432 * Protect the list operation against NMI by disabling the
433 * events on a global level. 433 * events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
449 } 449 }
450 450
451 perf_enable(); 451 perf_enable();
452 spin_unlock(&ctx->lock); 452 raw_spin_unlock(&ctx->lock);
453} 453}
454 454
455 455
@@ -488,12 +488,12 @@ retry:
488 task_oncpu_function_call(task, __perf_event_remove_from_context, 488 task_oncpu_function_call(task, __perf_event_remove_from_context,
489 event); 489 event);
490 490
491 spin_lock_irq(&ctx->lock); 491 raw_spin_lock_irq(&ctx->lock);
492 /* 492 /*
493 * If the context is active we need to retry the smp call. 493 * If the context is active we need to retry the smp call.
494 */ 494 */
495 if (ctx->nr_active && !list_empty(&event->group_entry)) { 495 if (ctx->nr_active && !list_empty(&event->group_entry)) {
496 spin_unlock_irq(&ctx->lock); 496 raw_spin_unlock_irq(&ctx->lock);
497 goto retry; 497 goto retry;
498 } 498 }
499 499
@@ -504,7 +504,7 @@ retry:
504 */ 504 */
505 if (!list_empty(&event->group_entry)) 505 if (!list_empty(&event->group_entry))
506 list_del_event(event, ctx); 506 list_del_event(event, ctx);
507 spin_unlock_irq(&ctx->lock); 507 raw_spin_unlock_irq(&ctx->lock);
508} 508}
509 509
510/* 510/*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
535 if (ctx->task && cpuctx->task_ctx != ctx) 535 if (ctx->task && cpuctx->task_ctx != ctx)
536 return; 536 return;
537 537
538 spin_lock(&ctx->lock); 538 raw_spin_lock(&ctx->lock);
539 539
540 /* 540 /*
541 * If the event is on, turn it off. 541 * If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
551 event->state = PERF_EVENT_STATE_OFF; 551 event->state = PERF_EVENT_STATE_OFF;
552 } 552 }
553 553
554 spin_unlock(&ctx->lock); 554 raw_spin_unlock(&ctx->lock);
555} 555}
556 556
557/* 557/*
@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
584 retry: 584 retry:
585 task_oncpu_function_call(task, __perf_event_disable, event); 585 task_oncpu_function_call(task, __perf_event_disable, event);
586 586
587 spin_lock_irq(&ctx->lock); 587 raw_spin_lock_irq(&ctx->lock);
588 /* 588 /*
589 * If the event is still active, we need to retry the cross-call. 589 * If the event is still active, we need to retry the cross-call.
590 */ 590 */
591 if (event->state == PERF_EVENT_STATE_ACTIVE) { 591 if (event->state == PERF_EVENT_STATE_ACTIVE) {
592 spin_unlock_irq(&ctx->lock); 592 raw_spin_unlock_irq(&ctx->lock);
593 goto retry; 593 goto retry;
594 } 594 }
595 595
@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
602 event->state = PERF_EVENT_STATE_OFF; 602 event->state = PERF_EVENT_STATE_OFF;
603 } 603 }
604 604
605 spin_unlock_irq(&ctx->lock); 605 raw_spin_unlock_irq(&ctx->lock);
606} 606}
607 607
608static int 608static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
770 cpuctx->task_ctx = ctx; 770 cpuctx->task_ctx = ctx;
771 } 771 }
772 772
773 spin_lock(&ctx->lock); 773 raw_spin_lock(&ctx->lock);
774 ctx->is_active = 1; 774 ctx->is_active = 1;
775 update_context_time(ctx); 775 update_context_time(ctx);
776 776
@@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info)
820 unlock: 820 unlock:
821 perf_enable(); 821 perf_enable();
822 822
823 spin_unlock(&ctx->lock); 823 raw_spin_unlock(&ctx->lock);
824} 824}
825 825
826/* 826/*
@@ -856,12 +856,12 @@ retry:
856 task_oncpu_function_call(task, __perf_install_in_context, 856 task_oncpu_function_call(task, __perf_install_in_context,
857 event); 857 event);
858 858
859 spin_lock_irq(&ctx->lock); 859 raw_spin_lock_irq(&ctx->lock);
860 /* 860 /*
861 * we need to retry the smp call. 861 * we need to retry the smp call.
862 */ 862 */
863 if (ctx->is_active && list_empty(&event->group_entry)) { 863 if (ctx->is_active && list_empty(&event->group_entry)) {
864 spin_unlock_irq(&ctx->lock); 864 raw_spin_unlock_irq(&ctx->lock);
865 goto retry; 865 goto retry;
866 } 866 }
867 867
@@ -872,7 +872,7 @@ retry:
872 */ 872 */
873 if (list_empty(&event->group_entry)) 873 if (list_empty(&event->group_entry))
874 add_event_to_ctx(event, ctx); 874 add_event_to_ctx(event, ctx);
875 spin_unlock_irq(&ctx->lock); 875 raw_spin_unlock_irq(&ctx->lock);
876} 876}
877 877
878/* 878/*
@@ -917,7 +917,7 @@ static void __perf_event_enable(void *info)
917 cpuctx->task_ctx = ctx; 917 cpuctx->task_ctx = ctx;
918 } 918 }
919 919
920 spin_lock(&ctx->lock); 920 raw_spin_lock(&ctx->lock);
921 ctx->is_active = 1; 921 ctx->is_active = 1;
922 update_context_time(ctx); 922 update_context_time(ctx);
923 923
@@ -959,7 +959,7 @@ static void __perf_event_enable(void *info)
959 } 959 }
960 960
961 unlock: 961 unlock:
962 spin_unlock(&ctx->lock); 962 raw_spin_unlock(&ctx->lock);
963} 963}
964 964
965/* 965/*
@@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event)
985 return; 985 return;
986 } 986 }
987 987
988 spin_lock_irq(&ctx->lock); 988 raw_spin_lock_irq(&ctx->lock);
989 if (event->state >= PERF_EVENT_STATE_INACTIVE) 989 if (event->state >= PERF_EVENT_STATE_INACTIVE)
990 goto out; 990 goto out;
991 991
@@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event)
1000 event->state = PERF_EVENT_STATE_OFF; 1000 event->state = PERF_EVENT_STATE_OFF;
1001 1001
1002 retry: 1002 retry:
1003 spin_unlock_irq(&ctx->lock); 1003 raw_spin_unlock_irq(&ctx->lock);
1004 task_oncpu_function_call(task, __perf_event_enable, event); 1004 task_oncpu_function_call(task, __perf_event_enable, event);
1005 1005
1006 spin_lock_irq(&ctx->lock); 1006 raw_spin_lock_irq(&ctx->lock);
1007 1007
1008 /* 1008 /*
1009 * If the context is active and the event is still off, 1009 * If the context is active and the event is still off,
@@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event)
1020 __perf_event_mark_enabled(event, ctx); 1020 __perf_event_mark_enabled(event, ctx);
1021 1021
1022 out: 1022 out:
1023 spin_unlock_irq(&ctx->lock); 1023 raw_spin_unlock_irq(&ctx->lock);
1024} 1024}
1025 1025
1026static int perf_event_refresh(struct perf_event *event, int refresh) 1026static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
1042{ 1042{
1043 struct perf_event *event; 1043 struct perf_event *event;
1044 1044
1045 spin_lock(&ctx->lock); 1045 raw_spin_lock(&ctx->lock);
1046 ctx->is_active = 0; 1046 ctx->is_active = 0;
1047 if (likely(!ctx->nr_events)) 1047 if (likely(!ctx->nr_events))
1048 goto out; 1048 goto out;
@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
1055 } 1055 }
1056 perf_enable(); 1056 perf_enable();
1057 out: 1057 out:
1058 spin_unlock(&ctx->lock); 1058 raw_spin_unlock(&ctx->lock);
1059} 1059}
1060 1060
1061/* 1061/*
@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task,
1193 * order we take the locks because no other cpu could 1193 * order we take the locks because no other cpu could
1194 * be trying to lock both of these tasks. 1194 * be trying to lock both of these tasks.
1195 */ 1195 */
1196 spin_lock(&ctx->lock); 1196 raw_spin_lock(&ctx->lock);
1197 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 1197 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1198 if (context_equiv(ctx, next_ctx)) { 1198 if (context_equiv(ctx, next_ctx)) {
1199 /* 1199 /*
1200 * XXX do we need a memory barrier of sorts 1200 * XXX do we need a memory barrier of sorts
@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task,
1208 1208
1209 perf_event_sync_stat(ctx, next_ctx); 1209 perf_event_sync_stat(ctx, next_ctx);
1210 } 1210 }
1211 spin_unlock(&next_ctx->lock); 1211 raw_spin_unlock(&next_ctx->lock);
1212 spin_unlock(&ctx->lock); 1212 raw_spin_unlock(&ctx->lock);
1213 } 1213 }
1214 rcu_read_unlock(); 1214 rcu_read_unlock();
1215 1215
@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1251 struct perf_event *event; 1251 struct perf_event *event;
1252 int can_add_hw = 1; 1252 int can_add_hw = 1;
1253 1253
1254 spin_lock(&ctx->lock); 1254 raw_spin_lock(&ctx->lock);
1255 ctx->is_active = 1; 1255 ctx->is_active = 1;
1256 if (likely(!ctx->nr_events)) 1256 if (likely(!ctx->nr_events))
1257 goto out; 1257 goto out;
@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1306 } 1306 }
1307 perf_enable(); 1307 perf_enable();
1308 out: 1308 out:
1309 spin_unlock(&ctx->lock); 1309 raw_spin_unlock(&ctx->lock);
1310} 1310}
1311 1311
1312/* 1312/*
@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1370 struct hw_perf_event *hwc; 1370 struct hw_perf_event *hwc;
1371 u64 interrupts, freq; 1371 u64 interrupts, freq;
1372 1372
1373 spin_lock(&ctx->lock); 1373 raw_spin_lock(&ctx->lock);
1374 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 1374 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1375 if (event->state != PERF_EVENT_STATE_ACTIVE) 1375 if (event->state != PERF_EVENT_STATE_ACTIVE)
1376 continue; 1376 continue;
@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1425 perf_enable(); 1425 perf_enable();
1426 } 1426 }
1427 } 1427 }
1428 spin_unlock(&ctx->lock); 1428 raw_spin_unlock(&ctx->lock);
1429} 1429}
1430 1430
1431/* 1431/*
@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
1438 if (!ctx->nr_events) 1438 if (!ctx->nr_events)
1439 return; 1439 return;
1440 1440
1441 spin_lock(&ctx->lock); 1441 raw_spin_lock(&ctx->lock);
1442 /* 1442 /*
1443 * Rotate the first entry last (works just fine for group events too): 1443 * Rotate the first entry last (works just fine for group events too):
1444 */ 1444 */
@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
1449 } 1449 }
1450 perf_enable(); 1450 perf_enable();
1451 1451
1452 spin_unlock(&ctx->lock); 1452 raw_spin_unlock(&ctx->lock);
1453} 1453}
1454 1454
1455void perf_event_task_tick(struct task_struct *curr, int cpu) 1455void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
1498 1498
1499 __perf_event_task_sched_out(ctx); 1499 __perf_event_task_sched_out(ctx);
1500 1500
1501 spin_lock(&ctx->lock); 1501 raw_spin_lock(&ctx->lock);
1502 1502
1503 list_for_each_entry(event, &ctx->group_list, group_entry) { 1503 list_for_each_entry(event, &ctx->group_list, group_entry) {
1504 if (!event->attr.enable_on_exec) 1504 if (!event->attr.enable_on_exec)
@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
1516 if (enabled) 1516 if (enabled)
1517 unclone_ctx(ctx); 1517 unclone_ctx(ctx);
1518 1518
1519 spin_unlock(&ctx->lock); 1519 raw_spin_unlock(&ctx->lock);
1520 1520
1521 perf_event_task_sched_in(task, smp_processor_id()); 1521 perf_event_task_sched_in(task, smp_processor_id());
1522 out: 1522 out:
@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info)
1542 if (ctx->task && cpuctx->task_ctx != ctx) 1542 if (ctx->task && cpuctx->task_ctx != ctx)
1543 return; 1543 return;
1544 1544
1545 spin_lock(&ctx->lock); 1545 raw_spin_lock(&ctx->lock);
1546 update_context_time(ctx); 1546 update_context_time(ctx);
1547 update_event_times(event); 1547 update_event_times(event);
1548 spin_unlock(&ctx->lock); 1548 raw_spin_unlock(&ctx->lock);
1549 1549
1550 event->pmu->read(event); 1550 event->pmu->read(event);
1551} 1551}
@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event)
1563 struct perf_event_context *ctx = event->ctx; 1563 struct perf_event_context *ctx = event->ctx;
1564 unsigned long flags; 1564 unsigned long flags;
1565 1565
1566 spin_lock_irqsave(&ctx->lock, flags); 1566 raw_spin_lock_irqsave(&ctx->lock, flags);
1567 update_context_time(ctx); 1567 update_context_time(ctx);
1568 update_event_times(event); 1568 update_event_times(event);
1569 spin_unlock_irqrestore(&ctx->lock, flags); 1569 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1570 } 1570 }
1571 1571
1572 return atomic64_read(&event->count); 1572 return atomic64_read(&event->count);
@@ -1579,7 +1579,7 @@ static void
1579__perf_event_init_context(struct perf_event_context *ctx, 1579__perf_event_init_context(struct perf_event_context *ctx,
1580 struct task_struct *task) 1580 struct task_struct *task)
1581{ 1581{
1582 spin_lock_init(&ctx->lock); 1582 raw_spin_lock_init(&ctx->lock);
1583 mutex_init(&ctx->mutex); 1583 mutex_init(&ctx->mutex);
1584 INIT_LIST_HEAD(&ctx->group_list); 1584 INIT_LIST_HEAD(&ctx->group_list);
1585 INIT_LIST_HEAD(&ctx->event_list); 1585 INIT_LIST_HEAD(&ctx->event_list);
@@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1649 ctx = perf_lock_task_context(task, &flags); 1649 ctx = perf_lock_task_context(task, &flags);
1650 if (ctx) { 1650 if (ctx) {
1651 unclone_ctx(ctx); 1651 unclone_ctx(ctx);
1652 spin_unlock_irqrestore(&ctx->lock, flags); 1652 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1653 } 1653 }
1654 1654
1655 if (!ctx) { 1655 if (!ctx) {
@@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
1987 if (!value) 1987 if (!value)
1988 return -EINVAL; 1988 return -EINVAL;
1989 1989
1990 spin_lock_irq(&ctx->lock); 1990 raw_spin_lock_irq(&ctx->lock);
1991 if (event->attr.freq) { 1991 if (event->attr.freq) {
1992 if (value > sysctl_perf_event_sample_rate) { 1992 if (value > sysctl_perf_event_sample_rate) {
1993 ret = -EINVAL; 1993 ret = -EINVAL;
@@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
2000 event->hw.sample_period = value; 2000 event->hw.sample_period = value;
2001 } 2001 }
2002unlock: 2002unlock:
2003 spin_unlock_irq(&ctx->lock); 2003 raw_spin_unlock_irq(&ctx->lock);
2004 2004
2005 return ret; 2005 return ret;
2006} 2006}
@@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child)
4992 * reading child->perf_event_ctxp, we wait until it has 4992 * reading child->perf_event_ctxp, we wait until it has
4993 * incremented the context's refcount before we do put_ctx below. 4993 * incremented the context's refcount before we do put_ctx below.
4994 */ 4994 */
4995 spin_lock(&child_ctx->lock); 4995 raw_spin_lock(&child_ctx->lock);
4996 child->perf_event_ctxp = NULL; 4996 child->perf_event_ctxp = NULL;
4997 /* 4997 /*
4998 * If this context is a clone; unclone it so it can't get 4998 * If this context is a clone; unclone it so it can't get
@@ -5001,7 +5001,7 @@ void perf_event_exit_task(struct task_struct *child)
5001 */ 5001 */
5002 unclone_ctx(child_ctx); 5002 unclone_ctx(child_ctx);
5003 update_context_time(child_ctx); 5003 update_context_time(child_ctx);
5004 spin_unlock_irqrestore(&child_ctx->lock, flags); 5004 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5005 5005
5006 /* 5006 /*
5007 * Report the task dead after unscheduling the events so that we 5007 * Report the task dead after unscheduling the events so that we
@@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
5292 perf_reserved_percpu = val; 5292 perf_reserved_percpu = val;
5293 for_each_online_cpu(cpu) { 5293 for_each_online_cpu(cpu) {
5294 cpuctx = &per_cpu(perf_cpu_context, cpu); 5294 cpuctx = &per_cpu(perf_cpu_context, cpu);
5295 spin_lock_irq(&cpuctx->ctx.lock); 5295 raw_spin_lock_irq(&cpuctx->ctx.lock);
5296 mpt = min(perf_max_events - cpuctx->ctx.nr_events, 5296 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
5297 perf_max_events - perf_reserved_percpu); 5297 perf_max_events - perf_reserved_percpu);
5298 cpuctx->max_pertask = mpt; 5298 cpuctx->max_pertask = mpt;
5299 spin_unlock_irq(&cpuctx->ctx.lock); 5299 raw_spin_unlock_irq(&cpuctx->ctx.lock);
5300 } 5300 }
5301 spin_unlock(&perf_resource_lock); 5301 spin_unlock(&perf_resource_lock);
5302 5302
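The perf_event.c hunks above are a mechanical conversion of the event-context lock from spinlock_t to raw_spinlock_t, so the lock keeps spinning even on configurations where the plain spinlock type may become sleepable (e.g. PREEMPT_RT). A minimal sketch of the resulting locking pattern, using only the raw_spin_* calls that appear in the hunks; struct my_ctx and its fields are illustrative stand-ins, not kernel structures:

#include <linux/spinlock.h>

struct my_ctx {
	raw_spinlock_t lock;	/* was spinlock_t before the conversion */
	int nr_events;
};

static void my_ctx_init(struct my_ctx *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	ctx->nr_events = 0;
}

static void my_ctx_add_event(struct my_ctx *ctx)
{
	unsigned long flags;

	/* irqsave variant, mirroring perf_event_read() above */
	raw_spin_lock_irqsave(&ctx->lock, flags);
	ctx->nr_events++;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}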
diff --git a/kernel/pid.c b/kernel/pid.c
index d3f722d20f9c..2e17c9c92cbe 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -141,11 +141,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
141 * installing it: 141 * installing it:
142 */ 142 */
143 spin_lock_irq(&pidmap_lock); 143 spin_lock_irq(&pidmap_lock);
144 if (map->page) 144 if (!map->page) {
145 kfree(page);
146 else
147 map->page = page; 145 map->page = page;
146 page = NULL;
147 }
148 spin_unlock_irq(&pidmap_lock); 148 spin_unlock_irq(&pidmap_lock);
149 kfree(page);
149 if (unlikely(!map->page)) 150 if (unlikely(!map->page))
150 break; 151 break;
151 } 152 }
@@ -268,12 +269,11 @@ struct pid *alloc_pid(struct pid_namespace *ns)
268 for (type = 0; type < PIDTYPE_MAX; ++type) 269 for (type = 0; type < PIDTYPE_MAX; ++type)
269 INIT_HLIST_HEAD(&pid->tasks[type]); 270 INIT_HLIST_HEAD(&pid->tasks[type]);
270 271
272 upid = pid->numbers + ns->level;
271 spin_lock_irq(&pidmap_lock); 273 spin_lock_irq(&pidmap_lock);
272 for (i = ns->level; i >= 0; i--) { 274 for ( ; upid >= pid->numbers; --upid)
273 upid = &pid->numbers[i];
274 hlist_add_head_rcu(&upid->pid_chain, 275 hlist_add_head_rcu(&upid->pid_chain,
275 &pid_hash[pid_hashfn(upid->nr, upid->ns)]); 276 &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
276 }
277 spin_unlock_irq(&pidmap_lock); 277 spin_unlock_irq(&pidmap_lock);
278 278
279out: 279out:
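The alloc_pidmap() hunk above reworks the racy page installation: the winner publishes its page under the lock and clears its local pointer, and kfree() runs unconditionally after the unlock (kfree(NULL) is a no-op), so the losing allocation is never freed while holding pidmap_lock. A minimal sketch of that install-or-free pattern with illustrative names standing in for the real pidmap structures:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pidmap_lock);		/* illustrative */
static struct { void *page; } pidmap_entry;	/* illustrative */

static int install_pidmap_page(void)
{
	void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);

	spin_lock_irq(&pidmap_lock);
	if (!pidmap_entry.page) {
		pidmap_entry.page = page;	/* we won the race: install */
		page = NULL;			/* ownership transferred */
	}
	spin_unlock_irq(&pidmap_lock);
	kfree(page);	/* no-op if we won; frees the duplicate if we lost */

	return pidmap_entry.page ? 0 : -ENOMEM;
}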
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 5187136fe1de..218e5af90156 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -6,7 +6,7 @@
6 6
7#include <linux/vt_kern.h> 7#include <linux/vt_kern.h>
8#include <linux/kbd_kern.h> 8#include <linux/kbd_kern.h>
9#include <linux/console.h> 9#include <linux/vt.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include "power.h" 11#include "power.h"
12 12
@@ -21,8 +21,7 @@ int pm_prepare_console(void)
21 if (orig_fgconsole < 0) 21 if (orig_fgconsole < 0)
22 return 1; 22 return 1;
23 23
24 orig_kmsg = kmsg_redirect; 24 orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
25 kmsg_redirect = SUSPEND_CONSOLE;
26 return 0; 25 return 0;
27} 26}
28 27
@@ -30,7 +29,7 @@ void pm_restore_console(void)
30{ 29{
31 if (orig_fgconsole >= 0) { 30 if (orig_fgconsole >= 0) {
32 vt_move_to_console(orig_fgconsole, 0); 31 vt_move_to_console(orig_fgconsole, 0);
33 kmsg_redirect = orig_kmsg; 32 vt_kmsg_redirect(orig_kmsg);
34 } 33 }
35} 34}
36#endif 35#endif
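pm_prepare_console()/pm_restore_console() now go through vt_kmsg_redirect() instead of touching kmsg_redirect directly: the call both installs the new redirect target and returns the previous one, so the restore path simply hands the saved value back. A hypothetical sketch of what such a helper could look like (an assumption for illustration, not the actual vt.c implementation; the real declaration lives in <linux/vt.h>):

/* Hypothetical vt_kmsg_redirect(): set a new kmsg redirect console and
 * return the previous one; a negative argument could query the current
 * value without changing it.  Illustrative only. */
int vt_kmsg_redirect(int new)
{
	static int kmsg_con;		/* 0 = no redirection */
	int old = kmsg_con;

	if (new >= 0)
		kmsg_con = new;

	return old;
}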
diff --git a/kernel/printk.c b/kernel/printk.c
index b5ac4d99c667..1ded8e7dd19b 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -34,6 +34,7 @@
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/kexec.h> 35#include <linux/kexec.h>
36#include <linux/ratelimit.h> 36#include <linux/ratelimit.h>
37#include <linux/kmsg_dump.h>
37 38
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39 40
@@ -1405,4 +1406,122 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies,
1405 return false; 1406 return false;
1406} 1407}
1407EXPORT_SYMBOL(printk_timed_ratelimit); 1408EXPORT_SYMBOL(printk_timed_ratelimit);
1409
1410static DEFINE_SPINLOCK(dump_list_lock);
1411static LIST_HEAD(dump_list);
1412
1413/**
1414 * kmsg_dump_register - register a kernel log dumper.
1415 * @dumper: pointer to the kmsg_dumper structure
1416 *
1417 * Adds a kernel log dumper to the system. The dump callback in the
1418 * structure must be set; it is called when the kernel oopses or panics.
1419 * Returns zero on success and %-EINVAL or %-EBUSY otherwise.
1420 */
1421int kmsg_dump_register(struct kmsg_dumper *dumper)
1422{
1423 unsigned long flags;
1424 int err = -EBUSY;
1425
1426 /* The dump callback needs to be set */
1427 if (!dumper->dump)
1428 return -EINVAL;
1429
1430 spin_lock_irqsave(&dump_list_lock, flags);
1431 /* Don't allow registering multiple times */
1432 if (!dumper->registered) {
1433 dumper->registered = 1;
1434 list_add_tail(&dumper->list, &dump_list);
1435 err = 0;
1436 }
1437 spin_unlock_irqrestore(&dump_list_lock, flags);
1438
1439 return err;
1440}
1441EXPORT_SYMBOL_GPL(kmsg_dump_register);
1442
1443/**
1444 * kmsg_dump_unregister - unregister a kmsg dumper.
1445 * @dumper: pointer to the kmsg_dumper structure
1446 *
1447 * Removes a kernel log dumper from the system. Returns zero on success and
1448 * %-EINVAL otherwise.
1449 */
1450int kmsg_dump_unregister(struct kmsg_dumper *dumper)
1451{
1452 unsigned long flags;
1453 int err = -EINVAL;
1454
1455 spin_lock_irqsave(&dump_list_lock, flags);
1456 if (dumper->registered) {
1457 dumper->registered = 0;
1458 list_del(&dumper->list);
1459 err = 0;
1460 }
1461 spin_unlock_irqrestore(&dump_list_lock, flags);
1462
1463 return err;
1464}
1465EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
1466
1467static const char * const kmsg_reasons[] = {
1468 [KMSG_DUMP_OOPS] = "oops",
1469 [KMSG_DUMP_PANIC] = "panic",
1470};
1471
1472static const char *kmsg_to_str(enum kmsg_dump_reason reason)
1473{
1474 if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
1475 return "unknown";
1476
1477 return kmsg_reasons[reason];
1478}
1479
1480/**
1481 * kmsg_dump - dump kernel log to kernel message dumpers.
1482 * @reason: the reason (oops, panic etc) for dumping
1483 *
1484 * Iterate through each of the dump devices and call the oops/panic
1485 * callbacks with the log buffer.
1486 */
1487void kmsg_dump(enum kmsg_dump_reason reason)
1488{
1489 unsigned long end;
1490 unsigned chars;
1491 struct kmsg_dumper *dumper;
1492 const char *s1, *s2;
1493 unsigned long l1, l2;
1494 unsigned long flags;
1495
1496 /* Theoretically, the log could move on after we do this, but
1497 there's not a lot we can do about that. The new messages
1498 will overwrite the start of what we dump. */
1499 spin_lock_irqsave(&logbuf_lock, flags);
1500 end = log_end & LOG_BUF_MASK;
1501 chars = logged_chars;
1502 spin_unlock_irqrestore(&logbuf_lock, flags);
1503
 1504	if (chars > end) {
 1505		s1 = log_buf + log_buf_len - chars + end;
 1506		l1 = chars - end;
1507
1508 s2 = log_buf;
1509 l2 = end;
1510 } else {
1511 s1 = "";
1512 l1 = 0;
1513
 1514		s2 = log_buf + end - chars;
 1515		l2 = chars;
1516 }
1517
1518 if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
 1519		printk(KERN_ERR "kmsg_dump: dump list lock is held during %s, skipping dump\n",
1520 kmsg_to_str(reason));
1521 return;
1522 }
1523 list_for_each_entry(dumper, &dump_list, list)
1524 dumper->dump(dumper, reason, s1, l1, s2, l2);
1525 spin_unlock_irqrestore(&dump_list_lock, flags);
1526}
1408#endif 1527#endif
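The kmsg_dump_register()/kmsg_dump() pair added above lets a driver capture the tail of the log buffer when the kernel oopses or panics. A minimal sketch of a dumper written against the callback signature actually used by kmsg_dump() (dumper->dump(dumper, reason, s1, l1, s2, l2)); the struct kmsg_dumper layout is assumed from the fields the patch touches (dump, list, registered), and the destination buffer is illustrative:

#include <linux/kernel.h>
#include <linux/kmsg_dump.h>
#include <linux/module.h>
#include <linux/string.h>

static char example_buf[1024];		/* illustrative destination */

/* s1/l1 is the older, wrapped-around part of the log; s2/l2 ends at the
 * current write position, so its tail holds the newest messages. */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	unsigned long n = min_t(unsigned long, l2, sizeof(example_buf));

	memcpy(example_buf, s2 + l2 - n, n);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");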
diff --git a/kernel/relay.c b/kernel/relay.c
index 760c26209a3c..c705a41b4ba3 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1198,7 +1198,7 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
1198 relay_consume_bytes(rbuf, buf->private); 1198 relay_consume_bytes(rbuf, buf->private);
1199} 1199}
1200 1200
1201static struct pipe_buf_operations relay_pipe_buf_ops = { 1201static const struct pipe_buf_operations relay_pipe_buf_ops = {
1202 .can_merge = 0, 1202 .can_merge = 0,
1203 .map = generic_pipe_buf_map, 1203 .map = generic_pipe_buf_map,
1204 .unmap = generic_pipe_buf_unmap, 1204 .unmap = generic_pipe_buf_unmap,
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5fcb4fe645e2..ddabb54bb5c8 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -37,8 +37,8 @@ do { \
37 if (rt_trace_on) { \ 37 if (rt_trace_on) { \
38 rt_trace_on = 0; \ 38 rt_trace_on = 0; \
39 console_verbose(); \ 39 console_verbose(); \
40 if (spin_is_locked(&current->pi_lock)) \ 40 if (raw_spin_is_locked(&current->pi_lock)) \
41 spin_unlock(&current->pi_lock); \ 41 raw_spin_unlock(&current->pi_lock); \
42 } \ 42 } \
43} while (0) 43} while (0)
44 44
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 29bd4baf9e75..a9604815786a 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
138{ 138{
139 unsigned long flags; 139 unsigned long flags;
140 140
141 spin_lock_irqsave(&task->pi_lock, flags); 141 raw_spin_lock_irqsave(&task->pi_lock, flags);
142 __rt_mutex_adjust_prio(task); 142 __rt_mutex_adjust_prio(task);
143 spin_unlock_irqrestore(&task->pi_lock, flags); 143 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
144} 144}
145 145
146/* 146/*
@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
195 /* 195 /*
196 * Task can not go away as we did a get_task() before ! 196 * Task can not go away as we did a get_task() before !
197 */ 197 */
198 spin_lock_irqsave(&task->pi_lock, flags); 198 raw_spin_lock_irqsave(&task->pi_lock, flags);
199 199
200 waiter = task->pi_blocked_on; 200 waiter = task->pi_blocked_on;
201 /* 201 /*
@@ -231,8 +231,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
231 goto out_unlock_pi; 231 goto out_unlock_pi;
232 232
233 lock = waiter->lock; 233 lock = waiter->lock;
234 if (!spin_trylock(&lock->wait_lock)) { 234 if (!raw_spin_trylock(&lock->wait_lock)) {
235 spin_unlock_irqrestore(&task->pi_lock, flags); 235 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
236 cpu_relax(); 236 cpu_relax();
237 goto retry; 237 goto retry;
238 } 238 }
@@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
240 /* Deadlock detection */ 240 /* Deadlock detection */
241 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { 241 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
242 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); 242 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
243 spin_unlock(&lock->wait_lock); 243 raw_spin_unlock(&lock->wait_lock);
244 ret = deadlock_detect ? -EDEADLK : 0; 244 ret = deadlock_detect ? -EDEADLK : 0;
245 goto out_unlock_pi; 245 goto out_unlock_pi;
246 } 246 }
@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
253 plist_add(&waiter->list_entry, &lock->wait_list); 253 plist_add(&waiter->list_entry, &lock->wait_list);
254 254
255 /* Release the task */ 255 /* Release the task */
256 spin_unlock_irqrestore(&task->pi_lock, flags); 256 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
257 put_task_struct(task); 257 put_task_struct(task);
258 258
259 /* Grab the next task */ 259 /* Grab the next task */
260 task = rt_mutex_owner(lock); 260 task = rt_mutex_owner(lock);
261 get_task_struct(task); 261 get_task_struct(task);
262 spin_lock_irqsave(&task->pi_lock, flags); 262 raw_spin_lock_irqsave(&task->pi_lock, flags);
263 263
264 if (waiter == rt_mutex_top_waiter(lock)) { 264 if (waiter == rt_mutex_top_waiter(lock)) {
265 /* Boost the owner */ 265 /* Boost the owner */
@@ -277,10 +277,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
277 __rt_mutex_adjust_prio(task); 277 __rt_mutex_adjust_prio(task);
278 } 278 }
279 279
280 spin_unlock_irqrestore(&task->pi_lock, flags); 280 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
281 281
282 top_waiter = rt_mutex_top_waiter(lock); 282 top_waiter = rt_mutex_top_waiter(lock);
283 spin_unlock(&lock->wait_lock); 283 raw_spin_unlock(&lock->wait_lock);
284 284
285 if (!detect_deadlock && waiter != top_waiter) 285 if (!detect_deadlock && waiter != top_waiter)
286 goto out_put_task; 286 goto out_put_task;
@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
288 goto again; 288 goto again;
289 289
290 out_unlock_pi: 290 out_unlock_pi:
291 spin_unlock_irqrestore(&task->pi_lock, flags); 291 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
292 out_put_task: 292 out_put_task:
293 put_task_struct(task); 293 put_task_struct(task);
294 294
@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
313 if (pendowner == task) 313 if (pendowner == task)
314 return 1; 314 return 1;
315 315
316 spin_lock_irqsave(&pendowner->pi_lock, flags); 316 raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
317 if (task->prio >= pendowner->prio) { 317 if (task->prio >= pendowner->prio) {
318 spin_unlock_irqrestore(&pendowner->pi_lock, flags); 318 raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
319 return 0; 319 return 0;
320 } 320 }
321 321
@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
325 * priority. 325 * priority.
326 */ 326 */
327 if (likely(!rt_mutex_has_waiters(lock))) { 327 if (likely(!rt_mutex_has_waiters(lock))) {
328 spin_unlock_irqrestore(&pendowner->pi_lock, flags); 328 raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
329 return 1; 329 return 1;
330 } 330 }
331 331
@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
333 next = rt_mutex_top_waiter(lock); 333 next = rt_mutex_top_waiter(lock);
334 plist_del(&next->pi_list_entry, &pendowner->pi_waiters); 334 plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
335 __rt_mutex_adjust_prio(pendowner); 335 __rt_mutex_adjust_prio(pendowner);
336 spin_unlock_irqrestore(&pendowner->pi_lock, flags); 336 raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
337 337
338 /* 338 /*
339 * We are going to steal the lock and a waiter was 339 * We are going to steal the lock and a waiter was
@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
350 * might be task: 350 * might be task:
351 */ 351 */
352 if (likely(next->task != task)) { 352 if (likely(next->task != task)) {
353 spin_lock_irqsave(&task->pi_lock, flags); 353 raw_spin_lock_irqsave(&task->pi_lock, flags);
354 plist_add(&next->pi_list_entry, &task->pi_waiters); 354 plist_add(&next->pi_list_entry, &task->pi_waiters);
355 __rt_mutex_adjust_prio(task); 355 __rt_mutex_adjust_prio(task);
356 spin_unlock_irqrestore(&task->pi_lock, flags); 356 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
357 } 357 }
358 return 1; 358 return 1;
359} 359}
@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
420 unsigned long flags; 420 unsigned long flags;
421 int chain_walk = 0, res; 421 int chain_walk = 0, res;
422 422
423 spin_lock_irqsave(&task->pi_lock, flags); 423 raw_spin_lock_irqsave(&task->pi_lock, flags);
424 __rt_mutex_adjust_prio(task); 424 __rt_mutex_adjust_prio(task);
425 waiter->task = task; 425 waiter->task = task;
426 waiter->lock = lock; 426 waiter->lock = lock;
@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
434 434
435 task->pi_blocked_on = waiter; 435 task->pi_blocked_on = waiter;
436 436
437 spin_unlock_irqrestore(&task->pi_lock, flags); 437 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
438 438
439 if (waiter == rt_mutex_top_waiter(lock)) { 439 if (waiter == rt_mutex_top_waiter(lock)) {
440 spin_lock_irqsave(&owner->pi_lock, flags); 440 raw_spin_lock_irqsave(&owner->pi_lock, flags);
441 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); 441 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
442 plist_add(&waiter->pi_list_entry, &owner->pi_waiters); 442 plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
443 443
444 __rt_mutex_adjust_prio(owner); 444 __rt_mutex_adjust_prio(owner);
445 if (owner->pi_blocked_on) 445 if (owner->pi_blocked_on)
446 chain_walk = 1; 446 chain_walk = 1;
447 spin_unlock_irqrestore(&owner->pi_lock, flags); 447 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
448 } 448 }
449 else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) 449 else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
450 chain_walk = 1; 450 chain_walk = 1;
@@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
459 */ 459 */
460 get_task_struct(owner); 460 get_task_struct(owner);
461 461
462 spin_unlock(&lock->wait_lock); 462 raw_spin_unlock(&lock->wait_lock);
463 463
464 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, 464 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
465 task); 465 task);
466 466
467 spin_lock(&lock->wait_lock); 467 raw_spin_lock(&lock->wait_lock);
468 468
469 return res; 469 return res;
470} 470}
@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
483 struct task_struct *pendowner; 483 struct task_struct *pendowner;
484 unsigned long flags; 484 unsigned long flags;
485 485
486 spin_lock_irqsave(&current->pi_lock, flags); 486 raw_spin_lock_irqsave(&current->pi_lock, flags);
487 487
488 waiter = rt_mutex_top_waiter(lock); 488 waiter = rt_mutex_top_waiter(lock);
489 plist_del(&waiter->list_entry, &lock->wait_list); 489 plist_del(&waiter->list_entry, &lock->wait_list);
@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
500 500
501 rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING); 501 rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
502 502
503 spin_unlock_irqrestore(&current->pi_lock, flags); 503 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
504 504
505 /* 505 /*
506 * Clear the pi_blocked_on variable and enqueue a possible 506 * Clear the pi_blocked_on variable and enqueue a possible
@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
509 * waiter with higher priority than pending-owner->normal_prio 509 * waiter with higher priority than pending-owner->normal_prio
510 * is blocked on the unboosted (pending) owner. 510 * is blocked on the unboosted (pending) owner.
511 */ 511 */
512 spin_lock_irqsave(&pendowner->pi_lock, flags); 512 raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
513 513
514 WARN_ON(!pendowner->pi_blocked_on); 514 WARN_ON(!pendowner->pi_blocked_on);
515 WARN_ON(pendowner->pi_blocked_on != waiter); 515 WARN_ON(pendowner->pi_blocked_on != waiter);
@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
523 next = rt_mutex_top_waiter(lock); 523 next = rt_mutex_top_waiter(lock);
524 plist_add(&next->pi_list_entry, &pendowner->pi_waiters); 524 plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
525 } 525 }
526 spin_unlock_irqrestore(&pendowner->pi_lock, flags); 526 raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
527 527
528 wake_up_process(pendowner); 528 wake_up_process(pendowner);
529} 529}
@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock,
541 unsigned long flags; 541 unsigned long flags;
542 int chain_walk = 0; 542 int chain_walk = 0;
543 543
544 spin_lock_irqsave(&current->pi_lock, flags); 544 raw_spin_lock_irqsave(&current->pi_lock, flags);
545 plist_del(&waiter->list_entry, &lock->wait_list); 545 plist_del(&waiter->list_entry, &lock->wait_list);
546 waiter->task = NULL; 546 waiter->task = NULL;
547 current->pi_blocked_on = NULL; 547 current->pi_blocked_on = NULL;
548 spin_unlock_irqrestore(&current->pi_lock, flags); 548 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
549 549
550 if (first && owner != current) { 550 if (first && owner != current) {
551 551
552 spin_lock_irqsave(&owner->pi_lock, flags); 552 raw_spin_lock_irqsave(&owner->pi_lock, flags);
553 553
554 plist_del(&waiter->pi_list_entry, &owner->pi_waiters); 554 plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
555 555
@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock,
564 if (owner->pi_blocked_on) 564 if (owner->pi_blocked_on)
565 chain_walk = 1; 565 chain_walk = 1;
566 566
567 spin_unlock_irqrestore(&owner->pi_lock, flags); 567 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
568 } 568 }
569 569
570 WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); 570 WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
@@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock,
575 /* gets dropped in rt_mutex_adjust_prio_chain()! */ 575 /* gets dropped in rt_mutex_adjust_prio_chain()! */
576 get_task_struct(owner); 576 get_task_struct(owner);
577 577
578 spin_unlock(&lock->wait_lock); 578 raw_spin_unlock(&lock->wait_lock);
579 579
580 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); 580 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
581 581
582 spin_lock(&lock->wait_lock); 582 raw_spin_lock(&lock->wait_lock);
583} 583}
584 584
585/* 585/*
@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task)
592 struct rt_mutex_waiter *waiter; 592 struct rt_mutex_waiter *waiter;
593 unsigned long flags; 593 unsigned long flags;
594 594
595 spin_lock_irqsave(&task->pi_lock, flags); 595 raw_spin_lock_irqsave(&task->pi_lock, flags);
596 596
597 waiter = task->pi_blocked_on; 597 waiter = task->pi_blocked_on;
598 if (!waiter || waiter->list_entry.prio == task->prio) { 598 if (!waiter || waiter->list_entry.prio == task->prio) {
599 spin_unlock_irqrestore(&task->pi_lock, flags); 599 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
600 return; 600 return;
601 } 601 }
602 602
603 spin_unlock_irqrestore(&task->pi_lock, flags); 603 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
604 604
605 /* gets dropped in rt_mutex_adjust_prio_chain()! */ 605 /* gets dropped in rt_mutex_adjust_prio_chain()! */
606 get_task_struct(task); 606 get_task_struct(task);
@@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
672 break; 672 break;
673 } 673 }
674 674
675 spin_unlock(&lock->wait_lock); 675 raw_spin_unlock(&lock->wait_lock);
676 676
677 debug_rt_mutex_print_deadlock(waiter); 677 debug_rt_mutex_print_deadlock(waiter);
678 678
679 if (waiter->task) 679 if (waiter->task)
680 schedule_rt_mutex(lock); 680 schedule_rt_mutex(lock);
681 681
682 spin_lock(&lock->wait_lock); 682 raw_spin_lock(&lock->wait_lock);
683 set_current_state(state); 683 set_current_state(state);
684 } 684 }
685 685
@@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
700 debug_rt_mutex_init_waiter(&waiter); 700 debug_rt_mutex_init_waiter(&waiter);
701 waiter.task = NULL; 701 waiter.task = NULL;
702 702
703 spin_lock(&lock->wait_lock); 703 raw_spin_lock(&lock->wait_lock);
704 704
705 /* Try to acquire the lock again: */ 705 /* Try to acquire the lock again: */
706 if (try_to_take_rt_mutex(lock)) { 706 if (try_to_take_rt_mutex(lock)) {
707 spin_unlock(&lock->wait_lock); 707 raw_spin_unlock(&lock->wait_lock);
708 return 0; 708 return 0;
709 } 709 }
710 710
@@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
731 */ 731 */
732 fixup_rt_mutex_waiters(lock); 732 fixup_rt_mutex_waiters(lock);
733 733
734 spin_unlock(&lock->wait_lock); 734 raw_spin_unlock(&lock->wait_lock);
735 735
736 /* Remove pending timer: */ 736 /* Remove pending timer: */
737 if (unlikely(timeout)) 737 if (unlikely(timeout))
@@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
758{ 758{
759 int ret = 0; 759 int ret = 0;
760 760
761 spin_lock(&lock->wait_lock); 761 raw_spin_lock(&lock->wait_lock);
762 762
763 if (likely(rt_mutex_owner(lock) != current)) { 763 if (likely(rt_mutex_owner(lock) != current)) {
764 764
@@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
770 fixup_rt_mutex_waiters(lock); 770 fixup_rt_mutex_waiters(lock);
771 } 771 }
772 772
773 spin_unlock(&lock->wait_lock); 773 raw_spin_unlock(&lock->wait_lock);
774 774
775 return ret; 775 return ret;
776} 776}
@@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
781static void __sched 781static void __sched
782rt_mutex_slowunlock(struct rt_mutex *lock) 782rt_mutex_slowunlock(struct rt_mutex *lock)
783{ 783{
784 spin_lock(&lock->wait_lock); 784 raw_spin_lock(&lock->wait_lock);
785 785
786 debug_rt_mutex_unlock(lock); 786 debug_rt_mutex_unlock(lock);
787 787
@@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
789 789
790 if (!rt_mutex_has_waiters(lock)) { 790 if (!rt_mutex_has_waiters(lock)) {
791 lock->owner = NULL; 791 lock->owner = NULL;
792 spin_unlock(&lock->wait_lock); 792 raw_spin_unlock(&lock->wait_lock);
793 return; 793 return;
794 } 794 }
795 795
796 wakeup_next_waiter(lock); 796 wakeup_next_waiter(lock);
797 797
798 spin_unlock(&lock->wait_lock); 798 raw_spin_unlock(&lock->wait_lock);
799 799
800 /* Undo pi boosting if necessary: */ 800 /* Undo pi boosting if necessary: */
801 rt_mutex_adjust_prio(current); 801 rt_mutex_adjust_prio(current);
@@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
970void __rt_mutex_init(struct rt_mutex *lock, const char *name) 970void __rt_mutex_init(struct rt_mutex *lock, const char *name)
971{ 971{
972 lock->owner = NULL; 972 lock->owner = NULL;
973 spin_lock_init(&lock->wait_lock); 973 raw_spin_lock_init(&lock->wait_lock);
974 plist_head_init(&lock->wait_list, &lock->wait_lock); 974 plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
975 975
976 debug_rt_mutex_init(lock, name); 976 debug_rt_mutex_init(lock, name);
977} 977}
@@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1032{ 1032{
1033 int ret; 1033 int ret;
1034 1034
1035 spin_lock(&lock->wait_lock); 1035 raw_spin_lock(&lock->wait_lock);
1036 1036
1037 mark_rt_mutex_waiters(lock); 1037 mark_rt_mutex_waiters(lock);
1038 1038
@@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1040 /* We got the lock for task. */ 1040 /* We got the lock for task. */
1041 debug_rt_mutex_lock(lock); 1041 debug_rt_mutex_lock(lock);
1042 rt_mutex_set_owner(lock, task, 0); 1042 rt_mutex_set_owner(lock, task, 0);
1043 spin_unlock(&lock->wait_lock); 1043 raw_spin_unlock(&lock->wait_lock);
1044 rt_mutex_deadlock_account_lock(lock, task); 1044 rt_mutex_deadlock_account_lock(lock, task);
1045 return 1; 1045 return 1;
1046 } 1046 }
@@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1056 */ 1056 */
1057 ret = 0; 1057 ret = 0;
1058 } 1058 }
1059 spin_unlock(&lock->wait_lock); 1059 raw_spin_unlock(&lock->wait_lock);
1060 1060
1061 debug_rt_mutex_print_deadlock(waiter); 1061 debug_rt_mutex_print_deadlock(waiter);
1062 1062
@@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1106{ 1106{
1107 int ret; 1107 int ret;
1108 1108
1109 spin_lock(&lock->wait_lock); 1109 raw_spin_lock(&lock->wait_lock);
1110 1110
1111 set_current_state(TASK_INTERRUPTIBLE); 1111 set_current_state(TASK_INTERRUPTIBLE);
1112 1112
@@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1124 */ 1124 */
1125 fixup_rt_mutex_waiters(lock); 1125 fixup_rt_mutex_waiters(lock);
1126 1126
1127 spin_unlock(&lock->wait_lock); 1127 raw_spin_unlock(&lock->wait_lock);
1128 1128
1129 /* 1129 /*
1130 * Readjust priority, when we did not get the lock. We might have been 1130 * Readjust priority, when we did not get the lock. We might have been
diff --git a/kernel/sched.c b/kernel/sched.c
index fd05861b2111..18cceeecce35 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
141 141
142struct rt_bandwidth { 142struct rt_bandwidth {
143 /* nests inside the rq lock: */ 143 /* nests inside the rq lock: */
144 spinlock_t rt_runtime_lock; 144 raw_spinlock_t rt_runtime_lock;
145 ktime_t rt_period; 145 ktime_t rt_period;
146 u64 rt_runtime; 146 u64 rt_runtime;
147 struct hrtimer rt_period_timer; 147 struct hrtimer rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
178 rt_b->rt_period = ns_to_ktime(period); 178 rt_b->rt_period = ns_to_ktime(period);
179 rt_b->rt_runtime = runtime; 179 rt_b->rt_runtime = runtime;
180 180
181 spin_lock_init(&rt_b->rt_runtime_lock); 181 raw_spin_lock_init(&rt_b->rt_runtime_lock);
182 182
183 hrtimer_init(&rt_b->rt_period_timer, 183 hrtimer_init(&rt_b->rt_period_timer,
184 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 184 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
200 if (hrtimer_active(&rt_b->rt_period_timer)) 200 if (hrtimer_active(&rt_b->rt_period_timer))
201 return; 201 return;
202 202
203 spin_lock(&rt_b->rt_runtime_lock); 203 raw_spin_lock(&rt_b->rt_runtime_lock);
204 for (;;) { 204 for (;;) {
205 unsigned long delta; 205 unsigned long delta;
206 ktime_t soft, hard; 206 ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, 217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
218 HRTIMER_MODE_ABS_PINNED, 0); 218 HRTIMER_MODE_ABS_PINNED, 0);
219 } 219 }
220 spin_unlock(&rt_b->rt_runtime_lock); 220 raw_spin_unlock(&rt_b->rt_runtime_lock);
221} 221}
222 222
223#ifdef CONFIG_RT_GROUP_SCHED 223#ifdef CONFIG_RT_GROUP_SCHED
@@ -470,7 +470,7 @@ struct rt_rq {
470 u64 rt_time; 470 u64 rt_time;
471 u64 rt_runtime; 471 u64 rt_runtime;
472 /* Nests inside the rq lock: */ 472 /* Nests inside the rq lock: */
473 spinlock_t rt_runtime_lock; 473 raw_spinlock_t rt_runtime_lock;
474 474
475#ifdef CONFIG_RT_GROUP_SCHED 475#ifdef CONFIG_RT_GROUP_SCHED
476 unsigned long rt_nr_boosted; 476 unsigned long rt_nr_boosted;
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
525 */ 525 */
526struct rq { 526struct rq {
527 /* runqueue lock: */ 527 /* runqueue lock: */
528 spinlock_t lock; 528 raw_spinlock_t lock;
529 529
530 /* 530 /*
531 * nr_running and cpu_load should be in the same cacheline because 531 * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
685 */ 685 */
686int runqueue_is_locked(int cpu) 686int runqueue_is_locked(int cpu)
687{ 687{
688 return spin_is_locked(&cpu_rq(cpu)->lock); 688 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
689} 689}
690 690
691/* 691/*
@@ -893,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
893 */ 893 */
894 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 894 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
895 895
896 spin_unlock_irq(&rq->lock); 896 raw_spin_unlock_irq(&rq->lock);
897} 897}
898 898
899#else /* __ARCH_WANT_UNLOCKED_CTXSW */ 899#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -917,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
917 next->oncpu = 1; 917 next->oncpu = 1;
918#endif 918#endif
919#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 919#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
920 spin_unlock_irq(&rq->lock); 920 raw_spin_unlock_irq(&rq->lock);
921#else 921#else
922 spin_unlock(&rq->lock); 922 raw_spin_unlock(&rq->lock);
923#endif 923#endif
924} 924}
925 925
@@ -949,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
949{ 949{
950 for (;;) { 950 for (;;) {
951 struct rq *rq = task_rq(p); 951 struct rq *rq = task_rq(p);
952 spin_lock(&rq->lock); 952 raw_spin_lock(&rq->lock);
953 if (likely(rq == task_rq(p))) 953 if (likely(rq == task_rq(p)))
954 return rq; 954 return rq;
955 spin_unlock(&rq->lock); 955 raw_spin_unlock(&rq->lock);
956 } 956 }
957} 957}
958 958
@@ -969,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
969 for (;;) { 969 for (;;) {
970 local_irq_save(*flags); 970 local_irq_save(*flags);
971 rq = task_rq(p); 971 rq = task_rq(p);
972 spin_lock(&rq->lock); 972 raw_spin_lock(&rq->lock);
973 if (likely(rq == task_rq(p))) 973 if (likely(rq == task_rq(p)))
974 return rq; 974 return rq;
975 spin_unlock_irqrestore(&rq->lock, *flags); 975 raw_spin_unlock_irqrestore(&rq->lock, *flags);
976 } 976 }
977} 977}
978 978
@@ -981,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
981 struct rq *rq = task_rq(p); 981 struct rq *rq = task_rq(p);
982 982
983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */ 983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
984 spin_unlock_wait(&rq->lock); 984 raw_spin_unlock_wait(&rq->lock);
985} 985}
986 986
987static void __task_rq_unlock(struct rq *rq) 987static void __task_rq_unlock(struct rq *rq)
988 __releases(rq->lock) 988 __releases(rq->lock)
989{ 989{
990 spin_unlock(&rq->lock); 990 raw_spin_unlock(&rq->lock);
991} 991}
992 992
993static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 993static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
994 __releases(rq->lock) 994 __releases(rq->lock)
995{ 995{
996 spin_unlock_irqrestore(&rq->lock, *flags); 996 raw_spin_unlock_irqrestore(&rq->lock, *flags);
997} 997}
998 998
999/* 999/*
@@ -1006,7 +1006,7 @@ static struct rq *this_rq_lock(void)
1006 1006
1007 local_irq_disable(); 1007 local_irq_disable();
1008 rq = this_rq(); 1008 rq = this_rq();
1009 spin_lock(&rq->lock); 1009 raw_spin_lock(&rq->lock);
1010 1010
1011 return rq; 1011 return rq;
1012} 1012}
@@ -1053,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1053 1053
1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1055 1055
1056 spin_lock(&rq->lock); 1056 raw_spin_lock(&rq->lock);
1057 update_rq_clock(rq); 1057 update_rq_clock(rq);
1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1059 spin_unlock(&rq->lock); 1059 raw_spin_unlock(&rq->lock);
1060 1060
1061 return HRTIMER_NORESTART; 1061 return HRTIMER_NORESTART;
1062} 1062}
@@ -1069,10 +1069,10 @@ static void __hrtick_start(void *arg)
1069{ 1069{
1070 struct rq *rq = arg; 1070 struct rq *rq = arg;
1071 1071
1072 spin_lock(&rq->lock); 1072 raw_spin_lock(&rq->lock);
1073 hrtimer_restart(&rq->hrtick_timer); 1073 hrtimer_restart(&rq->hrtick_timer);
1074 rq->hrtick_csd_pending = 0; 1074 rq->hrtick_csd_pending = 0;
1075 spin_unlock(&rq->lock); 1075 raw_spin_unlock(&rq->lock);
1076} 1076}
1077 1077
1078/* 1078/*
@@ -1179,7 +1179,7 @@ static void resched_task(struct task_struct *p)
1179{ 1179{
1180 int cpu; 1180 int cpu;
1181 1181
1182 assert_spin_locked(&task_rq(p)->lock); 1182 assert_raw_spin_locked(&task_rq(p)->lock);
1183 1183
1184 if (test_tsk_need_resched(p)) 1184 if (test_tsk_need_resched(p))
1185 return; 1185 return;
@@ -1201,10 +1201,10 @@ static void resched_cpu(int cpu)
1201 struct rq *rq = cpu_rq(cpu); 1201 struct rq *rq = cpu_rq(cpu);
1202 unsigned long flags; 1202 unsigned long flags;
1203 1203
1204 if (!spin_trylock_irqsave(&rq->lock, flags)) 1204 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
1205 return; 1205 return;
1206 resched_task(cpu_curr(cpu)); 1206 resched_task(cpu_curr(cpu));
1207 spin_unlock_irqrestore(&rq->lock, flags); 1207 raw_spin_unlock_irqrestore(&rq->lock, flags);
1208} 1208}
1209 1209
1210#ifdef CONFIG_NO_HZ 1210#ifdef CONFIG_NO_HZ
@@ -1273,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1273#else /* !CONFIG_SMP */ 1273#else /* !CONFIG_SMP */
1274static void resched_task(struct task_struct *p) 1274static void resched_task(struct task_struct *p)
1275{ 1275{
1276 assert_spin_locked(&task_rq(p)->lock); 1276 assert_raw_spin_locked(&task_rq(p)->lock);
1277 set_tsk_need_resched(p); 1277 set_tsk_need_resched(p);
1278} 1278}
1279 1279
@@ -1600,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1600 struct rq *rq = cpu_rq(cpu); 1600 struct rq *rq = cpu_rq(cpu);
1601 unsigned long flags; 1601 unsigned long flags;
1602 1602
1603 spin_lock_irqsave(&rq->lock, flags); 1603 raw_spin_lock_irqsave(&rq->lock, flags);
1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; 1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
1605 tg->cfs_rq[cpu]->shares = boost ? 0 : shares; 1605 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1606 __set_se_shares(tg->se[cpu], shares); 1606 __set_se_shares(tg->se[cpu], shares);
1607 spin_unlock_irqrestore(&rq->lock, flags); 1607 raw_spin_unlock_irqrestore(&rq->lock, flags);
1608 } 1608 }
1609} 1609}
1610 1610
@@ -1706,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1706 if (root_task_group_empty()) 1706 if (root_task_group_empty())
1707 return; 1707 return;
1708 1708
1709 spin_unlock(&rq->lock); 1709 raw_spin_unlock(&rq->lock);
1710 update_shares(sd); 1710 update_shares(sd);
1711 spin_lock(&rq->lock); 1711 raw_spin_lock(&rq->lock);
1712} 1712}
1713 1713
1714static void update_h_load(long cpu) 1714static void update_h_load(long cpu)
@@ -1748,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1748 __acquires(busiest->lock) 1748 __acquires(busiest->lock)
1749 __acquires(this_rq->lock) 1749 __acquires(this_rq->lock)
1750{ 1750{
1751 spin_unlock(&this_rq->lock); 1751 raw_spin_unlock(&this_rq->lock);
1752 double_rq_lock(this_rq, busiest); 1752 double_rq_lock(this_rq, busiest);
1753 1753
1754 return 1; 1754 return 1;
@@ -1769,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1769{ 1769{
1770 int ret = 0; 1770 int ret = 0;
1771 1771
1772 if (unlikely(!spin_trylock(&busiest->lock))) { 1772 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1773 if (busiest < this_rq) { 1773 if (busiest < this_rq) {
1774 spin_unlock(&this_rq->lock); 1774 raw_spin_unlock(&this_rq->lock);
1775 spin_lock(&busiest->lock); 1775 raw_spin_lock(&busiest->lock);
1776 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); 1776 raw_spin_lock_nested(&this_rq->lock,
1777 SINGLE_DEPTH_NESTING);
1777 ret = 1; 1778 ret = 1;
1778 } else 1779 } else
1779 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); 1780 raw_spin_lock_nested(&busiest->lock,
1781 SINGLE_DEPTH_NESTING);
1780 } 1782 }
1781 return ret; 1783 return ret;
1782} 1784}
@@ -1790,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1790{ 1792{
1791 if (unlikely(!irqs_disabled())) { 1793 if (unlikely(!irqs_disabled())) {
1792 /* printk() doesn't work good under rq->lock */ 1794 /* printk() doesn't work good under rq->lock */
1793 spin_unlock(&this_rq->lock); 1795 raw_spin_unlock(&this_rq->lock);
1794 BUG_ON(1); 1796 BUG_ON(1);
1795 } 1797 }
1796 1798
@@ -1800,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1800static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1802static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1801 __releases(busiest->lock) 1803 __releases(busiest->lock)
1802{ 1804{
1803 spin_unlock(&busiest->lock); 1805 raw_spin_unlock(&busiest->lock);
1804 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1806 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1805} 1807}
1806#endif 1808#endif
@@ -2023,13 +2025,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
2023 return; 2025 return;
2024 } 2026 }
2025 2027
2026 spin_lock_irqsave(&rq->lock, flags); 2028 raw_spin_lock_irqsave(&rq->lock, flags);
2027 update_rq_clock(rq); 2029 update_rq_clock(rq);
2028 set_task_cpu(p, cpu); 2030 set_task_cpu(p, cpu);
2029 p->cpus_allowed = cpumask_of_cpu(cpu); 2031 p->cpus_allowed = cpumask_of_cpu(cpu);
2030 p->rt.nr_cpus_allowed = 1; 2032 p->rt.nr_cpus_allowed = 1;
2031 p->flags |= PF_THREAD_BOUND; 2033 p->flags |= PF_THREAD_BOUND;
2032 spin_unlock_irqrestore(&rq->lock, flags); 2034 raw_spin_unlock_irqrestore(&rq->lock, flags);
2033} 2035}
2034EXPORT_SYMBOL(kthread_bind); 2036EXPORT_SYMBOL(kthread_bind);
2035 2037
@@ -2781,10 +2783,10 @@ static inline void post_schedule(struct rq *rq)
2781 if (rq->post_schedule) { 2783 if (rq->post_schedule) {
2782 unsigned long flags; 2784 unsigned long flags;
2783 2785
2784 spin_lock_irqsave(&rq->lock, flags); 2786 raw_spin_lock_irqsave(&rq->lock, flags);
2785 if (rq->curr->sched_class->post_schedule) 2787 if (rq->curr->sched_class->post_schedule)
2786 rq->curr->sched_class->post_schedule(rq); 2788 rq->curr->sched_class->post_schedule(rq);
2787 spin_unlock_irqrestore(&rq->lock, flags); 2789 raw_spin_unlock_irqrestore(&rq->lock, flags);
2788 2790
2789 rq->post_schedule = 0; 2791 rq->post_schedule = 0;
2790 } 2792 }
@@ -3066,15 +3068,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
3066{ 3068{
3067 BUG_ON(!irqs_disabled()); 3069 BUG_ON(!irqs_disabled());
3068 if (rq1 == rq2) { 3070 if (rq1 == rq2) {
3069 spin_lock(&rq1->lock); 3071 raw_spin_lock(&rq1->lock);
3070 __acquire(rq2->lock); /* Fake it out ;) */ 3072 __acquire(rq2->lock); /* Fake it out ;) */
3071 } else { 3073 } else {
3072 if (rq1 < rq2) { 3074 if (rq1 < rq2) {
3073 spin_lock(&rq1->lock); 3075 raw_spin_lock(&rq1->lock);
3074 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 3076 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
3075 } else { 3077 } else {
3076 spin_lock(&rq2->lock); 3078 raw_spin_lock(&rq2->lock);
3077 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 3079 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
3078 } 3080 }
3079 } 3081 }
3080 update_rq_clock(rq1); 3082 update_rq_clock(rq1);
@@ -3091,9 +3093,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
3091 __releases(rq1->lock) 3093 __releases(rq1->lock)
3092 __releases(rq2->lock) 3094 __releases(rq2->lock)
3093{ 3095{
3094 spin_unlock(&rq1->lock); 3096 raw_spin_unlock(&rq1->lock);
3095 if (rq1 != rq2) 3097 if (rq1 != rq2)
3096 spin_unlock(&rq2->lock); 3098 raw_spin_unlock(&rq2->lock);
3097 else 3099 else
3098 __release(rq2->lock); 3100 __release(rq2->lock);
3099} 3101}
@@ -4186,14 +4188,15 @@ redo:
4186 4188
4187 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { 4189 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
4188 4190
4189 spin_lock_irqsave(&busiest->lock, flags); 4191 raw_spin_lock_irqsave(&busiest->lock, flags);
4190 4192
4191 /* don't kick the migration_thread, if the curr 4193 /* don't kick the migration_thread, if the curr
4192 * task on busiest cpu can't be moved to this_cpu 4194 * task on busiest cpu can't be moved to this_cpu
4193 */ 4195 */
4194 if (!cpumask_test_cpu(this_cpu, 4196 if (!cpumask_test_cpu(this_cpu,
4195 &busiest->curr->cpus_allowed)) { 4197 &busiest->curr->cpus_allowed)) {
4196 spin_unlock_irqrestore(&busiest->lock, flags); 4198 raw_spin_unlock_irqrestore(&busiest->lock,
4199 flags);
4197 all_pinned = 1; 4200 all_pinned = 1;
4198 goto out_one_pinned; 4201 goto out_one_pinned;
4199 } 4202 }
@@ -4203,7 +4206,7 @@ redo:
4203 busiest->push_cpu = this_cpu; 4206 busiest->push_cpu = this_cpu;
4204 active_balance = 1; 4207 active_balance = 1;
4205 } 4208 }
4206 spin_unlock_irqrestore(&busiest->lock, flags); 4209 raw_spin_unlock_irqrestore(&busiest->lock, flags);
4207 if (active_balance) 4210 if (active_balance)
4208 wake_up_process(busiest->migration_thread); 4211 wake_up_process(busiest->migration_thread);
4209 4212
@@ -4385,10 +4388,10 @@ redo:
4385 /* 4388 /*
4386 * Should not call ttwu while holding a rq->lock 4389 * Should not call ttwu while holding a rq->lock
4387 */ 4390 */
4388 spin_unlock(&this_rq->lock); 4391 raw_spin_unlock(&this_rq->lock);
4389 if (active_balance) 4392 if (active_balance)
4390 wake_up_process(busiest->migration_thread); 4393 wake_up_process(busiest->migration_thread);
4391 spin_lock(&this_rq->lock); 4394 raw_spin_lock(&this_rq->lock);
4392 4395
4393 } else 4396 } else
4394 sd->nr_balance_failed = 0; 4397 sd->nr_balance_failed = 0;
@@ -5257,11 +5260,11 @@ void scheduler_tick(void)
5257 5260
5258 sched_clock_tick(); 5261 sched_clock_tick();
5259 5262
5260 spin_lock(&rq->lock); 5263 raw_spin_lock(&rq->lock);
5261 update_rq_clock(rq); 5264 update_rq_clock(rq);
5262 update_cpu_load(rq); 5265 update_cpu_load(rq);
5263 curr->sched_class->task_tick(rq, curr, 0); 5266 curr->sched_class->task_tick(rq, curr, 0);
5264 spin_unlock(&rq->lock); 5267 raw_spin_unlock(&rq->lock);
5265 5268
5266 perf_event_task_tick(curr, cpu); 5269 perf_event_task_tick(curr, cpu);
5267 5270
@@ -5455,7 +5458,7 @@ need_resched_nonpreemptible:
5455 if (sched_feat(HRTICK)) 5458 if (sched_feat(HRTICK))
5456 hrtick_clear(rq); 5459 hrtick_clear(rq);
5457 5460
5458 spin_lock_irq(&rq->lock); 5461 raw_spin_lock_irq(&rq->lock);
5459 update_rq_clock(rq); 5462 update_rq_clock(rq);
5460 clear_tsk_need_resched(prev); 5463 clear_tsk_need_resched(prev);
5461 5464
@@ -5491,7 +5494,7 @@ need_resched_nonpreemptible:
5491 cpu = smp_processor_id(); 5494 cpu = smp_processor_id();
5492 rq = cpu_rq(cpu); 5495 rq = cpu_rq(cpu);
5493 } else 5496 } else
5494 spin_unlock_irq(&rq->lock); 5497 raw_spin_unlock_irq(&rq->lock);
5495 5498
5496 post_schedule(rq); 5499 post_schedule(rq);
5497 5500
@@ -6320,7 +6323,7 @@ recheck:
6320 * make sure no PI-waiters arrive (or leave) while we are 6323 * make sure no PI-waiters arrive (or leave) while we are
6321 * changing the priority of the task: 6324 * changing the priority of the task:
6322 */ 6325 */
6323 spin_lock_irqsave(&p->pi_lock, flags); 6326 raw_spin_lock_irqsave(&p->pi_lock, flags);
6324 /* 6327 /*
6325 * To be able to change p->policy safely, the apropriate 6328 * To be able to change p->policy safely, the apropriate
6326 * runqueue lock must be held. 6329 * runqueue lock must be held.
@@ -6330,7 +6333,7 @@ recheck:
6330 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 6333 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6331 policy = oldpolicy = -1; 6334 policy = oldpolicy = -1;
6332 __task_rq_unlock(rq); 6335 __task_rq_unlock(rq);
6333 spin_unlock_irqrestore(&p->pi_lock, flags); 6336 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6334 goto recheck; 6337 goto recheck;
6335 } 6338 }
6336 update_rq_clock(rq); 6339 update_rq_clock(rq);
@@ -6354,7 +6357,7 @@ recheck:
6354 check_class_changed(rq, p, prev_class, oldprio, running); 6357 check_class_changed(rq, p, prev_class, oldprio, running);
6355 } 6358 }
6356 __task_rq_unlock(rq); 6359 __task_rq_unlock(rq);
6357 spin_unlock_irqrestore(&p->pi_lock, flags); 6360 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6358 6361
6359 rt_mutex_adjust_pi(p); 6362 rt_mutex_adjust_pi(p);
6360 6363
@@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
6684 */ 6687 */
6685 __release(rq->lock); 6688 __release(rq->lock);
6686 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 6689 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
6687 _raw_spin_unlock(&rq->lock); 6690 do_raw_spin_unlock(&rq->lock);
6688 preempt_enable_no_resched(); 6691 preempt_enable_no_resched();
6689 6692
6690 schedule(); 6693 schedule();
@@ -6980,7 +6983,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6980 struct rq *rq = cpu_rq(cpu); 6983 struct rq *rq = cpu_rq(cpu);
6981 unsigned long flags; 6984 unsigned long flags;
6982 6985
6983 spin_lock_irqsave(&rq->lock, flags); 6986 raw_spin_lock_irqsave(&rq->lock, flags);
6984 6987
6985 __sched_fork(idle); 6988 __sched_fork(idle);
6986 idle->se.exec_start = sched_clock(); 6989 idle->se.exec_start = sched_clock();
@@ -6992,7 +6995,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6992#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 6995#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
6993 idle->oncpu = 1; 6996 idle->oncpu = 1;
6994#endif 6997#endif
6995 spin_unlock_irqrestore(&rq->lock, flags); 6998 raw_spin_unlock_irqrestore(&rq->lock, flags);
6996 6999
6997 /* Set the preempt count _outside_ the spinlocks! */ 7000 /* Set the preempt count _outside_ the spinlocks! */
6998#if defined(CONFIG_PREEMPT) 7001#if defined(CONFIG_PREEMPT)
@@ -7209,10 +7212,10 @@ static int migration_thread(void *data)
7209 struct migration_req *req; 7212 struct migration_req *req;
7210 struct list_head *head; 7213 struct list_head *head;
7211 7214
7212 spin_lock_irq(&rq->lock); 7215 raw_spin_lock_irq(&rq->lock);
7213 7216
7214 if (cpu_is_offline(cpu)) { 7217 if (cpu_is_offline(cpu)) {
7215 spin_unlock_irq(&rq->lock); 7218 raw_spin_unlock_irq(&rq->lock);
7216 break; 7219 break;
7217 } 7220 }
7218 7221
@@ -7224,7 +7227,7 @@ static int migration_thread(void *data)
7224 head = &rq->migration_queue; 7227 head = &rq->migration_queue;
7225 7228
7226 if (list_empty(head)) { 7229 if (list_empty(head)) {
7227 spin_unlock_irq(&rq->lock); 7230 raw_spin_unlock_irq(&rq->lock);
7228 schedule(); 7231 schedule();
7229 set_current_state(TASK_INTERRUPTIBLE); 7232 set_current_state(TASK_INTERRUPTIBLE);
7230 continue; 7233 continue;
@@ -7233,14 +7236,14 @@ static int migration_thread(void *data)
7233 list_del_init(head->next); 7236 list_del_init(head->next);
7234 7237
7235 if (req->task != NULL) { 7238 if (req->task != NULL) {
7236 spin_unlock(&rq->lock); 7239 raw_spin_unlock(&rq->lock);
7237 __migrate_task(req->task, cpu, req->dest_cpu); 7240 __migrate_task(req->task, cpu, req->dest_cpu);
7238 } else if (likely(cpu == (badcpu = smp_processor_id()))) { 7241 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
7239 req->dest_cpu = RCU_MIGRATION_GOT_QS; 7242 req->dest_cpu = RCU_MIGRATION_GOT_QS;
7240 spin_unlock(&rq->lock); 7243 raw_spin_unlock(&rq->lock);
7241 } else { 7244 } else {
7242 req->dest_cpu = RCU_MIGRATION_MUST_SYNC; 7245 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
7243 spin_unlock(&rq->lock); 7246 raw_spin_unlock(&rq->lock);
7244 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); 7247 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
7245 } 7248 }
7246 local_irq_enable(); 7249 local_irq_enable();
@@ -7363,14 +7366,14 @@ void sched_idle_next(void)
7363 * Strictly not necessary since rest of the CPUs are stopped by now 7366 * Strictly not necessary since rest of the CPUs are stopped by now
7364 * and interrupts disabled on the current cpu. 7367 * and interrupts disabled on the current cpu.
7365 */ 7368 */
7366 spin_lock_irqsave(&rq->lock, flags); 7369 raw_spin_lock_irqsave(&rq->lock, flags);
7367 7370
7368 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 7371 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
7369 7372
7370 update_rq_clock(rq); 7373 update_rq_clock(rq);
7371 activate_task(rq, p, 0); 7374 activate_task(rq, p, 0);
7372 7375
7373 spin_unlock_irqrestore(&rq->lock, flags); 7376 raw_spin_unlock_irqrestore(&rq->lock, flags);
7374} 7377}
7375 7378
7376/* 7379/*
@@ -7406,9 +7409,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
7406 * that's OK. No task can be added to this CPU, so iteration is 7409 * that's OK. No task can be added to this CPU, so iteration is
7407 * fine. 7410 * fine.
7408 */ 7411 */
7409 spin_unlock_irq(&rq->lock); 7412 raw_spin_unlock_irq(&rq->lock);
7410 move_task_off_dead_cpu(dead_cpu, p); 7413 move_task_off_dead_cpu(dead_cpu, p);
7411 spin_lock_irq(&rq->lock); 7414 raw_spin_lock_irq(&rq->lock);
7412 7415
7413 put_task_struct(p); 7416 put_task_struct(p);
7414} 7417}
@@ -7674,13 +7677,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7674 7677
7675 /* Update our root-domain */ 7678 /* Update our root-domain */
7676 rq = cpu_rq(cpu); 7679 rq = cpu_rq(cpu);
7677 spin_lock_irqsave(&rq->lock, flags); 7680 raw_spin_lock_irqsave(&rq->lock, flags);
7678 if (rq->rd) { 7681 if (rq->rd) {
7679 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7682 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7680 7683
7681 set_rq_online(rq); 7684 set_rq_online(rq);
7682 } 7685 }
7683 spin_unlock_irqrestore(&rq->lock, flags); 7686 raw_spin_unlock_irqrestore(&rq->lock, flags);
7684 break; 7687 break;
7685 7688
7686#ifdef CONFIG_HOTPLUG_CPU 7689#ifdef CONFIG_HOTPLUG_CPU
@@ -7705,13 +7708,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7705 put_task_struct(rq->migration_thread); 7708 put_task_struct(rq->migration_thread);
7706 rq->migration_thread = NULL; 7709 rq->migration_thread = NULL;
7707 /* Idle task back to normal (off runqueue, low prio) */ 7710 /* Idle task back to normal (off runqueue, low prio) */
7708 spin_lock_irq(&rq->lock); 7711 raw_spin_lock_irq(&rq->lock);
7709 update_rq_clock(rq); 7712 update_rq_clock(rq);
7710 deactivate_task(rq, rq->idle, 0); 7713 deactivate_task(rq, rq->idle, 0);
7711 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 7714 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
7712 rq->idle->sched_class = &idle_sched_class; 7715 rq->idle->sched_class = &idle_sched_class;
7713 migrate_dead_tasks(cpu); 7716 migrate_dead_tasks(cpu);
7714 spin_unlock_irq(&rq->lock); 7717 raw_spin_unlock_irq(&rq->lock);
7715 cpuset_unlock(); 7718 cpuset_unlock();
7716 migrate_nr_uninterruptible(rq); 7719 migrate_nr_uninterruptible(rq);
7717 BUG_ON(rq->nr_running != 0); 7720 BUG_ON(rq->nr_running != 0);
@@ -7721,30 +7724,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7721 * they didn't take sched_hotcpu_mutex. Just wake up 7724 * they didn't take sched_hotcpu_mutex. Just wake up
7722 * the requestors. 7725 * the requestors.
7723 */ 7726 */
7724 spin_lock_irq(&rq->lock); 7727 raw_spin_lock_irq(&rq->lock);
7725 while (!list_empty(&rq->migration_queue)) { 7728 while (!list_empty(&rq->migration_queue)) {
7726 struct migration_req *req; 7729 struct migration_req *req;
7727 7730
7728 req = list_entry(rq->migration_queue.next, 7731 req = list_entry(rq->migration_queue.next,
7729 struct migration_req, list); 7732 struct migration_req, list);
7730 list_del_init(&req->list); 7733 list_del_init(&req->list);
7731 spin_unlock_irq(&rq->lock); 7734 raw_spin_unlock_irq(&rq->lock);
7732 complete(&req->done); 7735 complete(&req->done);
7733 spin_lock_irq(&rq->lock); 7736 raw_spin_lock_irq(&rq->lock);
7734 } 7737 }
7735 spin_unlock_irq(&rq->lock); 7738 raw_spin_unlock_irq(&rq->lock);
7736 break; 7739 break;
7737 7740
7738 case CPU_DYING: 7741 case CPU_DYING:
7739 case CPU_DYING_FROZEN: 7742 case CPU_DYING_FROZEN:
7740 /* Update our root-domain */ 7743 /* Update our root-domain */
7741 rq = cpu_rq(cpu); 7744 rq = cpu_rq(cpu);
7742 spin_lock_irqsave(&rq->lock, flags); 7745 raw_spin_lock_irqsave(&rq->lock, flags);
7743 if (rq->rd) { 7746 if (rq->rd) {
7744 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7747 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7745 set_rq_offline(rq); 7748 set_rq_offline(rq);
7746 } 7749 }
7747 spin_unlock_irqrestore(&rq->lock, flags); 7750 raw_spin_unlock_irqrestore(&rq->lock, flags);
7748 break; 7751 break;
7749#endif 7752#endif
7750 } 7753 }
@@ -7974,7 +7977,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7974 struct root_domain *old_rd = NULL; 7977 struct root_domain *old_rd = NULL;
7975 unsigned long flags; 7978 unsigned long flags;
7976 7979
7977 spin_lock_irqsave(&rq->lock, flags); 7980 raw_spin_lock_irqsave(&rq->lock, flags);
7978 7981
7979 if (rq->rd) { 7982 if (rq->rd) {
7980 old_rd = rq->rd; 7983 old_rd = rq->rd;
@@ -8000,7 +8003,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
8000 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 8003 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
8001 set_rq_online(rq); 8004 set_rq_online(rq);
8002 8005
8003 spin_unlock_irqrestore(&rq->lock, flags); 8006 raw_spin_unlock_irqrestore(&rq->lock, flags);
8004 8007
8005 if (old_rd) 8008 if (old_rd)
8006 free_rootdomain(old_rd); 8009 free_rootdomain(old_rd);
@@ -9357,13 +9360,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
9357#ifdef CONFIG_SMP 9360#ifdef CONFIG_SMP
9358 rt_rq->rt_nr_migratory = 0; 9361 rt_rq->rt_nr_migratory = 0;
9359 rt_rq->overloaded = 0; 9362 rt_rq->overloaded = 0;
9360 plist_head_init(&rt_rq->pushable_tasks, &rq->lock); 9363 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
9361#endif 9364#endif
9362 9365
9363 rt_rq->rt_time = 0; 9366 rt_rq->rt_time = 0;
9364 rt_rq->rt_throttled = 0; 9367 rt_rq->rt_throttled = 0;
9365 rt_rq->rt_runtime = 0; 9368 rt_rq->rt_runtime = 0;
9366 spin_lock_init(&rt_rq->rt_runtime_lock); 9369 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
9367 9370
9368#ifdef CONFIG_RT_GROUP_SCHED 9371#ifdef CONFIG_RT_GROUP_SCHED
9369 rt_rq->rt_nr_boosted = 0; 9372 rt_rq->rt_nr_boosted = 0;
@@ -9523,7 +9526,7 @@ void __init sched_init(void)
9523 struct rq *rq; 9526 struct rq *rq;
9524 9527
9525 rq = cpu_rq(i); 9528 rq = cpu_rq(i);
9526 spin_lock_init(&rq->lock); 9529 raw_spin_lock_init(&rq->lock);
9527 rq->nr_running = 0; 9530 rq->nr_running = 0;
9528 rq->calc_load_active = 0; 9531 rq->calc_load_active = 0;
9529 rq->calc_load_update = jiffies + LOAD_FREQ; 9532 rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9621,7 +9624,7 @@ void __init sched_init(void)
9621#endif 9624#endif
9622 9625
9623#ifdef CONFIG_RT_MUTEXES 9626#ifdef CONFIG_RT_MUTEXES
9624 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); 9627 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
9625#endif 9628#endif
9626 9629
9627 /* 9630 /*
@@ -9746,13 +9749,13 @@ void normalize_rt_tasks(void)
9746 continue; 9749 continue;
9747 } 9750 }
9748 9751
9749 spin_lock(&p->pi_lock); 9752 raw_spin_lock(&p->pi_lock);
9750 rq = __task_rq_lock(p); 9753 rq = __task_rq_lock(p);
9751 9754
9752 normalize_task(rq, p); 9755 normalize_task(rq, p);
9753 9756
9754 __task_rq_unlock(rq); 9757 __task_rq_unlock(rq);
9755 spin_unlock(&p->pi_lock); 9758 raw_spin_unlock(&p->pi_lock);
9756 } while_each_thread(g, p); 9759 } while_each_thread(g, p);
9757 9760
9758 read_unlock_irqrestore(&tasklist_lock, flags); 9761 read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10115,9 +10118,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
10115 struct rq *rq = cfs_rq->rq; 10118 struct rq *rq = cfs_rq->rq;
10116 unsigned long flags; 10119 unsigned long flags;
10117 10120
10118 spin_lock_irqsave(&rq->lock, flags); 10121 raw_spin_lock_irqsave(&rq->lock, flags);
10119 __set_se_shares(se, shares); 10122 __set_se_shares(se, shares);
10120 spin_unlock_irqrestore(&rq->lock, flags); 10123 raw_spin_unlock_irqrestore(&rq->lock, flags);
10121} 10124}
10122 10125
10123static DEFINE_MUTEX(shares_mutex); 10126static DEFINE_MUTEX(shares_mutex);
@@ -10302,18 +10305,18 @@ static int tg_set_bandwidth(struct task_group *tg,
10302 if (err) 10305 if (err)
10303 goto unlock; 10306 goto unlock;
10304 10307
10305 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10308 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10306 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 10309 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
10307 tg->rt_bandwidth.rt_runtime = rt_runtime; 10310 tg->rt_bandwidth.rt_runtime = rt_runtime;
10308 10311
10309 for_each_possible_cpu(i) { 10312 for_each_possible_cpu(i) {
10310 struct rt_rq *rt_rq = tg->rt_rq[i]; 10313 struct rt_rq *rt_rq = tg->rt_rq[i];
10311 10314
10312 spin_lock(&rt_rq->rt_runtime_lock); 10315 raw_spin_lock(&rt_rq->rt_runtime_lock);
10313 rt_rq->rt_runtime = rt_runtime; 10316 rt_rq->rt_runtime = rt_runtime;
10314 spin_unlock(&rt_rq->rt_runtime_lock); 10317 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10315 } 10318 }
10316 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10319 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10317 unlock: 10320 unlock:
10318 read_unlock(&tasklist_lock); 10321 read_unlock(&tasklist_lock);
10319 mutex_unlock(&rt_constraints_mutex); 10322 mutex_unlock(&rt_constraints_mutex);
@@ -10418,15 +10421,15 @@ static int sched_rt_global_constraints(void)
10418 if (sysctl_sched_rt_runtime == 0) 10421 if (sysctl_sched_rt_runtime == 0)
10419 return -EBUSY; 10422 return -EBUSY;
10420 10423
10421 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 10424 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
10422 for_each_possible_cpu(i) { 10425 for_each_possible_cpu(i) {
10423 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 10426 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
10424 10427
10425 spin_lock(&rt_rq->rt_runtime_lock); 10428 raw_spin_lock(&rt_rq->rt_runtime_lock);
10426 rt_rq->rt_runtime = global_rt_runtime(); 10429 rt_rq->rt_runtime = global_rt_runtime();
10427 spin_unlock(&rt_rq->rt_runtime_lock); 10430 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10428 } 10431 }
10429 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 10432 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
10430 10433
10431 return 0; 10434 return 0;
10432} 10435}
@@ -10717,9 +10720,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
10717 /* 10720 /*
10718 * Take rq->lock to make 64-bit read safe on 32-bit platforms. 10721 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
10719 */ 10722 */
10720 spin_lock_irq(&cpu_rq(cpu)->lock); 10723 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10721 data = *cpuusage; 10724 data = *cpuusage;
10722 spin_unlock_irq(&cpu_rq(cpu)->lock); 10725 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10723#else 10726#else
10724 data = *cpuusage; 10727 data = *cpuusage;
10725#endif 10728#endif
@@ -10735,9 +10738,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
10735 /* 10738 /*
10736 * Take rq->lock to make 64-bit write safe on 32-bit platforms. 10739 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
10737 */ 10740 */
10738 spin_lock_irq(&cpu_rq(cpu)->lock); 10741 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10739 *cpuusage = val; 10742 *cpuusage = val;
10740 spin_unlock_irq(&cpu_rq(cpu)->lock); 10743 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10741#else 10744#else
10742 *cpuusage = val; 10745 *cpuusage = val;
10743#endif 10746#endif
@@ -10971,9 +10974,9 @@ void synchronize_sched_expedited(void)
10971 init_completion(&req->done); 10974 init_completion(&req->done);
10972 req->task = NULL; 10975 req->task = NULL;
10973 req->dest_cpu = RCU_MIGRATION_NEED_QS; 10976 req->dest_cpu = RCU_MIGRATION_NEED_QS;
10974 spin_lock_irqsave(&rq->lock, flags); 10977 raw_spin_lock_irqsave(&rq->lock, flags);
10975 list_add(&req->list, &rq->migration_queue); 10978 list_add(&req->list, &rq->migration_queue);
10976 spin_unlock_irqrestore(&rq->lock, flags); 10979 raw_spin_unlock_irqrestore(&rq->lock, flags);
10977 wake_up_process(rq->migration_thread); 10980 wake_up_process(rq->migration_thread);
10978 } 10981 }
10979 for_each_online_cpu(cpu) { 10982 for_each_online_cpu(cpu) {
@@ -10981,11 +10984,11 @@ void synchronize_sched_expedited(void)
10981 req = &per_cpu(rcu_migration_req, cpu); 10984 req = &per_cpu(rcu_migration_req, cpu);
10982 rq = cpu_rq(cpu); 10985 rq = cpu_rq(cpu);
10983 wait_for_completion(&req->done); 10986 wait_for_completion(&req->done);
10984 spin_lock_irqsave(&rq->lock, flags); 10987 raw_spin_lock_irqsave(&rq->lock, flags);
10985 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) 10988 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
10986 need_full_sync = 1; 10989 need_full_sync = 1;
10987 req->dest_cpu = RCU_MIGRATION_IDLE; 10990 req->dest_cpu = RCU_MIGRATION_IDLE;
10988 spin_unlock_irqrestore(&rq->lock, flags); 10991 raw_spin_unlock_irqrestore(&rq->lock, flags);
10989 } 10992 }
10990 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 10993 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10991 synchronize_sched_expedited_count++; 10994 synchronize_sched_expedited_count++;
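Note: the kernel/sched.c hunks above are part of a mechanical conversion. The per-runqueue rq->lock, the RT bandwidth/runtime locks and the plist heads chained to them change type from spinlock_t to raw_spinlock_t, so every spin_lock*()/spin_unlock*()/spin_lock_init() call on those locks becomes the corresponding raw_spin_* call and plist_head_init() becomes plist_head_init_raw(). The call pattern is unchanged; only the prefix and the lock type differ. The distinction matters for preemption models such as the -rt patch set, where an ordinary spinlock_t may be substituted by a sleeping lock, while a raw_spinlock_t always stays a spinning lock. A minimal sketch of the converted idiom, assuming a kernel build environment; the lock and function names below are hypothetical, only the raw_spin_* API is taken from the diff:

#include <linux/spinlock.h>

/* Hypothetical state protected by a raw (always-spinning) lock. */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned long example_counter;

static void example_update(void)
{
        unsigned long flags;

        /*
         * raw_spin_lock_irqsave() mirrors spin_lock_irqsave(), but a
         * raw_spinlock_t is never converted into a sleeping lock, so it
         * remains safe for contexts that must not schedule.
         */
        raw_spin_lock_irqsave(&example_lock, flags);
        example_counter++;
        raw_spin_unlock_irqrestore(&example_lock, flags);
}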
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 0f052fc674d5..597b33099dfa 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -135,26 +135,26 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
135 if (likely(newpri != CPUPRI_INVALID)) { 135 if (likely(newpri != CPUPRI_INVALID)) {
136 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; 136 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
137 137
138 spin_lock_irqsave(&vec->lock, flags); 138 raw_spin_lock_irqsave(&vec->lock, flags);
139 139
140 cpumask_set_cpu(cpu, vec->mask); 140 cpumask_set_cpu(cpu, vec->mask);
141 vec->count++; 141 vec->count++;
142 if (vec->count == 1) 142 if (vec->count == 1)
143 set_bit(newpri, cp->pri_active); 143 set_bit(newpri, cp->pri_active);
144 144
145 spin_unlock_irqrestore(&vec->lock, flags); 145 raw_spin_unlock_irqrestore(&vec->lock, flags);
146 } 146 }
147 if (likely(oldpri != CPUPRI_INVALID)) { 147 if (likely(oldpri != CPUPRI_INVALID)) {
148 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; 148 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
149 149
150 spin_lock_irqsave(&vec->lock, flags); 150 raw_spin_lock_irqsave(&vec->lock, flags);
151 151
152 vec->count--; 152 vec->count--;
153 if (!vec->count) 153 if (!vec->count)
154 clear_bit(oldpri, cp->pri_active); 154 clear_bit(oldpri, cp->pri_active);
155 cpumask_clear_cpu(cpu, vec->mask); 155 cpumask_clear_cpu(cpu, vec->mask);
156 156
157 spin_unlock_irqrestore(&vec->lock, flags); 157 raw_spin_unlock_irqrestore(&vec->lock, flags);
158 } 158 }
159 159
160 *currpri = newpri; 160 *currpri = newpri;
@@ -180,7 +180,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem)
180 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { 180 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
181 struct cpupri_vec *vec = &cp->pri_to_cpu[i]; 181 struct cpupri_vec *vec = &cp->pri_to_cpu[i];
182 182
183 spin_lock_init(&vec->lock); 183 raw_spin_lock_init(&vec->lock);
184 vec->count = 0; 184 vec->count = 0;
185 if (!zalloc_cpumask_var(&vec->mask, gfp)) 185 if (!zalloc_cpumask_var(&vec->mask, gfp))
186 goto cleanup; 186 goto cleanup;
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 9a7e859b8fbf..7cb5bb6b95be 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -12,7 +12,7 @@
12/* values 2-101 are RT priorities 0-99 */ 12/* values 2-101 are RT priorities 0-99 */
13 13
14struct cpupri_vec { 14struct cpupri_vec {
15 spinlock_t lock; 15 raw_spinlock_t lock;
16 int count; 16 int count;
17 cpumask_var_t mask; 17 cpumask_var_t mask;
18}; 18};
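sched_cpupri embeds its lock inside struct cpupri_vec, so the conversion touches the declaration in sched_cpupri.h as well as the run-time initialisation in cpupri_init() and the lock/unlock sites in cpupri_set(). The member form of the same idiom, with hypothetical names, looks roughly like this:

#include <linux/spinlock.h>

struct example_vec {
        raw_spinlock_t lock;    /* protects count */
        int            count;
};

static void example_vec_init(struct example_vec *vec)
{
        /*
         * A lock embedded in another object is initialised at run time
         * with raw_spin_lock_init() instead of DEFINE_RAW_SPINLOCK().
         */
        raw_spin_lock_init(&vec->lock);
        vec->count = 0;
}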
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5ae24fc65d75..67f95aada4b9 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
184 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", 184 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
185 SPLIT_NS(cfs_rq->exec_clock)); 185 SPLIT_NS(cfs_rq->exec_clock));
186 186
187 spin_lock_irqsave(&rq->lock, flags); 187 raw_spin_lock_irqsave(&rq->lock, flags);
188 if (cfs_rq->rb_leftmost) 188 if (cfs_rq->rb_leftmost)
189 MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime; 189 MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
190 last = __pick_last_entity(cfs_rq); 190 last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
192 max_vruntime = last->vruntime; 192 max_vruntime = last->vruntime;
193 min_vruntime = cfs_rq->min_vruntime; 193 min_vruntime = cfs_rq->min_vruntime;
194 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; 194 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
195 spin_unlock_irqrestore(&rq->lock, flags); 195 raw_spin_unlock_irqrestore(&rq->lock, flags);
196 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", 196 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
197 SPLIT_NS(MIN_vruntime)); 197 SPLIT_NS(MIN_vruntime));
198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", 198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 804a411838f1..5bedf6e3ebf3 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1955,7 +1955,7 @@ static void task_fork_fair(struct task_struct *p)
1955 struct rq *rq = this_rq(); 1955 struct rq *rq = this_rq();
1956 unsigned long flags; 1956 unsigned long flags;
1957 1957
1958 spin_lock_irqsave(&rq->lock, flags); 1958 raw_spin_lock_irqsave(&rq->lock, flags);
1959 1959
1960 if (unlikely(task_cpu(p) != this_cpu)) 1960 if (unlikely(task_cpu(p) != this_cpu))
1961 __set_task_cpu(p, this_cpu); 1961 __set_task_cpu(p, this_cpu);
@@ -1975,7 +1975,7 @@ static void task_fork_fair(struct task_struct *p)
1975 resched_task(rq->curr); 1975 resched_task(rq->curr);
1976 } 1976 }
1977 1977
1978 spin_unlock_irqrestore(&rq->lock, flags); 1978 raw_spin_unlock_irqrestore(&rq->lock, flags);
1979} 1979}
1980 1980
1981/* 1981/*
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 33d5384a73a8..5f93b570d383 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
34static void 34static void
35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) 35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
36{ 36{
37 spin_unlock_irq(&rq->lock); 37 raw_spin_unlock_irq(&rq->lock);
38 printk(KERN_ERR "bad: scheduling from the idle thread!\n"); 38 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
39 dump_stack(); 39 dump_stack();
40 spin_lock_irq(&rq->lock); 40 raw_spin_lock_irq(&rq->lock);
41} 41}
42 42
43static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) 43static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index aecbd9c6b20c..d2ea2828164e 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
327 327
328 weight = cpumask_weight(rd->span); 328 weight = cpumask_weight(rd->span);
329 329
330 spin_lock(&rt_b->rt_runtime_lock); 330 raw_spin_lock(&rt_b->rt_runtime_lock);
331 rt_period = ktime_to_ns(rt_b->rt_period); 331 rt_period = ktime_to_ns(rt_b->rt_period);
332 for_each_cpu(i, rd->span) { 332 for_each_cpu(i, rd->span) {
333 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 333 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
336 if (iter == rt_rq) 336 if (iter == rt_rq)
337 continue; 337 continue;
338 338
339 spin_lock(&iter->rt_runtime_lock); 339 raw_spin_lock(&iter->rt_runtime_lock);
340 /* 340 /*
341 * Either all rqs have inf runtime and there's nothing to steal 341 * Either all rqs have inf runtime and there's nothing to steal
342 * or __disable_runtime() below sets a specific rq to inf to 342 * or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
358 rt_rq->rt_runtime += diff; 358 rt_rq->rt_runtime += diff;
359 more = 1; 359 more = 1;
360 if (rt_rq->rt_runtime == rt_period) { 360 if (rt_rq->rt_runtime == rt_period) {
361 spin_unlock(&iter->rt_runtime_lock); 361 raw_spin_unlock(&iter->rt_runtime_lock);
362 break; 362 break;
363 } 363 }
364 } 364 }
365next: 365next:
366 spin_unlock(&iter->rt_runtime_lock); 366 raw_spin_unlock(&iter->rt_runtime_lock);
367 } 367 }
368 spin_unlock(&rt_b->rt_runtime_lock); 368 raw_spin_unlock(&rt_b->rt_runtime_lock);
369 369
370 return more; 370 return more;
371} 371}
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
386 s64 want; 386 s64 want;
387 int i; 387 int i;
388 388
389 spin_lock(&rt_b->rt_runtime_lock); 389 raw_spin_lock(&rt_b->rt_runtime_lock);
390 spin_lock(&rt_rq->rt_runtime_lock); 390 raw_spin_lock(&rt_rq->rt_runtime_lock);
391 /* 391 /*
392 * Either we're all inf and nobody needs to borrow, or we're 392 * Either we're all inf and nobody needs to borrow, or we're
393 * already disabled and thus have nothing to do, or we have 393 * already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
396 if (rt_rq->rt_runtime == RUNTIME_INF || 396 if (rt_rq->rt_runtime == RUNTIME_INF ||
397 rt_rq->rt_runtime == rt_b->rt_runtime) 397 rt_rq->rt_runtime == rt_b->rt_runtime)
398 goto balanced; 398 goto balanced;
399 spin_unlock(&rt_rq->rt_runtime_lock); 399 raw_spin_unlock(&rt_rq->rt_runtime_lock);
400 400
401 /* 401 /*
402 * Calculate the difference between what we started out with 402 * Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
418 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) 418 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
419 continue; 419 continue;
420 420
421 spin_lock(&iter->rt_runtime_lock); 421 raw_spin_lock(&iter->rt_runtime_lock);
422 if (want > 0) { 422 if (want > 0) {
423 diff = min_t(s64, iter->rt_runtime, want); 423 diff = min_t(s64, iter->rt_runtime, want);
424 iter->rt_runtime -= diff; 424 iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
427 iter->rt_runtime -= want; 427 iter->rt_runtime -= want;
428 want -= want; 428 want -= want;
429 } 429 }
430 spin_unlock(&iter->rt_runtime_lock); 430 raw_spin_unlock(&iter->rt_runtime_lock);
431 431
432 if (!want) 432 if (!want)
433 break; 433 break;
434 } 434 }
435 435
436 spin_lock(&rt_rq->rt_runtime_lock); 436 raw_spin_lock(&rt_rq->rt_runtime_lock);
437 /* 437 /*
438 * We cannot be left wanting - that would mean some runtime 438 * We cannot be left wanting - that would mean some runtime
439 * leaked out of the system. 439 * leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
445 * runtime - in which case borrowing doesn't make sense. 445 * runtime - in which case borrowing doesn't make sense.
446 */ 446 */
447 rt_rq->rt_runtime = RUNTIME_INF; 447 rt_rq->rt_runtime = RUNTIME_INF;
448 spin_unlock(&rt_rq->rt_runtime_lock); 448 raw_spin_unlock(&rt_rq->rt_runtime_lock);
449 spin_unlock(&rt_b->rt_runtime_lock); 449 raw_spin_unlock(&rt_b->rt_runtime_lock);
450 } 450 }
451} 451}
452 452
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
454{ 454{
455 unsigned long flags; 455 unsigned long flags;
456 456
457 spin_lock_irqsave(&rq->lock, flags); 457 raw_spin_lock_irqsave(&rq->lock, flags);
458 __disable_runtime(rq); 458 __disable_runtime(rq);
459 spin_unlock_irqrestore(&rq->lock, flags); 459 raw_spin_unlock_irqrestore(&rq->lock, flags);
460} 460}
461 461
462static void __enable_runtime(struct rq *rq) 462static void __enable_runtime(struct rq *rq)
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
472 for_each_leaf_rt_rq(rt_rq, rq) { 472 for_each_leaf_rt_rq(rt_rq, rq) {
473 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 473 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
474 474
475 spin_lock(&rt_b->rt_runtime_lock); 475 raw_spin_lock(&rt_b->rt_runtime_lock);
476 spin_lock(&rt_rq->rt_runtime_lock); 476 raw_spin_lock(&rt_rq->rt_runtime_lock);
477 rt_rq->rt_runtime = rt_b->rt_runtime; 477 rt_rq->rt_runtime = rt_b->rt_runtime;
478 rt_rq->rt_time = 0; 478 rt_rq->rt_time = 0;
479 rt_rq->rt_throttled = 0; 479 rt_rq->rt_throttled = 0;
480 spin_unlock(&rt_rq->rt_runtime_lock); 480 raw_spin_unlock(&rt_rq->rt_runtime_lock);
481 spin_unlock(&rt_b->rt_runtime_lock); 481 raw_spin_unlock(&rt_b->rt_runtime_lock);
482 } 482 }
483} 483}
484 484
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
486{ 486{
487 unsigned long flags; 487 unsigned long flags;
488 488
489 spin_lock_irqsave(&rq->lock, flags); 489 raw_spin_lock_irqsave(&rq->lock, flags);
490 __enable_runtime(rq); 490 __enable_runtime(rq);
491 spin_unlock_irqrestore(&rq->lock, flags); 491 raw_spin_unlock_irqrestore(&rq->lock, flags);
492} 492}
493 493
494static int balance_runtime(struct rt_rq *rt_rq) 494static int balance_runtime(struct rt_rq *rt_rq)
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
496 int more = 0; 496 int more = 0;
497 497
498 if (rt_rq->rt_time > rt_rq->rt_runtime) { 498 if (rt_rq->rt_time > rt_rq->rt_runtime) {
499 spin_unlock(&rt_rq->rt_runtime_lock); 499 raw_spin_unlock(&rt_rq->rt_runtime_lock);
500 more = do_balance_runtime(rt_rq); 500 more = do_balance_runtime(rt_rq);
501 spin_lock(&rt_rq->rt_runtime_lock); 501 raw_spin_lock(&rt_rq->rt_runtime_lock);
502 } 502 }
503 503
504 return more; 504 return more;
@@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
524 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 524 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
525 struct rq *rq = rq_of_rt_rq(rt_rq); 525 struct rq *rq = rq_of_rt_rq(rt_rq);
526 526
527 spin_lock(&rq->lock); 527 raw_spin_lock(&rq->lock);
528 if (rt_rq->rt_time) { 528 if (rt_rq->rt_time) {
529 u64 runtime; 529 u64 runtime;
530 530
531 spin_lock(&rt_rq->rt_runtime_lock); 531 raw_spin_lock(&rt_rq->rt_runtime_lock);
532 if (rt_rq->rt_throttled) 532 if (rt_rq->rt_throttled)
533 balance_runtime(rt_rq); 533 balance_runtime(rt_rq);
534 runtime = rt_rq->rt_runtime; 534 runtime = rt_rq->rt_runtime;
@@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
539 } 539 }
540 if (rt_rq->rt_time || rt_rq->rt_nr_running) 540 if (rt_rq->rt_time || rt_rq->rt_nr_running)
541 idle = 0; 541 idle = 0;
542 spin_unlock(&rt_rq->rt_runtime_lock); 542 raw_spin_unlock(&rt_rq->rt_runtime_lock);
543 } else if (rt_rq->rt_nr_running) 543 } else if (rt_rq->rt_nr_running)
544 idle = 0; 544 idle = 0;
545 545
546 if (enqueue) 546 if (enqueue)
547 sched_rt_rq_enqueue(rt_rq); 547 sched_rt_rq_enqueue(rt_rq);
548 spin_unlock(&rq->lock); 548 raw_spin_unlock(&rq->lock);
549 } 549 }
550 550
551 return idle; 551 return idle;
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
624 rt_rq = rt_rq_of_se(rt_se); 624 rt_rq = rt_rq_of_se(rt_se);
625 625
626 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { 626 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
627 spin_lock(&rt_rq->rt_runtime_lock); 627 raw_spin_lock(&rt_rq->rt_runtime_lock);
628 rt_rq->rt_time += delta_exec; 628 rt_rq->rt_time += delta_exec;
629 if (sched_rt_runtime_exceeded(rt_rq)) 629 if (sched_rt_runtime_exceeded(rt_rq))
630 resched_task(curr); 630 resched_task(curr);
631 spin_unlock(&rt_rq->rt_runtime_lock); 631 raw_spin_unlock(&rt_rq->rt_runtime_lock);
632 } 632 }
633 } 633 }
634} 634}
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1246 task_running(rq, task) || 1246 task_running(rq, task) ||
1247 !task->se.on_rq)) { 1247 !task->se.on_rq)) {
1248 1248
1249 spin_unlock(&lowest_rq->lock); 1249 raw_spin_unlock(&lowest_rq->lock);
1250 lowest_rq = NULL; 1250 lowest_rq = NULL;
1251 break; 1251 break;
1252 } 1252 }
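In the RT runtime balancing code above, two of the converted locks nest: __disable_runtime() and __enable_runtime() take the bandwidth-wide rt_b->rt_runtime_lock first and the per-runqueue rt_rq->rt_runtime_lock inside it, and the conversion preserves that ordering exactly. A reduced sketch of the same nesting with raw spinlocks; the outer/inner names are hypothetical stand-ins for the two runtime locks:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(outer_lock);        /* role of rt_b->rt_runtime_lock */
static DEFINE_RAW_SPINLOCK(inner_lock);        /* role of rt_rq->rt_runtime_lock */
static u64 outer_budget, inner_budget;

static void refill_inner_from_outer(void)
{
        /*
         * Always acquire in the same order (outer, then inner) and
         * release in reverse order, so no AB-BA deadlock is possible
         * against other paths that use the same ordering.
         */
        raw_spin_lock(&outer_lock);
        raw_spin_lock(&inner_lock);

        inner_budget = outer_budget;

        raw_spin_unlock(&inner_lock);
        raw_spin_unlock(&outer_lock);
}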
diff --git a/kernel/signal.c b/kernel/signal.c
index 6b982f2cf524..1814e68e4de3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -423,7 +423,7 @@ still_pending:
423 */ 423 */
424 info->si_signo = sig; 424 info->si_signo = sig;
425 info->si_errno = 0; 425 info->si_errno = 0;
426 info->si_code = 0; 426 info->si_code = SI_USER;
427 info->si_pid = 0; 427 info->si_pid = 0;
428 info->si_uid = 0; 428 info->si_uid = 0;
429 } 429 }
@@ -607,6 +607,17 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
607 return 1; 607 return 1;
608} 608}
609 609
610static inline int is_si_special(const struct siginfo *info)
611{
612 return info <= SEND_SIG_FORCED;
613}
614
615static inline bool si_fromuser(const struct siginfo *info)
616{
617 return info == SEND_SIG_NOINFO ||
618 (!is_si_special(info) && SI_FROMUSER(info));
619}
620
610/* 621/*
611 * Bad permissions for sending the signal 622 * Bad permissions for sending the signal
612 * - the caller must hold at least the RCU read lock 623 * - the caller must hold at least the RCU read lock
@@ -621,7 +632,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
621 if (!valid_signal(sig)) 632 if (!valid_signal(sig))
622 return -EINVAL; 633 return -EINVAL;
623 634
624 if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info))) 635 if (!si_fromuser(info))
625 return 0; 636 return 0;
626 637
627 error = audit_signal_info(sig, t); /* Let audit system see the signal */ 638 error = audit_signal_info(sig, t); /* Let audit system see the signal */
@@ -949,9 +960,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
949 int from_ancestor_ns = 0; 960 int from_ancestor_ns = 0;
950 961
951#ifdef CONFIG_PID_NS 962#ifdef CONFIG_PID_NS
952 if (!is_si_special(info) && SI_FROMUSER(info) && 963 from_ancestor_ns = si_fromuser(info) &&
953 task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0) 964 !task_pid_nr_ns(current, task_active_pid_ns(t));
954 from_ancestor_ns = 1;
955#endif 965#endif
956 966
957 return __send_signal(sig, info, t, group, from_ancestor_ns); 967 return __send_signal(sig, info, t, group, from_ancestor_ns);
@@ -1052,12 +1062,6 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1052 return ret; 1062 return ret;
1053} 1063}
1054 1064
1055void
1056force_sig_specific(int sig, struct task_struct *t)
1057{
1058 force_sig_info(sig, SEND_SIG_FORCED, t);
1059}
1060
1061/* 1065/*
1062 * Nuke all other threads in the group. 1066 * Nuke all other threads in the group.
1063 */ 1067 */
@@ -1186,8 +1190,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1186 goto out_unlock; 1190 goto out_unlock;
1187 } 1191 }
1188 pcred = __task_cred(p); 1192 pcred = __task_cred(p);
1189 if ((info == SEND_SIG_NOINFO || 1193 if (si_fromuser(info) &&
1190 (!is_si_special(info) && SI_FROMUSER(info))) &&
1191 euid != pcred->suid && euid != pcred->uid && 1194 euid != pcred->suid && euid != pcred->uid &&
1192 uid != pcred->suid && uid != pcred->uid) { 1195 uid != pcred->suid && uid != pcred->uid) {
1193 ret = -EPERM; 1196 ret = -EPERM;
@@ -1837,11 +1840,6 @@ relock:
1837 1840
1838 for (;;) { 1841 for (;;) {
1839 struct k_sigaction *ka; 1842 struct k_sigaction *ka;
1840
1841 if (unlikely(signal->group_stop_count > 0) &&
1842 do_signal_stop(0))
1843 goto relock;
1844
1845 /* 1843 /*
1846 * Tracing can induce an artifical signal and choose sigaction. 1844 * Tracing can induce an artifical signal and choose sigaction.
1847 * The return value in @signr determines the default action, 1845 * The return value in @signr determines the default action,
@@ -1853,6 +1851,10 @@ relock:
1853 if (unlikely(signr != 0)) 1851 if (unlikely(signr != 0))
1854 ka = return_ka; 1852 ka = return_ka;
1855 else { 1853 else {
1854 if (unlikely(signal->group_stop_count > 0) &&
1855 do_signal_stop(0))
1856 goto relock;
1857
1856 signr = dequeue_signal(current, &current->blocked, 1858 signr = dequeue_signal(current, &current->blocked,
1857 info); 1859 info);
1858 1860
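The kernel/signal.c hunks are independent of the lock rework: they introduce si_fromuser() and use it in check_kill_permission(), send_signal() and kill_pid_info_as_uid(), replacing three slightly different open-coded tests for "does this siginfo originate from user space". The si_code = SI_USER change earlier in the file is equivalent to the old 0 (SI_USER is defined as 0) and simply makes the intent explicit; the hunks also drop the force_sig_specific() wrapper and move the group-stop check so it only runs when tracing has not already chosen a signal. Restating the new helpers from the added lines as a stand-alone sketch, where SEND_SIG_NOINFO/SEND_SIG_FORCED and SI_FROMUSER() are the kernel's own definitions and only the comments are added:

/*
 * A siginfo pointer is "special" when it is one of the magic kernel
 * cookies SEND_SIG_NOINFO, SEND_SIG_PRIV or SEND_SIG_FORCED rather
 * than a pointer to a real struct siginfo.
 */
static inline int is_si_special(const struct siginfo *info)
{
        return info <= SEND_SIG_FORCED;
}

/*
 * True when the signal must be treated as sent by user space: either
 * no siginfo was supplied at all, or a real siginfo whose si_code
 * marks it as user-generated (SI_FROMUSER()).
 */
static inline bool si_fromuser(const struct siginfo *info)
{
        return info == SEND_SIG_NOINFO ||
                (!is_si_special(info) && SI_FROMUSER(info));
}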
diff --git a/kernel/smp.c b/kernel/smp.c
index a8c76069cf50..de735a6637d0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
16 16
17static struct { 17static struct {
18 struct list_head queue; 18 struct list_head queue;
19 spinlock_t lock; 19 raw_spinlock_t lock;
20} call_function __cacheline_aligned_in_smp = 20} call_function __cacheline_aligned_in_smp =
21 { 21 {
22 .queue = LIST_HEAD_INIT(call_function.queue), 22 .queue = LIST_HEAD_INIT(call_function.queue),
23 .lock = __SPIN_LOCK_UNLOCKED(call_function.lock), 23 .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
24 }; 24 };
25 25
26enum { 26enum {
@@ -35,7 +35,7 @@ struct call_function_data {
35 35
36struct call_single_queue { 36struct call_single_queue {
37 struct list_head list; 37 struct list_head list;
38 spinlock_t lock; 38 raw_spinlock_t lock;
39}; 39};
40 40
41static DEFINE_PER_CPU(struct call_function_data, cfd_data); 41static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
80 for_each_possible_cpu(i) { 80 for_each_possible_cpu(i) {
81 struct call_single_queue *q = &per_cpu(call_single_queue, i); 81 struct call_single_queue *q = &per_cpu(call_single_queue, i);
82 82
83 spin_lock_init(&q->lock); 83 raw_spin_lock_init(&q->lock);
84 INIT_LIST_HEAD(&q->list); 84 INIT_LIST_HEAD(&q->list);
85 } 85 }
86 86
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
141 unsigned long flags; 141 unsigned long flags;
142 int ipi; 142 int ipi;
143 143
144 spin_lock_irqsave(&dst->lock, flags); 144 raw_spin_lock_irqsave(&dst->lock, flags);
145 ipi = list_empty(&dst->list); 145 ipi = list_empty(&dst->list);
146 list_add_tail(&data->list, &dst->list); 146 list_add_tail(&data->list, &dst->list);
147 spin_unlock_irqrestore(&dst->lock, flags); 147 raw_spin_unlock_irqrestore(&dst->lock, flags);
148 148
149 /* 149 /*
150 * The list addition should be visible before sending the IPI 150 * The list addition should be visible before sending the IPI
@@ -171,7 +171,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
171void generic_smp_call_function_interrupt(void) 171void generic_smp_call_function_interrupt(void)
172{ 172{
173 struct call_function_data *data; 173 struct call_function_data *data;
174 int cpu = get_cpu(); 174 int cpu = smp_processor_id();
175 175
176 /* 176 /*
177 * Shouldn't receive this interrupt on a cpu that is not yet online. 177 * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
201 refs = atomic_dec_return(&data->refs); 201 refs = atomic_dec_return(&data->refs);
202 WARN_ON(refs < 0); 202 WARN_ON(refs < 0);
203 if (!refs) { 203 if (!refs) {
204 spin_lock(&call_function.lock); 204 raw_spin_lock(&call_function.lock);
205 list_del_rcu(&data->csd.list); 205 list_del_rcu(&data->csd.list);
206 spin_unlock(&call_function.lock); 206 raw_spin_unlock(&call_function.lock);
207 } 207 }
208 208
209 if (refs) 209 if (refs)
@@ -212,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
212 csd_unlock(&data->csd); 212 csd_unlock(&data->csd);
213 } 213 }
214 214
215 put_cpu();
216} 215}
217 216
218/* 217/*
@@ -230,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
230 */ 229 */
231 WARN_ON_ONCE(!cpu_online(smp_processor_id())); 230 WARN_ON_ONCE(!cpu_online(smp_processor_id()));
232 231
233 spin_lock(&q->lock); 232 raw_spin_lock(&q->lock);
234 list_replace_init(&q->list, &list); 233 list_replace_init(&q->list, &list);
235 spin_unlock(&q->lock); 234 raw_spin_unlock(&q->lock);
236 235
237 while (!list_empty(&list)) { 236 while (!list_empty(&list)) {
238 struct call_single_data *data; 237 struct call_single_data *data;
@@ -449,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
449 cpumask_clear_cpu(this_cpu, data->cpumask); 448 cpumask_clear_cpu(this_cpu, data->cpumask);
450 atomic_set(&data->refs, cpumask_weight(data->cpumask)); 449 atomic_set(&data->refs, cpumask_weight(data->cpumask));
451 450
452 spin_lock_irqsave(&call_function.lock, flags); 451 raw_spin_lock_irqsave(&call_function.lock, flags);
453 /* 452 /*
454 * Place entry at the _HEAD_ of the list, so that any cpu still 453 * Place entry at the _HEAD_ of the list, so that any cpu still
455 * observing the entry in generic_smp_call_function_interrupt() 454 * observing the entry in generic_smp_call_function_interrupt()
456 * will not miss any other list entries: 455 * will not miss any other list entries:
457 */ 456 */
458 list_add_rcu(&data->csd.list, &call_function.queue); 457 list_add_rcu(&data->csd.list, &call_function.queue);
459 spin_unlock_irqrestore(&call_function.lock, flags); 458 raw_spin_unlock_irqrestore(&call_function.lock, flags);
460 459
461 /* 460 /*
462 * Make the list addition visible before sending the ipi. 461 * Make the list addition visible before sending the ipi.
@@ -501,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
501 500
502void ipi_call_lock(void) 501void ipi_call_lock(void)
503{ 502{
504 spin_lock(&call_function.lock); 503 raw_spin_lock(&call_function.lock);
505} 504}
506 505
507void ipi_call_unlock(void) 506void ipi_call_unlock(void)
508{ 507{
509 spin_unlock(&call_function.lock); 508 raw_spin_unlock(&call_function.lock);
510} 509}
511 510
512void ipi_call_lock_irq(void) 511void ipi_call_lock_irq(void)
513{ 512{
514 spin_lock_irq(&call_function.lock); 513 raw_spin_lock_irq(&call_function.lock);
515} 514}
516 515
517void ipi_call_unlock_irq(void) 516void ipi_call_unlock_irq(void)
518{ 517{
519 spin_unlock_irq(&call_function.lock); 518 raw_spin_unlock_irq(&call_function.lock);
520} 519}
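Two separate things happen in kernel/smp.c: the call_function queue lock and the per-CPU call_single_queue lock become raw_spinlock_t, since both are taken from IPI handlers, and generic_smp_call_function_interrupt() replaces its get_cpu()/put_cpu() pair with a plain smp_processor_id(), which is safe because the handler runs in hardware interrupt context where the CPU cannot change under it. A sketch of the queue idiom used here, with hypothetical names; the raw_spin_* calls and list helpers are the ones visible in the diff:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_queue {
        struct list_head list;
        raw_spinlock_t   lock;  /* taken from IPI context, hence raw */
};

static struct example_queue eq = {
        .list = LIST_HEAD_INIT(eq.list),
        .lock = __RAW_SPIN_LOCK_UNLOCKED(eq.lock),
};

/*
 * Producer side, modelled on generic_exec_single(): queue one entry and
 * report whether the list was empty, in which case the caller would
 * send the IPI.
 */
static int example_enqueue(struct list_head *entry)
{
        unsigned long flags;
        int was_empty;

        raw_spin_lock_irqsave(&eq.lock, flags);
        was_empty = list_empty(&eq.list);
        list_add_tail(entry, &eq.list);
        raw_spin_unlock_irqrestore(&eq.lock, flags);

        return was_empty;
}

/*
 * Consumer side, modelled on generic_smp_call_function_single_interrupt():
 * detach the whole list under the lock, then walk the private copy
 * without holding it.
 */
static void example_drain(void)
{
        LIST_HEAD(local);

        raw_spin_lock(&eq.lock);
        list_replace_init(&eq.list, &local);
        raw_spin_unlock(&eq.lock);

        /* process the entries on 'local' here */
}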
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 41e042219ff6..be6517fb9c14 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -32,6 +32,8 @@
32 * include/linux/spinlock_api_smp.h 32 * include/linux/spinlock_api_smp.h
33 */ 33 */
34#else 34#else
35#define raw_read_can_lock(l) read_can_lock(l)
36#define raw_write_can_lock(l) write_can_lock(l)
35/* 37/*
36 * We build the __lock_function inlines here. They are too large for 38 * We build the __lock_function inlines here. They are too large for
37 * inlining all over the place, but here is only one user per function 39 * inlining all over the place, but here is only one user per function
@@ -42,49 +44,49 @@
42 * towards that other CPU that it should break the lock ASAP. 44 * towards that other CPU that it should break the lock ASAP.
43 */ 45 */
44#define BUILD_LOCK_OPS(op, locktype) \ 46#define BUILD_LOCK_OPS(op, locktype) \
45void __lockfunc __##op##_lock(locktype##_t *lock) \ 47void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
46{ \ 48{ \
47 for (;;) { \ 49 for (;;) { \
48 preempt_disable(); \ 50 preempt_disable(); \
49 if (likely(_raw_##op##_trylock(lock))) \ 51 if (likely(do_raw_##op##_trylock(lock))) \
50 break; \ 52 break; \
51 preempt_enable(); \ 53 preempt_enable(); \
52 \ 54 \
53 if (!(lock)->break_lock) \ 55 if (!(lock)->break_lock) \
54 (lock)->break_lock = 1; \ 56 (lock)->break_lock = 1; \
55 while (!op##_can_lock(lock) && (lock)->break_lock) \ 57 while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
56 _raw_##op##_relax(&lock->raw_lock); \ 58 arch_##op##_relax(&lock->raw_lock); \
57 } \ 59 } \
58 (lock)->break_lock = 0; \ 60 (lock)->break_lock = 0; \
59} \ 61} \
60 \ 62 \
61unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ 63unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
62{ \ 64{ \
63 unsigned long flags; \ 65 unsigned long flags; \
64 \ 66 \
65 for (;;) { \ 67 for (;;) { \
66 preempt_disable(); \ 68 preempt_disable(); \
67 local_irq_save(flags); \ 69 local_irq_save(flags); \
68 if (likely(_raw_##op##_trylock(lock))) \ 70 if (likely(do_raw_##op##_trylock(lock))) \
69 break; \ 71 break; \
70 local_irq_restore(flags); \ 72 local_irq_restore(flags); \
71 preempt_enable(); \ 73 preempt_enable(); \
72 \ 74 \
73 if (!(lock)->break_lock) \ 75 if (!(lock)->break_lock) \
74 (lock)->break_lock = 1; \ 76 (lock)->break_lock = 1; \
75 while (!op##_can_lock(lock) && (lock)->break_lock) \ 77 while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
76 _raw_##op##_relax(&lock->raw_lock); \ 78 arch_##op##_relax(&lock->raw_lock); \
77 } \ 79 } \
78 (lock)->break_lock = 0; \ 80 (lock)->break_lock = 0; \
79 return flags; \ 81 return flags; \
80} \ 82} \
81 \ 83 \
82void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ 84void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
83{ \ 85{ \
84 _##op##_lock_irqsave(lock); \ 86 _raw_##op##_lock_irqsave(lock); \
85} \ 87} \
86 \ 88 \
87void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ 89void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
88{ \ 90{ \
89 unsigned long flags; \ 91 unsigned long flags; \
90 \ 92 \
@@ -93,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
93 /* irq-disabling. We use the generic preemption-aware */ \ 95 /* irq-disabling. We use the generic preemption-aware */ \
94 /* function: */ \ 96 /* function: */ \
95 /**/ \ 97 /**/ \
96 flags = _##op##_lock_irqsave(lock); \ 98 flags = _raw_##op##_lock_irqsave(lock); \
97 local_bh_disable(); \ 99 local_bh_disable(); \
98 local_irq_restore(flags); \ 100 local_irq_restore(flags); \
99} \ 101} \
@@ -107,269 +109,269 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
107 * __[spin|read|write]_lock_irqsave() 109 * __[spin|read|write]_lock_irqsave()
108 * __[spin|read|write]_lock_bh() 110 * __[spin|read|write]_lock_bh()
109 */ 111 */
110BUILD_LOCK_OPS(spin, spinlock); 112BUILD_LOCK_OPS(spin, raw_spinlock);
111BUILD_LOCK_OPS(read, rwlock); 113BUILD_LOCK_OPS(read, rwlock);
112BUILD_LOCK_OPS(write, rwlock); 114BUILD_LOCK_OPS(write, rwlock);
113 115
114#endif 116#endif
115 117
116#ifdef CONFIG_DEBUG_LOCK_ALLOC 118#ifndef CONFIG_INLINE_SPIN_TRYLOCK
117 119int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
118void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
119{ 120{
120 preempt_disable(); 121 return __raw_spin_trylock(lock);
121 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
122 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
123} 122}
124EXPORT_SYMBOL(_spin_lock_nested); 123EXPORT_SYMBOL(_raw_spin_trylock);
124#endif
125 125
126unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, 126#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
127 int subclass) 127int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
128{ 128{
129 unsigned long flags; 129 return __raw_spin_trylock_bh(lock);
130
131 local_irq_save(flags);
132 preempt_disable();
133 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
134 LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
135 _raw_spin_lock_flags, &flags);
136 return flags;
137} 130}
138EXPORT_SYMBOL(_spin_lock_irqsave_nested); 131EXPORT_SYMBOL(_raw_spin_trylock_bh);
132#endif
139 133
140void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, 134#ifndef CONFIG_INLINE_SPIN_LOCK
141 struct lockdep_map *nest_lock) 135void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
142{ 136{
143 preempt_disable(); 137 __raw_spin_lock(lock);
144 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
145 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
146} 138}
147EXPORT_SYMBOL(_spin_lock_nest_lock); 139EXPORT_SYMBOL(_raw_spin_lock);
148
149#endif 140#endif
150 141
151#ifndef CONFIG_INLINE_SPIN_TRYLOCK 142#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
152int __lockfunc _spin_trylock(spinlock_t *lock) 143unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
153{ 144{
154 return __spin_trylock(lock); 145 return __raw_spin_lock_irqsave(lock);
155} 146}
156EXPORT_SYMBOL(_spin_trylock); 147EXPORT_SYMBOL(_raw_spin_lock_irqsave);
157#endif 148#endif
158 149
159#ifndef CONFIG_INLINE_READ_TRYLOCK 150#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
160int __lockfunc _read_trylock(rwlock_t *lock) 151void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
161{ 152{
162 return __read_trylock(lock); 153 __raw_spin_lock_irq(lock);
163} 154}
164EXPORT_SYMBOL(_read_trylock); 155EXPORT_SYMBOL(_raw_spin_lock_irq);
165#endif 156#endif
166 157
167#ifndef CONFIG_INLINE_WRITE_TRYLOCK 158#ifndef CONFIG_INLINE_SPIN_LOCK_BH
168int __lockfunc _write_trylock(rwlock_t *lock) 159void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
169{ 160{
170 return __write_trylock(lock); 161 __raw_spin_lock_bh(lock);
171} 162}
172EXPORT_SYMBOL(_write_trylock); 163EXPORT_SYMBOL(_raw_spin_lock_bh);
173#endif 164#endif
174 165
175#ifndef CONFIG_INLINE_READ_LOCK 166#ifndef CONFIG_INLINE_SPIN_UNLOCK
176void __lockfunc _read_lock(rwlock_t *lock) 167void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
177{ 168{
178 __read_lock(lock); 169 __raw_spin_unlock(lock);
179} 170}
180EXPORT_SYMBOL(_read_lock); 171EXPORT_SYMBOL(_raw_spin_unlock);
181#endif 172#endif
182 173
183#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE 174#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
184unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) 175void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
185{ 176{
186 return __spin_lock_irqsave(lock); 177 __raw_spin_unlock_irqrestore(lock, flags);
187} 178}
188EXPORT_SYMBOL(_spin_lock_irqsave); 179EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
189#endif 180#endif
190 181
191#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ 182#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
192void __lockfunc _spin_lock_irq(spinlock_t *lock) 183void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
193{ 184{
194 __spin_lock_irq(lock); 185 __raw_spin_unlock_irq(lock);
195} 186}
196EXPORT_SYMBOL(_spin_lock_irq); 187EXPORT_SYMBOL(_raw_spin_unlock_irq);
197#endif 188#endif
198 189
199#ifndef CONFIG_INLINE_SPIN_LOCK_BH 190#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
200void __lockfunc _spin_lock_bh(spinlock_t *lock) 191void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
201{ 192{
202 __spin_lock_bh(lock); 193 __raw_spin_unlock_bh(lock);
203} 194}
204EXPORT_SYMBOL(_spin_lock_bh); 195EXPORT_SYMBOL(_raw_spin_unlock_bh);
205#endif 196#endif
206 197
207#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE 198#ifndef CONFIG_INLINE_READ_TRYLOCK
208unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) 199int __lockfunc _raw_read_trylock(rwlock_t *lock)
209{ 200{
210 return __read_lock_irqsave(lock); 201 return __raw_read_trylock(lock);
211} 202}
212EXPORT_SYMBOL(_read_lock_irqsave); 203EXPORT_SYMBOL(_raw_read_trylock);
213#endif 204#endif
214 205
215#ifndef CONFIG_INLINE_READ_LOCK_IRQ 206#ifndef CONFIG_INLINE_READ_LOCK
216void __lockfunc _read_lock_irq(rwlock_t *lock) 207void __lockfunc _raw_read_lock(rwlock_t *lock)
217{ 208{
218 __read_lock_irq(lock); 209 __raw_read_lock(lock);
219} 210}
220EXPORT_SYMBOL(_read_lock_irq); 211EXPORT_SYMBOL(_raw_read_lock);
221#endif 212#endif
222 213
223#ifndef CONFIG_INLINE_READ_LOCK_BH 214#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
224void __lockfunc _read_lock_bh(rwlock_t *lock) 215unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
225{ 216{
226 __read_lock_bh(lock); 217 return __raw_read_lock_irqsave(lock);
227} 218}
228EXPORT_SYMBOL(_read_lock_bh); 219EXPORT_SYMBOL(_raw_read_lock_irqsave);
229#endif 220#endif
230 221
231#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE 222#ifndef CONFIG_INLINE_READ_LOCK_IRQ
232unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) 223void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
233{ 224{
234 return __write_lock_irqsave(lock); 225 __raw_read_lock_irq(lock);
235} 226}
236EXPORT_SYMBOL(_write_lock_irqsave); 227EXPORT_SYMBOL(_raw_read_lock_irq);
237#endif 228#endif
238 229
239#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ 230#ifndef CONFIG_INLINE_READ_LOCK_BH
240void __lockfunc _write_lock_irq(rwlock_t *lock) 231void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
241{ 232{
242 __write_lock_irq(lock); 233 __raw_read_lock_bh(lock);
243} 234}
244EXPORT_SYMBOL(_write_lock_irq); 235EXPORT_SYMBOL(_raw_read_lock_bh);
245#endif 236#endif
246 237
247#ifndef CONFIG_INLINE_WRITE_LOCK_BH 238#ifndef CONFIG_INLINE_READ_UNLOCK
248void __lockfunc _write_lock_bh(rwlock_t *lock) 239void __lockfunc _raw_read_unlock(rwlock_t *lock)
249{ 240{
250 __write_lock_bh(lock); 241 __raw_read_unlock(lock);
251} 242}
252EXPORT_SYMBOL(_write_lock_bh); 243EXPORT_SYMBOL(_raw_read_unlock);
253#endif 244#endif
254 245
255#ifndef CONFIG_INLINE_SPIN_LOCK 246#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
256void __lockfunc _spin_lock(spinlock_t *lock) 247void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
257{ 248{
258 __spin_lock(lock); 249 __raw_read_unlock_irqrestore(lock, flags);
259} 250}
260EXPORT_SYMBOL(_spin_lock); 251EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
261#endif 252#endif
262 253
263#ifndef CONFIG_INLINE_WRITE_LOCK 254#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
264void __lockfunc _write_lock(rwlock_t *lock) 255void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
265{ 256{
266 __write_lock(lock); 257 __raw_read_unlock_irq(lock);
267} 258}
268EXPORT_SYMBOL(_write_lock); 259EXPORT_SYMBOL(_raw_read_unlock_irq);
269#endif 260#endif
270 261
271#ifndef CONFIG_INLINE_SPIN_UNLOCK 262#ifndef CONFIG_INLINE_READ_UNLOCK_BH
272void __lockfunc _spin_unlock(spinlock_t *lock) 263void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
273{ 264{
274 __spin_unlock(lock); 265 __raw_read_unlock_bh(lock);
275} 266}
276EXPORT_SYMBOL(_spin_unlock); 267EXPORT_SYMBOL(_raw_read_unlock_bh);
277#endif 268#endif
278 269
279#ifndef CONFIG_INLINE_WRITE_UNLOCK 270#ifndef CONFIG_INLINE_WRITE_TRYLOCK
280void __lockfunc _write_unlock(rwlock_t *lock) 271int __lockfunc _raw_write_trylock(rwlock_t *lock)
281{ 272{
282 __write_unlock(lock); 273 return __raw_write_trylock(lock);
283} 274}
284EXPORT_SYMBOL(_write_unlock); 275EXPORT_SYMBOL(_raw_write_trylock);
285#endif 276#endif
286 277
287#ifndef CONFIG_INLINE_READ_UNLOCK 278#ifndef CONFIG_INLINE_WRITE_LOCK
288void __lockfunc _read_unlock(rwlock_t *lock) 279void __lockfunc _raw_write_lock(rwlock_t *lock)
289{ 280{
290 __read_unlock(lock); 281 __raw_write_lock(lock);
291} 282}
292EXPORT_SYMBOL(_read_unlock); 283EXPORT_SYMBOL(_raw_write_lock);
293#endif 284#endif
294 285
295#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE 286#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
296void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 287unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
297{ 288{
298 __spin_unlock_irqrestore(lock, flags); 289 return __raw_write_lock_irqsave(lock);
299} 290}
300EXPORT_SYMBOL(_spin_unlock_irqrestore); 291EXPORT_SYMBOL(_raw_write_lock_irqsave);
301#endif 292#endif
302 293
303#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ 294#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
304void __lockfunc _spin_unlock_irq(spinlock_t *lock) 295void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
305{ 296{
306 __spin_unlock_irq(lock); 297 __raw_write_lock_irq(lock);
307} 298}
308EXPORT_SYMBOL(_spin_unlock_irq); 299EXPORT_SYMBOL(_raw_write_lock_irq);
309#endif 300#endif
310 301
311#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH 302#ifndef CONFIG_INLINE_WRITE_LOCK_BH
312void __lockfunc _spin_unlock_bh(spinlock_t *lock) 303void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
313{ 304{
314 __spin_unlock_bh(lock); 305 __raw_write_lock_bh(lock);
315} 306}
316EXPORT_SYMBOL(_spin_unlock_bh); 307EXPORT_SYMBOL(_raw_write_lock_bh);
317#endif 308#endif
318 309
319#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE 310#ifndef CONFIG_INLINE_WRITE_UNLOCK
320void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 311void __lockfunc _raw_write_unlock(rwlock_t *lock)
321{ 312{
322 __read_unlock_irqrestore(lock, flags); 313 __raw_write_unlock(lock);
323} 314}
324EXPORT_SYMBOL(_read_unlock_irqrestore); 315EXPORT_SYMBOL(_raw_write_unlock);
325#endif 316#endif
326 317
327#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ 318#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
328void __lockfunc _read_unlock_irq(rwlock_t *lock) 319void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
329{ 320{
330 __read_unlock_irq(lock); 321 __raw_write_unlock_irqrestore(lock, flags);
331} 322}
332EXPORT_SYMBOL(_read_unlock_irq); 323EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
333#endif 324#endif
334 325
335#ifndef CONFIG_INLINE_READ_UNLOCK_BH 326#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
336void __lockfunc _read_unlock_bh(rwlock_t *lock) 327void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
337{ 328{
338 __read_unlock_bh(lock); 329 __raw_write_unlock_irq(lock);
339} 330}
340EXPORT_SYMBOL(_read_unlock_bh); 331EXPORT_SYMBOL(_raw_write_unlock_irq);
341#endif 332#endif
342 333
343#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE 334#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
344void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 335void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
345{ 336{
346 __write_unlock_irqrestore(lock, flags); 337 __raw_write_unlock_bh(lock);
347} 338}
348EXPORT_SYMBOL(_write_unlock_irqrestore); 339EXPORT_SYMBOL(_raw_write_unlock_bh);
349#endif 340#endif
350 341
351#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ 342#ifdef CONFIG_DEBUG_LOCK_ALLOC
352void __lockfunc _write_unlock_irq(rwlock_t *lock) 343
344void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
353{ 345{
354 __write_unlock_irq(lock); 346 preempt_disable();
347 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
348 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
355} 349}
356EXPORT_SYMBOL(_write_unlock_irq); 350EXPORT_SYMBOL(_raw_spin_lock_nested);
357#endif
358 351
359#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH 352unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
360void __lockfunc _write_unlock_bh(rwlock_t *lock) 353 int subclass)
361{ 354{
362 __write_unlock_bh(lock); 355 unsigned long flags;
356
357 local_irq_save(flags);
358 preempt_disable();
359 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
360 LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
361 do_raw_spin_lock_flags, &flags);
362 return flags;
363} 363}
364EXPORT_SYMBOL(_write_unlock_bh); 364EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
365#endif
366 365
367#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH 366void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
368int __lockfunc _spin_trylock_bh(spinlock_t *lock) 367 struct lockdep_map *nest_lock)
369{ 368{
370 return __spin_trylock_bh(lock); 369 preempt_disable();
370 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
371 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
371} 372}
372EXPORT_SYMBOL(_spin_trylock_bh); 373EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
374
373#endif 375#endif
374 376
375notrace int in_lock_functions(unsigned long addr) 377notrace int in_lock_functions(unsigned long addr)
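kernel/spinlock.c holds the out-of-line lock functions, so the renamed name space is most visible here: the BUILD_LOCK_OPS-generated __##op##_lock* helpers become __raw_##op##_lock*, the exported _spin_* entry points become _raw_spin_* and take a raw_spinlock_t, and the low-level _raw_##op##_trylock / _raw_##op##_relax calls inside the macro become do_raw_##op##_trylock / arch_##op##_relax. BUILD_LOCK_OPS itself is ordinary token pasting; below is a reduced, stand-alone illustration of the technique with a toy lock type (nothing in it comes from the kernel, only the macro idea does):

#include <stdatomic.h>
#include <stdio.h>

/* Toy lock type standing in for raw_spinlock_t. */
typedef struct { atomic_flag flag; } toy_lock_t;

static int toy_spin_trylock(toy_lock_t *l)
{
        return !atomic_flag_test_and_set(&l->flag);
}

static void toy_spin_unlock(toy_lock_t *l)
{
        atomic_flag_clear(&l->flag);
}

/*
 * The same trick as BUILD_LOCK_OPS: one macro stamps out a family of
 * functions whose names are built by token pasting, so adding another
 * lock "op" needs only one more macro invocation.
 */
#define BUILD_TOY_LOCK_OPS(op)                                  \
static void toy_##op##_lock(toy_lock_t *lock)                   \
{                                                               \
        while (!toy_##op##_trylock(lock))                       \
                ;       /* spin until the trylock succeeds */   \
}

BUILD_TOY_LOCK_OPS(spin)

int main(void)
{
        toy_lock_t lock = { ATOMIC_FLAG_INIT };

        toy_spin_lock(&lock);   /* generated by BUILD_TOY_LOCK_OPS(spin) */
        puts("locked");
        toy_spin_unlock(&lock);
        return 0;
}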
diff --git a/kernel/sys.c b/kernel/sys.c
index 585d6cd10040..20ccfb5da6af 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -189,10 +189,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
189 !(user = find_user(who))) 189 !(user = find_user(who)))
190 goto out_unlock; /* No processes for this user */ 190 goto out_unlock; /* No processes for this user */
191 191
192 do_each_thread(g, p) 192 do_each_thread(g, p) {
193 if (__task_cred(p)->uid == who) 193 if (__task_cred(p)->uid == who)
194 error = set_one_prio(p, niceval, error); 194 error = set_one_prio(p, niceval, error);
195 while_each_thread(g, p); 195 } while_each_thread(g, p);
196 if (who != cred->uid) 196 if (who != cred->uid)
197 free_uid(user); /* For find_user() */ 197 free_uid(user); /* For find_user() */
198 break; 198 break;
@@ -252,13 +252,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
252 !(user = find_user(who))) 252 !(user = find_user(who)))
253 goto out_unlock; /* No processes for this user */ 253 goto out_unlock; /* No processes for this user */
254 254
255 do_each_thread(g, p) 255 do_each_thread(g, p) {
256 if (__task_cred(p)->uid == who) { 256 if (__task_cred(p)->uid == who) {
257 niceval = 20 - task_nice(p); 257 niceval = 20 - task_nice(p);
258 if (niceval > retval) 258 if (niceval > retval)
259 retval = niceval; 259 retval = niceval;
260 } 260 }
261 while_each_thread(g, p); 261 } while_each_thread(g, p);
262 if (who != cred->uid) 262 if (who != cred->uid)
263 free_uid(user); /* for find_user() */ 263 free_uid(user); /* for find_user() */
264 break; 264 break;
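The kernel/sys.c change is purely syntactic: braces are added around the do_each_thread()/while_each_thread() bodies in setpriority() and getpriority(), making the pairing of the two macros explicit. For reference, a sketch of the canonical bracketed form; the function is illustrative only, and as in the code above the tasklist lock must be held while iterating:

#include <linux/sched.h>

/* Count every thread in the system (caller holds tasklist_lock for reading). */
static int count_threads(void)
{
        struct task_struct *g, *p;
        int n = 0;

        do_each_thread(g, p) {
                n++;
        } while_each_thread(g, p);

        return n;
}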
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 554ac4894f0f..45e4bef0012a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1051,7 +1051,7 @@ static struct ctl_table vm_table[] = {
1051 .extra2 = &one_hundred, 1051 .extra2 = &one_hundred,
1052 }, 1052 },
1053#ifdef CONFIG_HUGETLB_PAGE 1053#ifdef CONFIG_HUGETLB_PAGE
1054 { 1054 {
1055 .procname = "nr_hugepages", 1055 .procname = "nr_hugepages",
1056 .data = NULL, 1056 .data = NULL,
1057 .maxlen = sizeof(unsigned long), 1057 .maxlen = sizeof(unsigned long),
@@ -1059,7 +1059,18 @@ static struct ctl_table vm_table[] = {
1059 .proc_handler = hugetlb_sysctl_handler, 1059 .proc_handler = hugetlb_sysctl_handler,
1060 .extra1 = (void *)&hugetlb_zero, 1060 .extra1 = (void *)&hugetlb_zero,
1061 .extra2 = (void *)&hugetlb_infinity, 1061 .extra2 = (void *)&hugetlb_infinity,
1062 }, 1062 },
1063#ifdef CONFIG_NUMA
1064 {
1065 .procname = "nr_hugepages_mempolicy",
1066 .data = NULL,
1067 .maxlen = sizeof(unsigned long),
1068 .mode = 0644,
1069 .proc_handler = &hugetlb_mempolicy_sysctl_handler,
1070 .extra1 = (void *)&hugetlb_zero,
1071 .extra2 = (void *)&hugetlb_infinity,
1072 },
1073#endif
1063 { 1074 {
1064 .procname = "hugetlb_shm_group", 1075 .procname = "hugetlb_shm_group",
1065 .data = &sysctl_hugetlb_shm_group, 1076 .data = &sysctl_hugetlb_shm_group,
@@ -1120,7 +1131,8 @@ static struct ctl_table vm_table[] = {
1120 .data = &sysctl_max_map_count, 1131 .data = &sysctl_max_map_count,
1121 .maxlen = sizeof(sysctl_max_map_count), 1132 .maxlen = sizeof(sysctl_max_map_count),
1122 .mode = 0644, 1133 .mode = 0644,
1123 .proc_handler = proc_dointvec 1134 .proc_handler = proc_dointvec,
1135 .extra1 = &zero,
1124 }, 1136 },
1125#else 1137#else
1126 { 1138 {
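The sysctl hunk adds a NUMA-only vm.nr_hugepages_mempolicy entry next to nr_hugepages, routed through hugetlb_mempolicy_sysctl_handler(), and adds .extra1 = &zero to the max_map_count entry (with the trailing comma the extra initializer now requires). A hedged userspace sketch of how the new knob would be exercised; the handler name suggests the pool is grown according to the writing task's NUMA memory policy, so it would typically be run under a policy set with numactl or set_mempolicy():

    #include <stdio.h>

    /* Illustrative only: request 64 huge pages via the new procfs entry.
     * Requires CONFIG_HUGETLB_PAGE and CONFIG_NUMA; run under the NUMA
     * policy (e.g. numactl --membind) whose nodes should receive the pages. */
    int main(void)
    {
            FILE *f = fopen("/proc/sys/vm/nr_hugepages_mempolicy", "w");

            if (!f) {
                    perror("nr_hugepages_mempolicy");
                    return 1;
            }
            fprintf(f, "%d\n", 64);
            fclose(f);
            return 0;
    }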
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 20a8920029ee..3d5fc0fd1cca 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -30,7 +30,7 @@ static LIST_HEAD(clockevents_released);
30static RAW_NOTIFIER_HEAD(clockevents_chain); 30static RAW_NOTIFIER_HEAD(clockevents_chain);
31 31
32/* Protection for the above */ 32/* Protection for the above */
33static DEFINE_SPINLOCK(clockevents_lock); 33static DEFINE_RAW_SPINLOCK(clockevents_lock);
34 34
35/** 35/**
36 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 36 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
@@ -141,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb)
141 unsigned long flags; 141 unsigned long flags;
142 int ret; 142 int ret;
143 143
144 spin_lock_irqsave(&clockevents_lock, flags); 144 raw_spin_lock_irqsave(&clockevents_lock, flags);
145 ret = raw_notifier_chain_register(&clockevents_chain, nb); 145 ret = raw_notifier_chain_register(&clockevents_chain, nb);
146 spin_unlock_irqrestore(&clockevents_lock, flags); 146 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
147 147
148 return ret; 148 return ret;
149} 149}
@@ -185,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev)
185 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 185 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
186 BUG_ON(!dev->cpumask); 186 BUG_ON(!dev->cpumask);
187 187
188 spin_lock_irqsave(&clockevents_lock, flags); 188 raw_spin_lock_irqsave(&clockevents_lock, flags);
189 189
190 list_add(&dev->list, &clockevent_devices); 190 list_add(&dev->list, &clockevent_devices);
191 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); 191 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
192 clockevents_notify_released(); 192 clockevents_notify_released();
193 193
194 spin_unlock_irqrestore(&clockevents_lock, flags); 194 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
195} 195}
196EXPORT_SYMBOL_GPL(clockevents_register_device); 196EXPORT_SYMBOL_GPL(clockevents_register_device);
197 197
@@ -241,7 +241,7 @@ void clockevents_notify(unsigned long reason, void *arg)
241 struct list_head *node, *tmp; 241 struct list_head *node, *tmp;
242 unsigned long flags; 242 unsigned long flags;
243 243
244 spin_lock_irqsave(&clockevents_lock, flags); 244 raw_spin_lock_irqsave(&clockevents_lock, flags);
245 clockevents_do_notify(reason, arg); 245 clockevents_do_notify(reason, arg);
246 246
247 switch (reason) { 247 switch (reason) {
@@ -256,7 +256,7 @@ void clockevents_notify(unsigned long reason, void *arg)
256 default: 256 default:
257 break; 257 break;
258 } 258 }
259 spin_unlock_irqrestore(&clockevents_lock, flags); 259 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
260} 260}
261EXPORT_SYMBOL_GPL(clockevents_notify); 261EXPORT_SYMBOL_GPL(clockevents_notify);
262#endif 262#endif
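The clockevents_lock conversion is the pattern repeated through the timekeeping and tick files that follow: a lock taken in the low-level clock-event and tick paths moves from spinlock_t to raw_spinlock_t, and every acquisition switches to the raw_spin_* API, so the lock keeps spinning (and keeps its irqsave semantics) even on kernel configurations where ordinary spinlocks may become sleeping locks. A minimal sketch of the converted pattern, assuming a lock that protects list state touched with interrupts disabled:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_RAW_SPINLOCK(example_lock);   /* always a true spinning lock */
    static LIST_HEAD(example_list);

    static void example_add(struct list_head *node)
    {
            unsigned long flags;

            /* raw_spin_lock_irqsave() never sleeps, so it is safe in the
             * tick and clock-event hot paths converted above. */
            raw_spin_lock_irqsave(&example_lock, flags);
            list_add(node, &example_list);
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }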
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index c2ec25087a35..b3bafd5fc66d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device;
31/* FIXME: Use cpumask_var_t. */ 31/* FIXME: Use cpumask_var_t. */
32static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); 32static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
33static DECLARE_BITMAP(tmpmask, NR_CPUS); 33static DECLARE_BITMAP(tmpmask, NR_CPUS);
34static DEFINE_SPINLOCK(tick_broadcast_lock); 34static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
35static int tick_broadcast_force; 35static int tick_broadcast_force;
36 36
37#ifdef CONFIG_TICK_ONESHOT 37#ifdef CONFIG_TICK_ONESHOT
@@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
96 unsigned long flags; 96 unsigned long flags;
97 int ret = 0; 97 int ret = 0;
98 98
99 spin_lock_irqsave(&tick_broadcast_lock, flags); 99 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
100 100
101 /* 101 /*
102 * Devices might be registered with both periodic and oneshot 102 * Devices might be registered with both periodic and oneshot
@@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
122 tick_broadcast_clear_oneshot(cpu); 122 tick_broadcast_clear_oneshot(cpu);
123 } 123 }
124 } 124 }
125 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 125 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
126 return ret; 126 return ret;
127} 127}
128 128
@@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask)
161 */ 161 */
162static void tick_do_periodic_broadcast(void) 162static void tick_do_periodic_broadcast(void)
163{ 163{
164 spin_lock(&tick_broadcast_lock); 164 raw_spin_lock(&tick_broadcast_lock);
165 165
166 cpumask_and(to_cpumask(tmpmask), 166 cpumask_and(to_cpumask(tmpmask),
167 cpu_online_mask, tick_get_broadcast_mask()); 167 cpu_online_mask, tick_get_broadcast_mask());
168 tick_do_broadcast(to_cpumask(tmpmask)); 168 tick_do_broadcast(to_cpumask(tmpmask));
169 169
170 spin_unlock(&tick_broadcast_lock); 170 raw_spin_unlock(&tick_broadcast_lock);
171} 171}
172 172
173/* 173/*
@@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
212 unsigned long flags; 212 unsigned long flags;
213 int cpu, bc_stopped; 213 int cpu, bc_stopped;
214 214
215 spin_lock_irqsave(&tick_broadcast_lock, flags); 215 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
216 216
217 cpu = smp_processor_id(); 217 cpu = smp_processor_id();
218 td = &per_cpu(tick_cpu_device, cpu); 218 td = &per_cpu(tick_cpu_device, cpu);
@@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
263 tick_broadcast_setup_oneshot(bc); 263 tick_broadcast_setup_oneshot(bc);
264 } 264 }
265out: 265out:
266 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 266 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
267} 267}
268 268
269/* 269/*
@@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
299 unsigned long flags; 299 unsigned long flags;
300 unsigned int cpu = *cpup; 300 unsigned int cpu = *cpup;
301 301
302 spin_lock_irqsave(&tick_broadcast_lock, flags); 302 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
303 303
304 bc = tick_broadcast_device.evtdev; 304 bc = tick_broadcast_device.evtdev;
305 cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 305 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
@@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
309 clockevents_shutdown(bc); 309 clockevents_shutdown(bc);
310 } 310 }
311 311
312 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 312 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
313} 313}
314 314
315void tick_suspend_broadcast(void) 315void tick_suspend_broadcast(void)
@@ -317,13 +317,13 @@ void tick_suspend_broadcast(void)
317 struct clock_event_device *bc; 317 struct clock_event_device *bc;
318 unsigned long flags; 318 unsigned long flags;
319 319
320 spin_lock_irqsave(&tick_broadcast_lock, flags); 320 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
321 321
322 bc = tick_broadcast_device.evtdev; 322 bc = tick_broadcast_device.evtdev;
323 if (bc) 323 if (bc)
324 clockevents_shutdown(bc); 324 clockevents_shutdown(bc);
325 325
326 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 326 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
327} 327}
328 328
329int tick_resume_broadcast(void) 329int tick_resume_broadcast(void)
@@ -332,7 +332,7 @@ int tick_resume_broadcast(void)
332 unsigned long flags; 332 unsigned long flags;
333 int broadcast = 0; 333 int broadcast = 0;
334 334
335 spin_lock_irqsave(&tick_broadcast_lock, flags); 335 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
336 336
337 bc = tick_broadcast_device.evtdev; 337 bc = tick_broadcast_device.evtdev;
338 338
@@ -351,7 +351,7 @@ int tick_resume_broadcast(void)
351 break; 351 break;
352 } 352 }
353 } 353 }
354 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 354 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
355 355
356 return broadcast; 356 return broadcast;
357} 357}
@@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
405 ktime_t now, next_event; 405 ktime_t now, next_event;
406 int cpu; 406 int cpu;
407 407
408 spin_lock(&tick_broadcast_lock); 408 raw_spin_lock(&tick_broadcast_lock);
409again: 409again:
410 dev->next_event.tv64 = KTIME_MAX; 410 dev->next_event.tv64 = KTIME_MAX;
411 next_event.tv64 = KTIME_MAX; 411 next_event.tv64 = KTIME_MAX;
@@ -443,7 +443,7 @@ again:
443 if (tick_broadcast_set_event(next_event, 0)) 443 if (tick_broadcast_set_event(next_event, 0))
444 goto again; 444 goto again;
445 } 445 }
446 spin_unlock(&tick_broadcast_lock); 446 raw_spin_unlock(&tick_broadcast_lock);
447} 447}
448 448
449/* 449/*
@@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
457 unsigned long flags; 457 unsigned long flags;
458 int cpu; 458 int cpu;
459 459
460 spin_lock_irqsave(&tick_broadcast_lock, flags); 460 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
461 461
462 /* 462 /*
463 * Periodic mode does not care about the enter/exit of power 463 * Periodic mode does not care about the enter/exit of power
@@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
492 } 492 }
493 493
494out: 494out:
495 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 495 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
496} 496}
497 497
498/* 498/*
@@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void)
563 struct clock_event_device *bc; 563 struct clock_event_device *bc;
564 unsigned long flags; 564 unsigned long flags;
565 565
566 spin_lock_irqsave(&tick_broadcast_lock, flags); 566 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
567 567
568 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; 568 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
569 bc = tick_broadcast_device.evtdev; 569 bc = tick_broadcast_device.evtdev;
570 if (bc) 570 if (bc)
571 tick_broadcast_setup_oneshot(bc); 571 tick_broadcast_setup_oneshot(bc);
572 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 572 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
573} 573}
574 574
575 575
@@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
581 unsigned long flags; 581 unsigned long flags;
582 unsigned int cpu = *cpup; 582 unsigned int cpu = *cpup;
583 583
584 spin_lock_irqsave(&tick_broadcast_lock, flags); 584 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
585 585
586 /* 586 /*
587 * Clear the broadcast mask flag for the dead cpu, but do not 587 * Clear the broadcast mask flag for the dead cpu, but do not
@@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
589 */ 589 */
590 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); 590 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
591 591
592 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 592 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
593} 593}
594 594
595/* 595/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 83c4417b6a3c..b6b898d2eeef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
34ktime_t tick_next_period; 34ktime_t tick_next_period;
35ktime_t tick_period; 35ktime_t tick_period;
36int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; 36int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
37DEFINE_SPINLOCK(tick_device_lock); 37static DEFINE_RAW_SPINLOCK(tick_device_lock);
38 38
39/* 39/*
40 * Debugging: see timer_list.c 40 * Debugging: see timer_list.c
@@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
209 int cpu, ret = NOTIFY_OK; 209 int cpu, ret = NOTIFY_OK;
210 unsigned long flags; 210 unsigned long flags;
211 211
212 spin_lock_irqsave(&tick_device_lock, flags); 212 raw_spin_lock_irqsave(&tick_device_lock, flags);
213 213
214 cpu = smp_processor_id(); 214 cpu = smp_processor_id();
215 if (!cpumask_test_cpu(cpu, newdev->cpumask)) 215 if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
268 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 268 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
269 tick_oneshot_notify(); 269 tick_oneshot_notify();
270 270
271 spin_unlock_irqrestore(&tick_device_lock, flags); 271 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
272 return NOTIFY_STOP; 272 return NOTIFY_STOP;
273 273
274out_bc: 274out_bc:
@@ -278,7 +278,7 @@ out_bc:
278 if (tick_check_broadcast_device(newdev)) 278 if (tick_check_broadcast_device(newdev))
279 ret = NOTIFY_STOP; 279 ret = NOTIFY_STOP;
280 280
281 spin_unlock_irqrestore(&tick_device_lock, flags); 281 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
282 282
283 return ret; 283 return ret;
284} 284}
@@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup)
311 struct clock_event_device *dev = td->evtdev; 311 struct clock_event_device *dev = td->evtdev;
312 unsigned long flags; 312 unsigned long flags;
313 313
314 spin_lock_irqsave(&tick_device_lock, flags); 314 raw_spin_lock_irqsave(&tick_device_lock, flags);
315 td->mode = TICKDEV_MODE_PERIODIC; 315 td->mode = TICKDEV_MODE_PERIODIC;
316 if (dev) { 316 if (dev) {
317 /* 317 /*
@@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup)
322 clockevents_exchange_device(dev, NULL); 322 clockevents_exchange_device(dev, NULL);
323 td->evtdev = NULL; 323 td->evtdev = NULL;
324 } 324 }
325 spin_unlock_irqrestore(&tick_device_lock, flags); 325 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
326} 326}
327 327
328static void tick_suspend(void) 328static void tick_suspend(void)
@@ -330,9 +330,9 @@ static void tick_suspend(void)
330 struct tick_device *td = &__get_cpu_var(tick_cpu_device); 330 struct tick_device *td = &__get_cpu_var(tick_cpu_device);
331 unsigned long flags; 331 unsigned long flags;
332 332
333 spin_lock_irqsave(&tick_device_lock, flags); 333 raw_spin_lock_irqsave(&tick_device_lock, flags);
334 clockevents_shutdown(td->evtdev); 334 clockevents_shutdown(td->evtdev);
335 spin_unlock_irqrestore(&tick_device_lock, flags); 335 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
336} 336}
337 337
338static void tick_resume(void) 338static void tick_resume(void)
@@ -341,7 +341,7 @@ static void tick_resume(void)
341 unsigned long flags; 341 unsigned long flags;
342 int broadcast = tick_resume_broadcast(); 342 int broadcast = tick_resume_broadcast();
343 343
344 spin_lock_irqsave(&tick_device_lock, flags); 344 raw_spin_lock_irqsave(&tick_device_lock, flags);
345 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); 345 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
346 346
347 if (!broadcast) { 347 if (!broadcast) {
@@ -350,7 +350,7 @@ static void tick_resume(void)
350 else 350 else
351 tick_resume_oneshot(); 351 tick_resume_oneshot();
352 } 352 }
353 spin_unlock_irqrestore(&tick_device_lock, flags); 353 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
354} 354}
355 355
356/* 356/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index b1c05bf75ee0..290eefbc1f60 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -6,7 +6,6 @@
6#define TICK_DO_TIMER_BOOT -2 6#define TICK_DO_TIMER_BOOT -2
7 7
8DECLARE_PER_CPU(struct tick_device, tick_cpu_device); 8DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
9extern spinlock_t tick_device_lock;
10extern ktime_t tick_next_period; 9extern ktime_t tick_next_period;
11extern ktime_t tick_period; 10extern ktime_t tick_period;
12extern int tick_do_timer_cpu __read_mostly; 11extern int tick_do_timer_cpu __read_mostly;
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 96ff643a5a59..12f5c55090be 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -89,7 +89,7 @@ int timecompare_offset(struct timecompare *sync,
89 * source time 89 * source time
90 */ 90 */
91 sample.offset = 91 sample.offset =
92 ktime_to_ns(ktime_add(end, start)) / 2 - 92 (ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
93 ts; 93 ts;
94 94
95 /* simple insertion sort based on duration */ 95 /* simple insertion sort based on duration */
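The timecompare hunk reworks the offset average so that each timestamp is converted to a 64-bit nanosecond count before the two are summed and halved, rather than adding them as ktime_t values first. A common reason to restructure such an average is to keep the intermediate sum from overflowing; whether or not that is the motivation here, the usual overflow-safe midpoint idiom is worth noting (illustrative plain C, not a kernel helper):

    #include <stdint.h>

    /* Midpoint of two signed 64-bit timestamps: (a + b) / 2 can wrap when
     * both values are large, while a + (b - a) / 2 stays in range as long
     * as b >= a. */
    static int64_t midpoint_ns(int64_t a, int64_t b)
    {
            return a + (b - a) / 2;
    }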
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 9d80db4747d4..28265636b6c2 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
84 84
85next_one: 85next_one:
86 i = 0; 86 i = 0;
87 spin_lock_irqsave(&base->cpu_base->lock, flags); 87 raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
88 88
89 curr = base->first; 89 curr = base->first;
90 /* 90 /*
@@ -100,13 +100,13 @@ next_one:
100 100
101 timer = rb_entry(curr, struct hrtimer, node); 101 timer = rb_entry(curr, struct hrtimer, node);
102 tmp = *timer; 102 tmp = *timer;
103 spin_unlock_irqrestore(&base->cpu_base->lock, flags); 103 raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
104 104
105 print_timer(m, timer, &tmp, i, now); 105 print_timer(m, timer, &tmp, i, now);
106 next++; 106 next++;
107 goto next_one; 107 goto next_one;
108 } 108 }
109 spin_unlock_irqrestore(&base->cpu_base->lock, flags); 109 raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
110} 110}
111 111
112static void 112static void
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 63b117e9eba1..2f3b585b8d7d 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
86/* 86/*
87 * Per-CPU lookup locks for fast hash lookup: 87 * Per-CPU lookup locks for fast hash lookup:
88 */ 88 */
89static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock); 89static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
90 90
91/* 91/*
92 * Mutex to serialize state changes with show-stats activities: 92 * Mutex to serialize state changes with show-stats activities:
@@ -238,7 +238,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
238 /* 238 /*
239 * It doesnt matter which lock we take: 239 * It doesnt matter which lock we take:
240 */ 240 */
241 spinlock_t *lock; 241 raw_spinlock_t *lock;
242 struct entry *entry, input; 242 struct entry *entry, input;
243 unsigned long flags; 243 unsigned long flags;
244 244
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
253 input.pid = pid; 253 input.pid = pid;
254 input.timer_flag = timer_flag; 254 input.timer_flag = timer_flag;
255 255
256 spin_lock_irqsave(lock, flags); 256 raw_spin_lock_irqsave(lock, flags);
257 if (!timer_stats_active) 257 if (!timer_stats_active)
258 goto out_unlock; 258 goto out_unlock;
259 259
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
264 atomic_inc(&overflow_count); 264 atomic_inc(&overflow_count);
265 265
266 out_unlock: 266 out_unlock:
267 spin_unlock_irqrestore(lock, flags); 267 raw_spin_unlock_irqrestore(lock, flags);
268} 268}
269 269
270static void print_name_offset(struct seq_file *m, unsigned long addr) 270static void print_name_offset(struct seq_file *m, unsigned long addr)
@@ -348,10 +348,11 @@ static void sync_access(void)
348 int cpu; 348 int cpu;
349 349
350 for_each_online_cpu(cpu) { 350 for_each_online_cpu(cpu) {
351 spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); 351 raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
352 spin_lock_irqsave(lock, flags); 352
353 raw_spin_lock_irqsave(lock, flags);
353 /* nothing */ 354 /* nothing */
354 spin_unlock_irqrestore(lock, flags); 355 raw_spin_unlock_irqrestore(lock, flags);
355 } 356 }
356} 357}
357 358
@@ -409,7 +410,7 @@ void __init init_timer_stats(void)
409 int cpu; 410 int cpu;
410 411
411 for_each_possible_cpu(cpu) 412 for_each_possible_cpu(cpu)
412 spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); 413 raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
413} 414}
414 415
415static int __init init_tstats_procfs(void) 416static int __init init_tstats_procfs(void)
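timer_stats applies the same conversion to per-CPU locks: tstats_lookup_lock becomes a DEFINE_PER_CPU(raw_spinlock_t, ...) and init_timer_stats() initialises each CPU's copy with raw_spin_lock_init(). A compact sketch of that per-CPU pattern, assuming nothing beyond what the hunk shows:

    #include <linux/percpu.h>
    #include <linux/spinlock.h>
    #include <linux/init.h>

    static DEFINE_PER_CPU(raw_spinlock_t, example_lookup_lock);

    static void __init example_init_locks(void)
    {
            int cpu;

            /* Initialise every possible CPU's copy before first use. */
            for_each_possible_cpu(cpu)
                    raw_spin_lock_init(&per_cpu(example_lookup_lock, cpu));
    }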
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
423 int cpu; 423 int cpu;
424 struct ring_buffer *buffer; 424 struct ring_buffer *buffer;
425 spinlock_t reader_lock; /* serialize readers */ 425 spinlock_t reader_lock; /* serialize readers */
426 raw_spinlock_t lock; 426 arch_spinlock_t lock;
427 struct lock_class_key lock_key; 427 struct lock_class_key lock_key;
428 struct list_head *pages; 428 struct list_head *pages;
429 struct buffer_page *head_page; /* read from head */ 429 struct buffer_page *head_page; /* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
998 cpu_buffer->buffer = buffer; 998 cpu_buffer->buffer = buffer;
999 spin_lock_init(&cpu_buffer->reader_lock); 999 spin_lock_init(&cpu_buffer->reader_lock);
1000 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1000 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1001 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1001 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1002 1002
1003 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1003 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1004 GFP_KERNEL, cpu_to_node(cpu)); 1004 GFP_KERNEL, cpu_to_node(cpu));
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2834 int ret; 2834 int ret;
2835 2835
2836 local_irq_save(flags); 2836 local_irq_save(flags);
2837 __raw_spin_lock(&cpu_buffer->lock); 2837 arch_spin_lock(&cpu_buffer->lock);
2838 2838
2839 again: 2839 again:
2840 /* 2840 /*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2923 goto again; 2923 goto again;
2924 2924
2925 out: 2925 out:
2926 __raw_spin_unlock(&cpu_buffer->lock); 2926 arch_spin_unlock(&cpu_buffer->lock);
2927 local_irq_restore(flags); 2927 local_irq_restore(flags);
2928 2928
2929 return reader; 2929 return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3286 synchronize_sched(); 3286 synchronize_sched();
3287 3287
3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3289 __raw_spin_lock(&cpu_buffer->lock); 3289 arch_spin_lock(&cpu_buffer->lock);
3290 rb_iter_reset(iter); 3290 rb_iter_reset(iter);
3291 __raw_spin_unlock(&cpu_buffer->lock); 3291 arch_spin_unlock(&cpu_buffer->lock);
3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3293 3293
3294 return iter; 3294 return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3409 goto out; 3409 goto out;
3410 3410
3411 __raw_spin_lock(&cpu_buffer->lock); 3411 arch_spin_lock(&cpu_buffer->lock);
3412 3412
3413 rb_reset_cpu(cpu_buffer); 3413 rb_reset_cpu(cpu_buffer);
3414 3414
3415 __raw_spin_unlock(&cpu_buffer->lock); 3415 arch_spin_unlock(&cpu_buffer->lock);
3416 3416
3417 out: 3417 out:
3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
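The ring-buffer hunks show the other half of the renaming: the old raw_spinlock_t (the bare architecture lock with no lockdep or debug checks, taken with __raw_spin_lock()) becomes arch_spinlock_t, taken with arch_spin_lock(), freeing the raw_spinlock_t name for the new always-spinning lock seen earlier. A sketch of lowest-level usage, assuming the caller disables interrupts itself as the ring buffer does:

    #include <linux/spinlock.h>

    /* arch_spinlock_t is the bare architecture lock: no lockdep, no
     * debugging, no implicit preemption or IRQ handling.  The caller must
     * provide those, as the ring-buffer and tracer code above does. */
    static arch_spinlock_t example_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    static void example_critical_section(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            arch_spin_lock(&example_lock);
            /* ... touch the data the lock protects ... */
            arch_spin_unlock(&example_lock);
            local_irq_restore(flags);
    }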
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c82dfd92fdfd..31118ae16f03 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
493 * protected by per_cpu spinlocks. But the action of the swap 493 * protected by per_cpu spinlocks. But the action of the swap
494 * needs its own lock. 494 * needs its own lock.
495 * 495 *
496 * This is defined as a raw_spinlock_t in order to help 496 * This is defined as a arch_spinlock_t in order to help
497 * with performance when lockdep debugging is enabled. 497 * with performance when lockdep debugging is enabled.
498 * 498 *
499 * It is also used in other places outside the update_max_tr 499 * It is also used in other places outside the update_max_tr
500 * so it needs to be defined outside of the 500 * so it needs to be defined outside of the
501 * CONFIG_TRACER_MAX_TRACE. 501 * CONFIG_TRACER_MAX_TRACE.
502 */ 502 */
503static raw_spinlock_t ftrace_max_lock = 503static arch_spinlock_t ftrace_max_lock =
504 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 504 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
505 505
506#ifdef CONFIG_TRACER_MAX_TRACE 506#ifdef CONFIG_TRACER_MAX_TRACE
507unsigned long __read_mostly tracing_max_latency; 507unsigned long __read_mostly tracing_max_latency;
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
555 return; 555 return;
556 556
557 WARN_ON_ONCE(!irqs_disabled()); 557 WARN_ON_ONCE(!irqs_disabled());
558 __raw_spin_lock(&ftrace_max_lock); 558 arch_spin_lock(&ftrace_max_lock);
559 559
560 tr->buffer = max_tr.buffer; 560 tr->buffer = max_tr.buffer;
561 max_tr.buffer = buf; 561 max_tr.buffer = buf;
562 562
563 __update_max_tr(tr, tsk, cpu); 563 __update_max_tr(tr, tsk, cpu);
564 __raw_spin_unlock(&ftrace_max_lock); 564 arch_spin_unlock(&ftrace_max_lock);
565} 565}
566 566
567/** 567/**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
581 return; 581 return;
582 582
583 WARN_ON_ONCE(!irqs_disabled()); 583 WARN_ON_ONCE(!irqs_disabled());
584 __raw_spin_lock(&ftrace_max_lock); 584 arch_spin_lock(&ftrace_max_lock);
585 585
586 ftrace_disable_cpu(); 586 ftrace_disable_cpu();
587 587
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
604 604
605 __update_max_tr(tr, tsk, cpu); 605 __update_max_tr(tr, tsk, cpu);
606 __raw_spin_unlock(&ftrace_max_lock); 606 arch_spin_unlock(&ftrace_max_lock);
607} 607}
608#endif /* CONFIG_TRACER_MAX_TRACE */ 608#endif /* CONFIG_TRACER_MAX_TRACE */
609 609
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
802static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 802static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
803static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; 803static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
804static int cmdline_idx; 804static int cmdline_idx;
805static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; 805static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
806 806
807/* temporary disable recording */ 807/* temporary disable recording */
808static atomic_t trace_record_cmdline_disabled __read_mostly; 808static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
915 * nor do we want to disable interrupts, 915 * nor do we want to disable interrupts,
916 * so if we miss here, then better luck next time. 916 * so if we miss here, then better luck next time.
917 */ 917 */
918 if (!__raw_spin_trylock(&trace_cmdline_lock)) 918 if (!arch_spin_trylock(&trace_cmdline_lock))
919 return; 919 return;
920 920
921 idx = map_pid_to_cmdline[tsk->pid]; 921 idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
940 940
941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
942 942
943 __raw_spin_unlock(&trace_cmdline_lock); 943 arch_spin_unlock(&trace_cmdline_lock);
944} 944}
945 945
946void trace_find_cmdline(int pid, char comm[]) 946void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
958 } 958 }
959 959
960 preempt_disable(); 960 preempt_disable();
961 __raw_spin_lock(&trace_cmdline_lock); 961 arch_spin_lock(&trace_cmdline_lock);
962 map = map_pid_to_cmdline[pid]; 962 map = map_pid_to_cmdline[pid];
963 if (map != NO_CMDLINE_MAP) 963 if (map != NO_CMDLINE_MAP)
964 strcpy(comm, saved_cmdlines[map]); 964 strcpy(comm, saved_cmdlines[map]);
965 else 965 else
966 strcpy(comm, "<...>"); 966 strcpy(comm, "<...>");
967 967
968 __raw_spin_unlock(&trace_cmdline_lock); 968 arch_spin_unlock(&trace_cmdline_lock);
969 preempt_enable(); 969 preempt_enable();
970} 970}
971 971
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1251 */ 1251 */
1252int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 1252int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1253{ 1253{
1254 static raw_spinlock_t trace_buf_lock = 1254 static arch_spinlock_t trace_buf_lock =
1255 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1255 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1256 static u32 trace_buf[TRACE_BUF_SIZE]; 1256 static u32 trace_buf[TRACE_BUF_SIZE];
1257 1257
1258 struct ftrace_event_call *call = &event_bprint; 1258 struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1283 1283
1284 /* Lockdep uses trace_printk for lock tracing */ 1284 /* Lockdep uses trace_printk for lock tracing */
1285 local_irq_save(flags); 1285 local_irq_save(flags);
1286 __raw_spin_lock(&trace_buf_lock); 1286 arch_spin_lock(&trace_buf_lock);
1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1288 1288
1289 if (len > TRACE_BUF_SIZE || len < 0) 1289 if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1304 ring_buffer_unlock_commit(buffer, event); 1304 ring_buffer_unlock_commit(buffer, event);
1305 1305
1306out_unlock: 1306out_unlock:
1307 __raw_spin_unlock(&trace_buf_lock); 1307 arch_spin_unlock(&trace_buf_lock);
1308 local_irq_restore(flags); 1308 local_irq_restore(flags);
1309 1309
1310out: 1310out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
1334int trace_array_vprintk(struct trace_array *tr, 1334int trace_array_vprintk(struct trace_array *tr,
1335 unsigned long ip, const char *fmt, va_list args) 1335 unsigned long ip, const char *fmt, va_list args)
1336{ 1336{
1337 static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; 1337 static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1338 static char trace_buf[TRACE_BUF_SIZE]; 1338 static char trace_buf[TRACE_BUF_SIZE];
1339 1339
1340 struct ftrace_event_call *call = &event_print; 1340 struct ftrace_event_call *call = &event_print;
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,
1360 1360
1361 pause_graph_tracing(); 1361 pause_graph_tracing();
1362 raw_local_irq_save(irq_flags); 1362 raw_local_irq_save(irq_flags);
1363 __raw_spin_lock(&trace_buf_lock); 1363 arch_spin_lock(&trace_buf_lock);
1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1365 1365
1366 size = sizeof(*entry) + len + 1; 1366 size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
1378 ring_buffer_unlock_commit(buffer, event); 1378 ring_buffer_unlock_commit(buffer, event);
1379 1379
1380 out_unlock: 1380 out_unlock:
1381 __raw_spin_unlock(&trace_buf_lock); 1381 arch_spin_unlock(&trace_buf_lock);
1382 raw_local_irq_restore(irq_flags); 1382 raw_local_irq_restore(irq_flags);
1383 unpause_graph_tracing(); 1383 unpause_graph_tracing();
1384 out: 1384 out:
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2279 mutex_lock(&tracing_cpumask_update_lock); 2279 mutex_lock(&tracing_cpumask_update_lock);
2280 2280
2281 local_irq_disable(); 2281 local_irq_disable();
2282 __raw_spin_lock(&ftrace_max_lock); 2282 arch_spin_lock(&ftrace_max_lock);
2283 for_each_tracing_cpu(cpu) { 2283 for_each_tracing_cpu(cpu) {
2284 /* 2284 /*
2285 * Increase/decrease the disabled counter if we are 2285 * Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2294 atomic_dec(&global_trace.data[cpu]->disabled); 2294 atomic_dec(&global_trace.data[cpu]->disabled);
2295 } 2295 }
2296 } 2296 }
2297 __raw_spin_unlock(&ftrace_max_lock); 2297 arch_spin_unlock(&ftrace_max_lock);
2298 local_irq_enable(); 2298 local_irq_enable();
2299 2299
2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new); 2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -3133,7 +3133,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3133 __free_page(spd->pages[idx]); 3133 __free_page(spd->pages[idx]);
3134} 3134}
3135 3135
3136static struct pipe_buf_operations tracing_pipe_buf_ops = { 3136static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3137 .can_merge = 0, 3137 .can_merge = 0,
3138 .map = generic_pipe_buf_map, 3138 .map = generic_pipe_buf_map,
3139 .unmap = generic_pipe_buf_unmap, 3139 .unmap = generic_pipe_buf_unmap,
@@ -3617,7 +3617,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3617} 3617}
3618 3618
3619/* Pipe buffer operations for a buffer. */ 3619/* Pipe buffer operations for a buffer. */
3620static struct pipe_buf_operations buffer_pipe_buf_ops = { 3620static const struct pipe_buf_operations buffer_pipe_buf_ops = {
3621 .can_merge = 0, 3621 .can_merge = 0,
3622 .map = generic_pipe_buf_map, 3622 .map = generic_pipe_buf_map,
3623 .unmap = generic_pipe_buf_unmap, 3623 .unmap = generic_pipe_buf_unmap,
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
4307 4307
4308static void __ftrace_dump(bool disable_tracing) 4308static void __ftrace_dump(bool disable_tracing)
4309{ 4309{
4310 static raw_spinlock_t ftrace_dump_lock = 4310 static arch_spinlock_t ftrace_dump_lock =
4311 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 4311 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4312 /* use static because iter can be a bit big for the stack */ 4312 /* use static because iter can be a bit big for the stack */
4313 static struct trace_iterator iter; 4313 static struct trace_iterator iter;
4314 unsigned int old_userobj; 4314 unsigned int old_userobj;
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
4318 4318
4319 /* only one dump */ 4319 /* only one dump */
4320 local_irq_save(flags); 4320 local_irq_save(flags);
4321 __raw_spin_lock(&ftrace_dump_lock); 4321 arch_spin_lock(&ftrace_dump_lock);
4322 if (dump_ran) 4322 if (dump_ran)
4323 goto out; 4323 goto out;
4324 4324
@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
4393 } 4393 }
4394 4394
4395 out: 4395 out:
4396 __raw_spin_unlock(&ftrace_dump_lock); 4396 arch_spin_unlock(&ftrace_dump_lock);
4397 local_irq_restore(flags); 4397 local_irq_restore(flags);
4398} 4398}
4399 4399
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 878c03f386ba..84a3a7ba072a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
71/* keep prev_time and lock in the same cacheline. */ 71/* keep prev_time and lock in the same cacheline. */
72static struct { 72static struct {
73 u64 prev_time; 73 u64 prev_time;
74 raw_spinlock_t lock; 74 arch_spinlock_t lock;
75} trace_clock_struct ____cacheline_aligned_in_smp = 75} trace_clock_struct ____cacheline_aligned_in_smp =
76 { 76 {
77 .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, 77 .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
78 }; 78 };
79 79
80u64 notrace trace_clock_global(void) 80u64 notrace trace_clock_global(void)
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
94 if (unlikely(in_nmi())) 94 if (unlikely(in_nmi()))
95 goto out; 95 goto out;
96 96
97 __raw_spin_lock(&trace_clock_struct.lock); 97 arch_spin_lock(&trace_clock_struct.lock);
98 98
99 /* 99 /*
100 * TODO: if this happens often then maybe we should reset 100 * TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
106 106
107 trace_clock_struct.prev_time = now; 107 trace_clock_struct.prev_time = now;
108 108
109 __raw_spin_unlock(&trace_clock_struct.lock); 109 arch_spin_unlock(&trace_clock_struct.lock);
110 110
111 out: 111 out:
112 raw_local_irq_restore(flags); 112 raw_local_irq_restore(flags);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d727676..0271742abb8d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
28static unsigned wakeup_prio = -1; 28static unsigned wakeup_prio = -1;
29static int wakeup_rt; 29static int wakeup_rt;
30 30
31static raw_spinlock_t wakeup_lock = 31static arch_spinlock_t wakeup_lock =
32 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 32 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
33 33
34static void __wakeup_reset(struct trace_array *tr); 34static void __wakeup_reset(struct trace_array *tr);
35 35
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
143 goto out; 143 goto out;
144 144
145 local_irq_save(flags); 145 local_irq_save(flags);
146 __raw_spin_lock(&wakeup_lock); 146 arch_spin_lock(&wakeup_lock);
147 147
148 /* We could race with grabbing wakeup_lock */ 148 /* We could race with grabbing wakeup_lock */
149 if (unlikely(!tracer_enabled || next != wakeup_task)) 149 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
169 169
170out_unlock: 170out_unlock:
171 __wakeup_reset(wakeup_trace); 171 __wakeup_reset(wakeup_trace);
172 __raw_spin_unlock(&wakeup_lock); 172 arch_spin_unlock(&wakeup_lock);
173 local_irq_restore(flags); 173 local_irq_restore(flags);
174out: 174out:
175 atomic_dec(&wakeup_trace->data[cpu]->disabled); 175 atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
193 tracing_reset_online_cpus(tr); 193 tracing_reset_online_cpus(tr);
194 194
195 local_irq_save(flags); 195 local_irq_save(flags);
196 __raw_spin_lock(&wakeup_lock); 196 arch_spin_lock(&wakeup_lock);
197 __wakeup_reset(tr); 197 __wakeup_reset(tr);
198 __raw_spin_unlock(&wakeup_lock); 198 arch_spin_unlock(&wakeup_lock);
199 local_irq_restore(flags); 199 local_irq_restore(flags);
200} 200}
201 201
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
225 goto out; 225 goto out;
226 226
227 /* interrupts should be off from try_to_wake_up */ 227 /* interrupts should be off from try_to_wake_up */
228 __raw_spin_lock(&wakeup_lock); 228 arch_spin_lock(&wakeup_lock);
229 229
230 /* check for races. */ 230 /* check for races. */
231 if (!tracer_enabled || p->prio >= wakeup_prio) 231 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
256 256
257out_locked: 257out_locked:
258 __raw_spin_unlock(&wakeup_lock); 258 arch_spin_unlock(&wakeup_lock);
259out: 259out:
260 atomic_dec(&wakeup_trace->data[cpu]->disabled); 260 atomic_dec(&wakeup_trace->data[cpu]->disabled);
261} 261}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index dc98309e839a..280fea470d67 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
67 67
68 /* Don't allow flipping of max traces now */ 68 /* Don't allow flipping of max traces now */
69 local_irq_save(flags); 69 local_irq_save(flags);
70 __raw_spin_lock(&ftrace_max_lock); 70 arch_spin_lock(&ftrace_max_lock);
71 71
72 cnt = ring_buffer_entries(tr->buffer); 72 cnt = ring_buffer_entries(tr->buffer);
73 73
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
85 break; 85 break;
86 } 86 }
87 tracing_on(); 87 tracing_on();
88 __raw_spin_unlock(&ftrace_max_lock); 88 arch_spin_unlock(&ftrace_max_lock);
89 local_irq_restore(flags); 89 local_irq_restore(flags);
90 90
91 if (count) 91 if (count)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e8..678a5120ee30 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
27}; 27};
28 28
29static unsigned long max_stack_size; 29static unsigned long max_stack_size;
30static raw_spinlock_t max_stack_lock = 30static arch_spinlock_t max_stack_lock =
31 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 31 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
32 32
33static int stack_trace_disabled __read_mostly; 33static int stack_trace_disabled __read_mostly;
34static DEFINE_PER_CPU(int, trace_active); 34static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
54 return; 54 return;
55 55
56 local_irq_save(flags); 56 local_irq_save(flags);
57 __raw_spin_lock(&max_stack_lock); 57 arch_spin_lock(&max_stack_lock);
58 58
59 /* a race could have already updated it */ 59 /* a race could have already updated it */
60 if (this_size <= max_stack_size) 60 if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
103 } 103 }
104 104
105 out: 105 out:
106 __raw_spin_unlock(&max_stack_lock); 106 arch_spin_unlock(&max_stack_lock);
107 local_irq_restore(flags); 107 local_irq_restore(flags);
108} 108}
109 109
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
171 return ret; 171 return ret;
172 172
173 local_irq_save(flags); 173 local_irq_save(flags);
174 __raw_spin_lock(&max_stack_lock); 174 arch_spin_lock(&max_stack_lock);
175 *ptr = val; 175 *ptr = val;
176 __raw_spin_unlock(&max_stack_lock); 176 arch_spin_unlock(&max_stack_lock);
177 local_irq_restore(flags); 177 local_irq_restore(flags);
178 178
179 return count; 179 return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
207static void *t_start(struct seq_file *m, loff_t *pos) 207static void *t_start(struct seq_file *m, loff_t *pos)
208{ 208{
209 local_irq_disable(); 209 local_irq_disable();
210 __raw_spin_lock(&max_stack_lock); 210 arch_spin_lock(&max_stack_lock);
211 211
212 if (*pos == 0) 212 if (*pos == 0)
213 return SEQ_START_TOKEN; 213 return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
217 217
218static void t_stop(struct seq_file *m, void *p) 218static void t_stop(struct seq_file *m, void *p)
219{ 219{
220 __raw_spin_unlock(&max_stack_lock); 220 arch_spin_unlock(&max_stack_lock);
221 local_irq_enable(); 221 local_irq_enable();
222} 222}
223 223
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2f22cf4576db..8cf9938dd147 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -575,7 +575,7 @@ config DEBUG_BUGVERBOSE
575 depends on BUG 575 depends on BUG
576 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ 576 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
577 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 577 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
578 default !EMBEDDED 578 default y
579 help 579 help
580 Say Y here to make BUG() panics output the file name and line number 580 Say Y here to make BUG() panics output the file name and line number
581 of the BUG call as well as the EIP and oops trace. This aids 581 of the BUG call as well as the EIP and oops trace. This aids
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5205a8dae5bc..4b1b083f219c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -4,17 +4,10 @@
4 4
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/ctype.h> 6#include <linux/ctype.h>
7#include <linux/string.h>
7#include <linux/slab.h> 8#include <linux/slab.h>
8#include <linux/module.h> 9#include <linux/module.h>
9 10
10static const char *skip_sep(const char *cp)
11{
12 while (*cp && isspace(*cp))
13 cp++;
14
15 return cp;
16}
17
18static const char *skip_arg(const char *cp) 11static const char *skip_arg(const char *cp)
19{ 12{
20 while (*cp && !isspace(*cp)) 13 while (*cp && !isspace(*cp))
@@ -28,7 +21,7 @@ static int count_argc(const char *str)
28 int count = 0; 21 int count = 0;
29 22
30 while (*str) { 23 while (*str) {
31 str = skip_sep(str); 24 str = skip_spaces(str);
32 if (*str) { 25 if (*str) {
33 count++; 26 count++;
34 str = skip_arg(str); 27 str = skip_arg(str);
@@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
82 argvp = argv; 75 argvp = argv;
83 76
84 while (*str) { 77 while (*str) {
85 str = skip_sep(str); 78 str = skip_spaces(str);
86 79
87 if (*str) { 80 if (*str) {
88 const char *p = str; 81 const char *p = str;
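argv_split() drops its private skip_sep() helper in favour of the shared skip_spaces() from <linux/string.h> (hence the new include). Both walk past leading whitespace and return a pointer to the first non-space character; isspace('\0') is false, so the explicit *cp check in the removed helper was redundant. Roughly what the shared helper does (the real skip_spaces() lives in lib/string.c and returns a non-const char * for caller convenience):

    #include <linux/ctype.h>

    /* Simplified sketch of skip_spaces(): advance past leading whitespace. */
    static const char *skip_spaces_sketch(const char *str)
    {
            while (isspace(*str))
                    str++;
            return str;
    }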
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 702565821c99..11bf49750583 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
271} 271}
272EXPORT_SYMBOL(__bitmap_weight); 272EXPORT_SYMBOL(__bitmap_weight);
273 273
274#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
275
276void bitmap_set(unsigned long *map, int start, int nr)
277{
278 unsigned long *p = map + BIT_WORD(start);
279 const int size = start + nr;
280 int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
281 unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
282
283 while (nr - bits_to_set >= 0) {
284 *p |= mask_to_set;
285 nr -= bits_to_set;
286 bits_to_set = BITS_PER_LONG;
287 mask_to_set = ~0UL;
288 p++;
289 }
290 if (nr) {
291 mask_to_set &= BITMAP_LAST_WORD_MASK(size);
292 *p |= mask_to_set;
293 }
294}
295EXPORT_SYMBOL(bitmap_set);
296
297void bitmap_clear(unsigned long *map, int start, int nr)
298{
299 unsigned long *p = map + BIT_WORD(start);
300 const int size = start + nr;
301 int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
302 unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
303
304 while (nr - bits_to_clear >= 0) {
305 *p &= ~mask_to_clear;
306 nr -= bits_to_clear;
307 bits_to_clear = BITS_PER_LONG;
308 mask_to_clear = ~0UL;
309 p++;
310 }
311 if (nr) {
312 mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
313 *p &= ~mask_to_clear;
314 }
315}
316EXPORT_SYMBOL(bitmap_clear);
317
318/*
319 * bitmap_find_next_zero_area - find a contiguous aligned zero area
320 * @map: The address to base the search on
321 * @size: The bitmap size in bits
322 * @start: The bitnumber to start searching at
323 * @nr: The number of zeroed bits we're looking for
324 * @align_mask: Alignment mask for zero area
325 *
326 * The @align_mask should be one less than a power of 2; the effect is that
327 * the bit offset of all zero areas this function finds is multiples of that
328 * power of 2. A @align_mask of 0 means no alignment is required.
329 */
330unsigned long bitmap_find_next_zero_area(unsigned long *map,
331 unsigned long size,
332 unsigned long start,
333 unsigned int nr,
334 unsigned long align_mask)
335{
336 unsigned long index, end, i;
337again:
338 index = find_next_zero_bit(map, size, start);
339
340 /* Align allocation */
341 index = __ALIGN_MASK(index, align_mask);
342
343 end = index + nr;
344 if (end > size)
345 return end;
346 i = find_next_bit(map, end, index);
347 if (i < end) {
348 start = i + 1;
349 goto again;
350 }
351 return index;
352}
353EXPORT_SYMBOL(bitmap_find_next_zero_area);
354
274/* 355/*
275 * Bitmap printing & parsing functions: first version by Bill Irwin, 356 * Bitmap printing & parsing functions: first version by Bill Irwin,
276 * second version by Paul Jackson, third by Joe Korty. 357 * second version by Paul Jackson, third by Joe Korty.
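The new bitmap helpers give allocators a generic "find and reserve a run of free bits" primitive: bitmap_find_next_zero_area() locates an aligned run of nr zero bits (returning a value larger than size on failure), and bitmap_set()/bitmap_clear() then mark the run allocated or free. A hedged usage sketch, assuming the matching declarations in <linux/bitmap.h> and a small allocator bitmap:

    #include <linux/bitmap.h>

    #define POOL_BITS 128
    static DECLARE_BITMAP(pool_map, POOL_BITS);

    /* Reserve 'nr' contiguous slots aligned to 'align' (a power of two);
     * returns the first slot index, or -1 if no such run exists.  This is
     * the pattern the new helpers are meant to serve. */
    static int pool_alloc(unsigned int nr, unsigned long align)
    {
            unsigned long idx;

            idx = bitmap_find_next_zero_area(pool_map, POOL_BITS, 0, nr, align - 1);
            if (idx >= POOL_BITS)
                    return -1;
            bitmap_set(pool_map, idx, nr);
            return idx;
    }

    static void pool_free(int idx, unsigned int nr)
    {
            bitmap_clear(pool_map, idx, nr);
    }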
diff --git a/lib/crc32.c b/lib/crc32.c
index 49d1c9e3ce38..02e3b31b3a79 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -42,6 +42,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
42MODULE_DESCRIPTION("Ethernet CRC32 calculations"); 42MODULE_DESCRIPTION("Ethernet CRC32 calculations");
43MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
44 44
45#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
46
47static inline u32
48crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
49{
50# ifdef __LITTLE_ENDIAN
51# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
52# else
53# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
54# endif
55 const u32 *b = (const u32 *)buf;
56 size_t rem_len;
57
58 /* Align it */
59 if (unlikely((long)b & 3 && len)) {
60 u8 *p = (u8 *)b;
61 do {
62 DO_CRC(*p++);
63 } while ((--len) && ((long)p)&3);
64 b = (u32 *)p;
65 }
66 rem_len = len & 3;
67 /* load data 32 bits wide, xor data 32 bits wide. */
68 len = len >> 2;
69 for (--b; len; --len) {
70 crc ^= *++b; /* use pre increment for speed */
71 DO_CRC(0);
72 DO_CRC(0);
73 DO_CRC(0);
74 DO_CRC(0);
75 }
76 len = rem_len;
77 /* And the last few bytes */
78 if (len) {
79 u8 *p = (u8 *)(b + 1) - 1;
80 do {
81 DO_CRC(*++p); /* use pre increment for speed */
82 } while (--len);
83 }
84 return crc;
85}
86#endif
45/** 87/**
46 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 88 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
47 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for 89 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -72,48 +114,10 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
72u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) 114u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
73{ 115{
74# if CRC_LE_BITS == 8 116# if CRC_LE_BITS == 8
75 const u32 *b =(u32 *)p;
76 const u32 *tab = crc32table_le; 117 const u32 *tab = crc32table_le;
77 118
78# ifdef __LITTLE_ENDIAN
79# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
80# else
81# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
82# endif
83
84 crc = __cpu_to_le32(crc); 119 crc = __cpu_to_le32(crc);
85 /* Align it */ 120 crc = crc32_body(crc, p, len, tab);
86 if(unlikely(((long)b)&3 && len)){
87 do {
88 u8 *p = (u8 *)b;
89 DO_CRC(*p++);
90 b = (void *)p;
91 } while ((--len) && ((long)b)&3 );
92 }
93 if(likely(len >= 4)){
94 /* load data 32 bits wide, xor data 32 bits wide. */
95 size_t save_len = len & 3;
96 len = len >> 2;
97 --b; /* use pre increment below(*++b) for speed */
98 do {
99 crc ^= *++b;
100 DO_CRC(0);
101 DO_CRC(0);
102 DO_CRC(0);
103 DO_CRC(0);
104 } while (--len);
105 b++; /* point to next byte(s) */
106 len = save_len;
107 }
108 /* And the last few bytes */
109 if(len){
110 do {
111 u8 *p = (u8 *)b;
112 DO_CRC(*p++);
113 b = (void *)p;
114 } while (--len);
115 }
116
117 return __le32_to_cpu(crc); 121 return __le32_to_cpu(crc);
118#undef ENDIAN_SHIFT 122#undef ENDIAN_SHIFT
119#undef DO_CRC 123#undef DO_CRC
@@ -170,47 +174,10 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
170u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) 174u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
171{ 175{
172# if CRC_BE_BITS == 8 176# if CRC_BE_BITS == 8
173 const u32 *b =(u32 *)p;
174 const u32 *tab = crc32table_be; 177 const u32 *tab = crc32table_be;
175 178
176# ifdef __LITTLE_ENDIAN
177# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
178# else
179# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
180# endif
181
182 crc = __cpu_to_be32(crc); 179 crc = __cpu_to_be32(crc);
183 /* Align it */ 180 crc = crc32_body(crc, p, len, tab);
184 if(unlikely(((long)b)&3 && len)){
185 do {
186 u8 *p = (u8 *)b;
187 DO_CRC(*p++);
188 b = (u32 *)p;
189 } while ((--len) && ((long)b)&3 );
190 }
191 if(likely(len >= 4)){
192 /* load data 32 bits wide, xor data 32 bits wide. */
193 size_t save_len = len & 3;
194 len = len >> 2;
195 --b; /* use pre increment below(*++b) for speed */
196 do {
197 crc ^= *++b;
198 DO_CRC(0);
199 DO_CRC(0);
200 DO_CRC(0);
201 DO_CRC(0);
202 } while (--len);
203 b++; /* point to next byte(s) */
204 len = save_len;
205 }
206 /* And the last few bytes */
207 if(len){
208 do {
209 u8 *p = (u8 *)b;
210 DO_CRC(*p++);
211 b = (void *)p;
212 } while (--len);
213 }
214 return __be32_to_cpu(crc); 181 return __be32_to_cpu(crc);
215#undef ENDIAN_SHIFT 182#undef ENDIAN_SHIFT
216#undef DO_CRC 183#undef DO_CRC
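The crc32 rework hoists the duplicated align/word-at-a-time/tail loop out of crc32_le() and crc32_be() into one crc32_body() helper driven by the endian-specific DO_CRC macro; the public entry points now only convert the seed to the table's byte order, run the shared body, and convert back. A short usage sketch of the little-endian entry point, as used for Ethernet-style checksums (illustrative caller, not part of the patch):

    #include <linux/crc32.h>

    /* Typical caller: seed with ~0, feed the buffer, invert the result. */
    static u32 example_fcs(const void *buf, size_t len)
    {
            return ~crc32_le(~0U, buf, len);
    }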
diff --git a/lib/ctype.c b/lib/ctype.c
index d02ace14a322..26baa620e95b 100644
--- a/lib/ctype.c
+++ b/lib/ctype.c
@@ -7,30 +7,30 @@
7#include <linux/ctype.h> 7#include <linux/ctype.h>
8#include <linux/module.h> 8#include <linux/module.h>
9 9
10unsigned char _ctype[] = { 10const unsigned char _ctype[] = {
11_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ 11_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
12_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ 12_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
13_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ 13_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
14_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ 14_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
15_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ 15_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
16_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ 16_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
17_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ 17_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
18_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ 18_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
19_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ 19_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
20_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ 20_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
21_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ 21_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
22_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ 22_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
23_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ 23_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
24_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ 24_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
25_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ 25_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
26_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ 26_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
270,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ 270,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
280,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ 280,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
29_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ 29_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
30_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ 30_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
31_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ 31_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
32_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ 32_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
33_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ 33_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
34_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ 34_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
35 35
36EXPORT_SYMBOL(_ctype); 36EXPORT_SYMBOL(_ctype);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index eae56fddfa3b..a9a8996d286a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -26,14 +26,14 @@
26 26
27struct debug_bucket { 27struct debug_bucket {
28 struct hlist_head list; 28 struct hlist_head list;
29 spinlock_t lock; 29 raw_spinlock_t lock;
30}; 30};
31 31
32static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 32static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
33 33
34static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; 34static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
35 35
36static DEFINE_SPINLOCK(pool_lock); 36static DEFINE_RAW_SPINLOCK(pool_lock);
37 37
38static HLIST_HEAD(obj_pool); 38static HLIST_HEAD(obj_pool);
39 39
@@ -96,10 +96,10 @@ static int fill_pool(void)
96 if (!new) 96 if (!new)
97 return obj_pool_free; 97 return obj_pool_free;
98 98
99 spin_lock_irqsave(&pool_lock, flags); 99 raw_spin_lock_irqsave(&pool_lock, flags);
100 hlist_add_head(&new->node, &obj_pool); 100 hlist_add_head(&new->node, &obj_pool);
101 obj_pool_free++; 101 obj_pool_free++;
102 spin_unlock_irqrestore(&pool_lock, flags); 102 raw_spin_unlock_irqrestore(&pool_lock, flags);
103 } 103 }
104 return obj_pool_free; 104 return obj_pool_free;
105} 105}
@@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
133{ 133{
134 struct debug_obj *obj = NULL; 134 struct debug_obj *obj = NULL;
135 135
136 spin_lock(&pool_lock); 136 raw_spin_lock(&pool_lock);
137 if (obj_pool.first) { 137 if (obj_pool.first) {
138 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 138 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
139 139
@@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
152 if (obj_pool_free < obj_pool_min_free) 152 if (obj_pool_free < obj_pool_min_free)
153 obj_pool_min_free = obj_pool_free; 153 obj_pool_min_free = obj_pool_free;
154 } 154 }
155 spin_unlock(&pool_lock); 155 raw_spin_unlock(&pool_lock);
156 156
157 return obj; 157 return obj;
158} 158}
@@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work)
165 struct debug_obj *obj; 165 struct debug_obj *obj;
166 unsigned long flags; 166 unsigned long flags;
167 167
168 spin_lock_irqsave(&pool_lock, flags); 168 raw_spin_lock_irqsave(&pool_lock, flags);
169 while (obj_pool_free > ODEBUG_POOL_SIZE) { 169 while (obj_pool_free > ODEBUG_POOL_SIZE) {
170 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 170 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
171 hlist_del(&obj->node); 171 hlist_del(&obj->node);
@@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work)
174 * We release pool_lock across kmem_cache_free() to 174 * We release pool_lock across kmem_cache_free() to
175 * avoid contention on pool_lock. 175 * avoid contention on pool_lock.
176 */ 176 */
177 spin_unlock_irqrestore(&pool_lock, flags); 177 raw_spin_unlock_irqrestore(&pool_lock, flags);
178 kmem_cache_free(obj_cache, obj); 178 kmem_cache_free(obj_cache, obj);
179 spin_lock_irqsave(&pool_lock, flags); 179 raw_spin_lock_irqsave(&pool_lock, flags);
180 } 180 }
181 spin_unlock_irqrestore(&pool_lock, flags); 181 raw_spin_unlock_irqrestore(&pool_lock, flags);
182} 182}
183 183
184/* 184/*
@@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj)
190 unsigned long flags; 190 unsigned long flags;
191 int sched = 0; 191 int sched = 0;
192 192
193 spin_lock_irqsave(&pool_lock, flags); 193 raw_spin_lock_irqsave(&pool_lock, flags);
194 /* 194 /*
195 * schedule work when the pool is filled and the cache is 195 * schedule work when the pool is filled and the cache is
196 * initialized: 196 * initialized:
@@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj)
200 hlist_add_head(&obj->node, &obj_pool); 200 hlist_add_head(&obj->node, &obj_pool);
201 obj_pool_free++; 201 obj_pool_free++;
202 obj_pool_used--; 202 obj_pool_used--;
203 spin_unlock_irqrestore(&pool_lock, flags); 203 raw_spin_unlock_irqrestore(&pool_lock, flags);
204 if (sched) 204 if (sched)
205 schedule_work(&debug_obj_work); 205 schedule_work(&debug_obj_work);
206} 206}
@@ -221,9 +221,9 @@ static void debug_objects_oom(void)
221 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); 221 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
222 222
223 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 223 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
224 spin_lock_irqsave(&db->lock, flags); 224 raw_spin_lock_irqsave(&db->lock, flags);
225 hlist_move_list(&db->list, &freelist); 225 hlist_move_list(&db->list, &freelist);
226 spin_unlock_irqrestore(&db->lock, flags); 226 raw_spin_unlock_irqrestore(&db->lock, flags);
227 227
228 /* Now free them */ 228 /* Now free them */
229 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 229 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
303 303
304 db = get_bucket((unsigned long) addr); 304 db = get_bucket((unsigned long) addr);
305 305
306 spin_lock_irqsave(&db->lock, flags); 306 raw_spin_lock_irqsave(&db->lock, flags);
307 307
308 obj = lookup_object(addr, db); 308 obj = lookup_object(addr, db);
309 if (!obj) { 309 if (!obj) {
310 obj = alloc_object(addr, db, descr); 310 obj = alloc_object(addr, db, descr);
311 if (!obj) { 311 if (!obj) {
312 debug_objects_enabled = 0; 312 debug_objects_enabled = 0;
313 spin_unlock_irqrestore(&db->lock, flags); 313 raw_spin_unlock_irqrestore(&db->lock, flags);
314 debug_objects_oom(); 314 debug_objects_oom();
315 return; 315 return;
316 } 316 }
@@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
327 case ODEBUG_STATE_ACTIVE: 327 case ODEBUG_STATE_ACTIVE:
328 debug_print_object(obj, "init"); 328 debug_print_object(obj, "init");
329 state = obj->state; 329 state = obj->state;
330 spin_unlock_irqrestore(&db->lock, flags); 330 raw_spin_unlock_irqrestore(&db->lock, flags);
331 debug_object_fixup(descr->fixup_init, addr, state); 331 debug_object_fixup(descr->fixup_init, addr, state);
332 return; 332 return;
333 333
@@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
338 break; 338 break;
339 } 339 }
340 340
341 spin_unlock_irqrestore(&db->lock, flags); 341 raw_spin_unlock_irqrestore(&db->lock, flags);
342} 342}
343 343
344/** 344/**
@@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
385 385
386 db = get_bucket((unsigned long) addr); 386 db = get_bucket((unsigned long) addr);
387 387
388 spin_lock_irqsave(&db->lock, flags); 388 raw_spin_lock_irqsave(&db->lock, flags);
389 389
390 obj = lookup_object(addr, db); 390 obj = lookup_object(addr, db);
391 if (obj) { 391 if (obj) {
@@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
398 case ODEBUG_STATE_ACTIVE: 398 case ODEBUG_STATE_ACTIVE:
399 debug_print_object(obj, "activate"); 399 debug_print_object(obj, "activate");
400 state = obj->state; 400 state = obj->state;
401 spin_unlock_irqrestore(&db->lock, flags); 401 raw_spin_unlock_irqrestore(&db->lock, flags);
402 debug_object_fixup(descr->fixup_activate, addr, state); 402 debug_object_fixup(descr->fixup_activate, addr, state);
403 return; 403 return;
404 404
@@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
408 default: 408 default:
409 break; 409 break;
410 } 410 }
411 spin_unlock_irqrestore(&db->lock, flags); 411 raw_spin_unlock_irqrestore(&db->lock, flags);
412 return; 412 return;
413 } 413 }
414 414
415 spin_unlock_irqrestore(&db->lock, flags); 415 raw_spin_unlock_irqrestore(&db->lock, flags);
416 /* 416 /*
417 * This happens when a static object is activated. We 417 * This happens when a static object is activated. We
418 * let the type specific code decide whether this is 418 * let the type specific code decide whether this is
@@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
438 438
439 db = get_bucket((unsigned long) addr); 439 db = get_bucket((unsigned long) addr);
440 440
441 spin_lock_irqsave(&db->lock, flags); 441 raw_spin_lock_irqsave(&db->lock, flags);
442 442
443 obj = lookup_object(addr, db); 443 obj = lookup_object(addr, db);
444 if (obj) { 444 if (obj) {
@@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
463 debug_print_object(&o, "deactivate"); 463 debug_print_object(&o, "deactivate");
464 } 464 }
465 465
466 spin_unlock_irqrestore(&db->lock, flags); 466 raw_spin_unlock_irqrestore(&db->lock, flags);
467} 467}
468 468
469/** 469/**
@@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
483 483
484 db = get_bucket((unsigned long) addr); 484 db = get_bucket((unsigned long) addr);
485 485
486 spin_lock_irqsave(&db->lock, flags); 486 raw_spin_lock_irqsave(&db->lock, flags);
487 487
488 obj = lookup_object(addr, db); 488 obj = lookup_object(addr, db);
489 if (!obj) 489 if (!obj)
@@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
498 case ODEBUG_STATE_ACTIVE: 498 case ODEBUG_STATE_ACTIVE:
499 debug_print_object(obj, "destroy"); 499 debug_print_object(obj, "destroy");
500 state = obj->state; 500 state = obj->state;
501 spin_unlock_irqrestore(&db->lock, flags); 501 raw_spin_unlock_irqrestore(&db->lock, flags);
502 debug_object_fixup(descr->fixup_destroy, addr, state); 502 debug_object_fixup(descr->fixup_destroy, addr, state);
503 return; 503 return;
504 504
@@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
509 break; 509 break;
510 } 510 }
511out_unlock: 511out_unlock:
512 spin_unlock_irqrestore(&db->lock, flags); 512 raw_spin_unlock_irqrestore(&db->lock, flags);
513} 513}
514 514
515/** 515/**
@@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
529 529
530 db = get_bucket((unsigned long) addr); 530 db = get_bucket((unsigned long) addr);
531 531
532 spin_lock_irqsave(&db->lock, flags); 532 raw_spin_lock_irqsave(&db->lock, flags);
533 533
534 obj = lookup_object(addr, db); 534 obj = lookup_object(addr, db);
535 if (!obj) 535 if (!obj)
@@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
539 case ODEBUG_STATE_ACTIVE: 539 case ODEBUG_STATE_ACTIVE:
540 debug_print_object(obj, "free"); 540 debug_print_object(obj, "free");
541 state = obj->state; 541 state = obj->state;
542 spin_unlock_irqrestore(&db->lock, flags); 542 raw_spin_unlock_irqrestore(&db->lock, flags);
543 debug_object_fixup(descr->fixup_free, addr, state); 543 debug_object_fixup(descr->fixup_free, addr, state);
544 return; 544 return;
545 default: 545 default:
546 hlist_del(&obj->node); 546 hlist_del(&obj->node);
547 spin_unlock_irqrestore(&db->lock, flags); 547 raw_spin_unlock_irqrestore(&db->lock, flags);
548 free_object(obj); 548 free_object(obj);
549 return; 549 return;
550 } 550 }
551out_unlock: 551out_unlock:
552 spin_unlock_irqrestore(&db->lock, flags); 552 raw_spin_unlock_irqrestore(&db->lock, flags);
553} 553}
554 554
555#ifdef CONFIG_DEBUG_OBJECTS_FREE 555#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
575 575
576repeat: 576repeat:
577 cnt = 0; 577 cnt = 0;
578 spin_lock_irqsave(&db->lock, flags); 578 raw_spin_lock_irqsave(&db->lock, flags);
579 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 579 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
580 cnt++; 580 cnt++;
581 oaddr = (unsigned long) obj->object; 581 oaddr = (unsigned long) obj->object;
@@ -587,7 +587,7 @@ repeat:
587 debug_print_object(obj, "free"); 587 debug_print_object(obj, "free");
588 descr = obj->descr; 588 descr = obj->descr;
589 state = obj->state; 589 state = obj->state;
590 spin_unlock_irqrestore(&db->lock, flags); 590 raw_spin_unlock_irqrestore(&db->lock, flags);
591 debug_object_fixup(descr->fixup_free, 591 debug_object_fixup(descr->fixup_free,
592 (void *) oaddr, state); 592 (void *) oaddr, state);
593 goto repeat; 593 goto repeat;
@@ -597,7 +597,7 @@ repeat:
597 break; 597 break;
598 } 598 }
599 } 599 }
600 spin_unlock_irqrestore(&db->lock, flags); 600 raw_spin_unlock_irqrestore(&db->lock, flags);
601 601
602 /* Now free them */ 602 /* Now free them */
603 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 603 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
783 783
784 db = get_bucket((unsigned long) addr); 784 db = get_bucket((unsigned long) addr);
785 785
786 spin_lock_irqsave(&db->lock, flags); 786 raw_spin_lock_irqsave(&db->lock, flags);
787 787
788 obj = lookup_object(addr, db); 788 obj = lookup_object(addr, db);
789 if (!obj && state != ODEBUG_STATE_NONE) { 789 if (!obj && state != ODEBUG_STATE_NONE) {
@@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
807 } 807 }
808 res = 0; 808 res = 0;
809out: 809out:
810 spin_unlock_irqrestore(&db->lock, flags); 810 raw_spin_unlock_irqrestore(&db->lock, flags);
811 if (res) 811 if (res)
812 debug_objects_enabled = 0; 812 debug_objects_enabled = 0;
813 return res; 813 return res;
@@ -907,7 +907,7 @@ void __init debug_objects_early_init(void)
907 int i; 907 int i;
908 908
909 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 909 for (i = 0; i < ODEBUG_HASH_SIZE; i++)
910 spin_lock_init(&obj_hash[i].lock); 910 raw_spin_lock_init(&obj_hash[i].lock);
911 911
912 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 912 for (i = 0; i < ODEBUG_POOL_SIZE; i++)
913 hlist_add_head(&obj_static_pool[i].node, &obj_pool); 913 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
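The debugobjects conversion above is mechanical: locks that must remain true spinning locks in every configuration (the hash bucket locks and the object pool lock) move from spinlock_t to raw_spinlock_t, and each lock/unlock call gains a raw_ prefix. A minimal sketch of the same pattern, using a hypothetical my_bucket structure rather than the file's own types:

#include <linux/spinlock.h>
#include <linux/list.h>

struct my_bucket {
        struct hlist_head list;
        raw_spinlock_t lock;                     /* was: spinlock_t lock; */
};

static DEFINE_RAW_SPINLOCK(my_pool_lock);        /* was: DEFINE_SPINLOCK() */

static void my_bucket_add(struct my_bucket *b, struct hlist_node *n)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&b->lock, flags);  /* was: spin_lock_irqsave() */
        hlist_add_head(n, &b->list);
        raw_spin_unlock_irqrestore(&b->lock, flags);
}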
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e22c148e4b7f..f93502915988 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -21,6 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/string.h>
24#include <linux/uaccess.h> 25#include <linux/uaccess.h>
25#include <linux/dynamic_debug.h> 26#include <linux/dynamic_debug.h>
26#include <linux/debugfs.h> 27#include <linux/debugfs.h>
@@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
209 char *end; 210 char *end;
210 211
211 /* Skip leading whitespace */ 212 /* Skip leading whitespace */
212 while (*buf && isspace(*buf)) 213 buf = skip_spaces(buf);
213 buf++;
214 if (!*buf) 214 if (!*buf)
215 break; /* oh, it was trailing whitespace */ 215 break; /* oh, it was trailing whitespace */
216 216
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eed2bdb865e7..e67f97495dd5 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/bitmap.h>
14#include <linux/genalloc.h> 15#include <linux/genalloc.h>
15 16
16 17
@@ -114,7 +115,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
114 struct gen_pool_chunk *chunk; 115 struct gen_pool_chunk *chunk;
115 unsigned long addr, flags; 116 unsigned long addr, flags;
116 int order = pool->min_alloc_order; 117 int order = pool->min_alloc_order;
117 int nbits, bit, start_bit, end_bit; 118 int nbits, start_bit, end_bit;
118 119
119 if (size == 0) 120 if (size == 0)
120 return 0; 121 return 0;
@@ -129,29 +130,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
129 end_bit -= nbits + 1; 130 end_bit -= nbits + 1;
130 131
131 spin_lock_irqsave(&chunk->lock, flags); 132 spin_lock_irqsave(&chunk->lock, flags);
132 bit = -1; 133 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
133 while (bit + 1 < end_bit) { 134 nbits, 0);
134 bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1); 135 if (start_bit >= end_bit) {
135 if (bit >= end_bit)
136 break;
137
138 start_bit = bit;
139 if (nbits > 1) {
140 bit = find_next_bit(chunk->bits, bit + nbits,
141 bit + 1);
142 if (bit - start_bit < nbits)
143 continue;
144 }
145
146 addr = chunk->start_addr +
147 ((unsigned long)start_bit << order);
148 while (nbits--)
149 __set_bit(start_bit++, chunk->bits);
150 spin_unlock_irqrestore(&chunk->lock, flags); 136 spin_unlock_irqrestore(&chunk->lock, flags);
151 read_unlock(&pool->lock); 137 continue;
152 return addr;
153 } 138 }
139
140 addr = chunk->start_addr + ((unsigned long)start_bit << order);
141
142 bitmap_set(chunk->bits, start_bit, nbits);
154 spin_unlock_irqrestore(&chunk->lock, flags); 143 spin_unlock_irqrestore(&chunk->lock, flags);
144 read_unlock(&pool->lock);
145 return addr;
155 } 146 }
156 read_unlock(&pool->lock); 147 read_unlock(&pool->lock);
157 return 0; 148 return 0;
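The rewrite above replaces gen_pool_alloc()'s hand-rolled bit scan with the new bitmap helpers; the iommu-helper.c change below relies on the same pair. A hedged sketch of the pattern (map, size and nbits are illustrative names, not the pool's fields): bitmap_find_next_zero_area() returns an index >= size when no suitable run of clear bits exists, and bitmap_set() then marks the whole run in one call.

#include <linux/bitmap.h>

static long alloc_bit_run(unsigned long *map, unsigned long size, int nbits)
{
        unsigned long start;

        start = bitmap_find_next_zero_area(map, size, 0, nbits, 0);
        if (start >= size)                /* no run of nbits free bits */
                return -1;
        bitmap_set(map, start, nbits);    /* reserve the whole run */
        return start;
}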
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 75dbda03f4fb..c0251f4ad08b 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,41 +3,7 @@
3 */ 3 */
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/bitops.h> 6#include <linux/bitmap.h>
7
8static unsigned long find_next_zero_area(unsigned long *map,
9 unsigned long size,
10 unsigned long start,
11 unsigned int nr,
12 unsigned long align_mask)
13{
14 unsigned long index, end, i;
15again:
16 index = find_next_zero_bit(map, size, start);
17
18 /* Align allocation */
19 index = (index + align_mask) & ~align_mask;
20
21 end = index + nr;
22 if (end >= size)
23 return -1;
24 for (i = index; i < end; i++) {
25 if (test_bit(i, map)) {
26 start = i+1;
27 goto again;
28 }
29 }
30 return index;
31}
32
33void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
34{
35 unsigned long end = i + len;
36 while (i < end) {
37 __set_bit(i, map);
38 i++;
39 }
40}
41 7
42int iommu_is_span_boundary(unsigned int index, unsigned int nr, 8int iommu_is_span_boundary(unsigned int index, unsigned int nr,
43 unsigned long shift, 9 unsigned long shift,
@@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
55 unsigned long align_mask) 21 unsigned long align_mask)
56{ 22{
57 unsigned long index; 23 unsigned long index;
24
25 /* We don't want the last of the limit */
26 size -= 1;
58again: 27again:
59 index = find_next_zero_area(map, size, start, nr, align_mask); 28 index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
60 if (index != -1) { 29 if (index < size) {
61 if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { 30 if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
62 /* we could do more effectively */ 31 /* we could do more effectively */
63 start = index + 1; 32 start = index + 1;
64 goto again; 33 goto again;
65 } 34 }
66 iommu_area_reserve(map, index, nr); 35 bitmap_set(map, index, nr);
36 return index;
67 } 37 }
68 return index; 38 return -1;
69} 39}
70EXPORT_SYMBOL(iommu_area_alloc); 40EXPORT_SYMBOL(iommu_area_alloc);
71 41
72void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
73{
74 unsigned long end = start + nr;
75
76 while (start < end) {
77 __clear_bit(start, map);
78 start++;
79 }
80}
81EXPORT_SYMBOL(iommu_area_free);
82
83unsigned long iommu_num_pages(unsigned long addr, unsigned long len, 42unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
84 unsigned long io_page_size) 43 unsigned long io_page_size)
85{ 44{
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5526b46aba94..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -23,7 +23,7 @@
23 * 23 *
24 * Don't use in new code. 24 * Don't use in new code.
25 */ 25 */
26static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); 26static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
27 27
28 28
29/* 29/*
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
36 * If it successfully gets the lock, it should increment 36 * If it successfully gets the lock, it should increment
37 * the preemption count like any spinlock does. 37 * the preemption count like any spinlock does.
38 * 38 *
39 * (This works on UP too - _raw_spin_trylock will never 39 * (This works on UP too - do_raw_spin_trylock will never
40 * return false in that case) 40 * return false in that case)
41 */ 41 */
42int __lockfunc __reacquire_kernel_lock(void) 42int __lockfunc __reacquire_kernel_lock(void)
43{ 43{
44 while (!_raw_spin_trylock(&kernel_flag)) { 44 while (!do_raw_spin_trylock(&kernel_flag)) {
45 if (need_resched()) 45 if (need_resched())
46 return -EAGAIN; 46 return -EAGAIN;
47 cpu_relax(); 47 cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
52 52
53void __lockfunc __release_kernel_lock(void) 53void __lockfunc __release_kernel_lock(void)
54{ 54{
55 _raw_spin_unlock(&kernel_flag); 55 do_raw_spin_unlock(&kernel_flag);
56 preempt_enable_no_resched(); 56 preempt_enable_no_resched();
57} 57}
58 58
59/* 59/*
60 * These are the BKL spinlocks - we try to be polite about preemption. 60 * These are the BKL spinlocks - we try to be polite about preemption.
61 * If SMP is not on (ie UP preemption), this all goes away because the 61 * If SMP is not on (ie UP preemption), this all goes away because the
62 * _raw_spin_trylock() will always succeed. 62 * do_raw_spin_trylock() will always succeed.
63 */ 63 */
64#ifdef CONFIG_PREEMPT 64#ifdef CONFIG_PREEMPT
65static inline void __lock_kernel(void) 65static inline void __lock_kernel(void)
66{ 66{
67 preempt_disable(); 67 preempt_disable();
68 if (unlikely(!_raw_spin_trylock(&kernel_flag))) { 68 if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
69 /* 69 /*
70 * If preemption was disabled even before this 70 * If preemption was disabled even before this
71 * was called, there's nothing we can be polite 71 * was called, there's nothing we can be polite
72 * about - just spin. 72 * about - just spin.
73 */ 73 */
74 if (preempt_count() > 1) { 74 if (preempt_count() > 1) {
75 _raw_spin_lock(&kernel_flag); 75 do_raw_spin_lock(&kernel_flag);
76 return; 76 return;
77 } 77 }
78 78
@@ -82,10 +82,10 @@ static inline void __lock_kernel(void)
82 */ 82 */
83 do { 83 do {
84 preempt_enable(); 84 preempt_enable();
85 while (spin_is_locked(&kernel_flag)) 85 while (raw_spin_is_locked(&kernel_flag))
86 cpu_relax(); 86 cpu_relax();
87 preempt_disable(); 87 preempt_disable();
88 } while (!_raw_spin_trylock(&kernel_flag)); 88 } while (!do_raw_spin_trylock(&kernel_flag));
89 } 89 }
90} 90}
91 91
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
96 */ 96 */
97static inline void __lock_kernel(void) 97static inline void __lock_kernel(void)
98{ 98{
99 _raw_spin_lock(&kernel_flag); 99 do_raw_spin_lock(&kernel_flag);
100} 100}
101#endif 101#endif
102 102
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
106 * the BKL is not covered by lockdep, so we open-code the 106 * the BKL is not covered by lockdep, so we open-code the
107 * unlocking sequence (and thus avoid the dep-chain ops): 107 * unlocking sequence (and thus avoid the dep-chain ops):
108 */ 108 */
109 _raw_spin_unlock(&kernel_flag); 109 do_raw_spin_unlock(&kernel_flag);
110 preempt_enable(); 110 preempt_enable();
111} 111}
112 112
diff --git a/lib/parser.c b/lib/parser.c
index b00d02059a5f..fb34977246bb 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[])
56 56
57 args[argc].from = s; 57 args[argc].from = s;
58 switch (*p++) { 58 switch (*p++) {
59 case 's': 59 case 's': {
60 if (strlen(s) == 0) 60 size_t str_len = strlen(s);
61
62 if (str_len == 0)
61 return 0; 63 return 0;
62 else if (len == -1 || len > strlen(s)) 64 if (len == -1 || len > str_len)
63 len = strlen(s); 65 len = str_len;
64 args[argc].to = s + len; 66 args[argc].to = s + len;
65 break; 67 break;
68 }
66 case 'd': 69 case 'd':
67 simple_strtol(s, &args[argc].to, 0); 70 simple_strtol(s, &args[argc].to, 0);
68 goto num; 71 goto num;
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..1471988d9190 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
54 54
55static void plist_check_head(struct plist_head *head) 55static void plist_check_head(struct plist_head *head)
56{ 56{
57 WARN_ON(!head->lock); 57 WARN_ON(!head->rawlock && !head->spinlock);
58 if (head->lock) 58 if (head->rawlock)
59 WARN_ON_SMP(!spin_is_locked(head->lock)); 59 WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
60 if (head->spinlock)
61 WARN_ON_SMP(!spin_is_locked(head->spinlock));
60 plist_check_list(&head->prio_list); 62 plist_check_list(&head->prio_list);
61 plist_check_list(&head->node_list); 63 plist_check_list(&head->node_list);
62} 64}
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 9df3ca56db11..ccf95bff7984 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,6 +17,19 @@ struct rwsem_waiter {
17#define RWSEM_WAITING_FOR_WRITE 0x00000002 17#define RWSEM_WAITING_FOR_WRITE 0x00000002
18}; 18};
19 19
20int rwsem_is_locked(struct rw_semaphore *sem)
21{
22 int ret = 1;
23 unsigned long flags;
24
25 if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
26 ret = (sem->activity != 0);
27 spin_unlock_irqrestore(&sem->wait_lock, flags);
28 }
29 return ret;
30}
31EXPORT_SYMBOL(rwsem_is_locked);
32
20/* 33/*
21 * initialise the semaphore 34 * initialise the semaphore
22 */ 35 */
@@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
34 spin_lock_init(&sem->wait_lock); 47 spin_lock_init(&sem->wait_lock);
35 INIT_LIST_HEAD(&sem->wait_list); 48 INIT_LIST_HEAD(&sem->wait_list);
36} 49}
50EXPORT_SYMBOL(__init_rwsem);
37 51
38/* 52/*
39 * handle the lock release when processes blocked on it that can now run 53 * handle the lock release when processes blocked on it that can now run
@@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem)
305 spin_unlock_irqrestore(&sem->wait_lock, flags); 319 spin_unlock_irqrestore(&sem->wait_lock, flags);
306} 320}
307 321
308EXPORT_SYMBOL(__init_rwsem);
309EXPORT_SYMBOL(__down_read);
310EXPORT_SYMBOL(__down_read_trylock);
311EXPORT_SYMBOL(__down_write_nested);
312EXPORT_SYMBOL(__down_write);
313EXPORT_SYMBOL(__down_write_trylock);
314EXPORT_SYMBOL(__up_read);
315EXPORT_SYMBOL(__up_write);
316EXPORT_SYMBOL(__downgrade_write);
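The rwsem_is_locked() added above belongs to the generic, spinlock-based rwsem implementation: it peeks at sem->activity under wait_lock and, when the trylock fails, conservatively reports the semaphore as held. That makes it suitable for debug assertions rather than lock-free decisions. A hypothetical usage sketch (example_sem is not from this commit):

#include <linux/rwsem.h>
#include <linux/kernel.h>

static DECLARE_RWSEM(example_sem);        /* hypothetical semaphore */

static void example_reader(void)
{
        down_read(&example_sem);
        /* Typical consumer: sanity checks in debugging code. */
        WARN_ON(!rwsem_is_locked(&example_sem));
        up_read(&example_sem);
}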
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/module.h> 14#include <linux/module.h>
15 15
16void __spin_lock_init(spinlock_t *lock, const char *name, 16void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
17 struct lock_class_key *key) 17 struct lock_class_key *key)
18{ 18{
19#ifdef CONFIG_DEBUG_LOCK_ALLOC 19#ifdef CONFIG_DEBUG_LOCK_ALLOC
20 /* 20 /*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 23 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
24 lockdep_init_map(&lock->dep_map, name, key, 0); 24 lockdep_init_map(&lock->dep_map, name, key, 0);
25#endif 25#endif
26 lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
27 lock->magic = SPINLOCK_MAGIC; 27 lock->magic = SPINLOCK_MAGIC;
28 lock->owner = SPINLOCK_OWNER_INIT; 28 lock->owner = SPINLOCK_OWNER_INIT;
29 lock->owner_cpu = -1; 29 lock->owner_cpu = -1;
30} 30}
31 31
32EXPORT_SYMBOL(__spin_lock_init); 32EXPORT_SYMBOL(__raw_spin_lock_init);
33 33
34void __rwlock_init(rwlock_t *lock, const char *name, 34void __rwlock_init(rwlock_t *lock, const char *name,
35 struct lock_class_key *key) 35 struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
41 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 41 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
42 lockdep_init_map(&lock->dep_map, name, key, 0); 42 lockdep_init_map(&lock->dep_map, name, key, 0);
43#endif 43#endif
44 lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; 44 lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
45 lock->magic = RWLOCK_MAGIC; 45 lock->magic = RWLOCK_MAGIC;
46 lock->owner = SPINLOCK_OWNER_INIT; 46 lock->owner = SPINLOCK_OWNER_INIT;
47 lock->owner_cpu = -1; 47 lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
49 49
50EXPORT_SYMBOL(__rwlock_init); 50EXPORT_SYMBOL(__rwlock_init);
51 51
52static void spin_bug(spinlock_t *lock, const char *msg) 52static void spin_bug(raw_spinlock_t *lock, const char *msg)
53{ 53{
54 struct task_struct *owner = NULL; 54 struct task_struct *owner = NULL;
55 55
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
73#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) 73#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
74 74
75static inline void 75static inline void
76debug_spin_lock_before(spinlock_t *lock) 76debug_spin_lock_before(raw_spinlock_t *lock)
77{ 77{
78 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 78 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
79 SPIN_BUG_ON(lock->owner == current, lock, "recursion"); 79 SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
81 lock, "cpu recursion"); 81 lock, "cpu recursion");
82} 82}
83 83
84static inline void debug_spin_lock_after(spinlock_t *lock) 84static inline void debug_spin_lock_after(raw_spinlock_t *lock)
85{ 85{
86 lock->owner_cpu = raw_smp_processor_id(); 86 lock->owner_cpu = raw_smp_processor_id();
87 lock->owner = current; 87 lock->owner = current;
88} 88}
89 89
90static inline void debug_spin_unlock(spinlock_t *lock) 90static inline void debug_spin_unlock(raw_spinlock_t *lock)
91{ 91{
92 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 92 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
93 SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); 93 SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
94 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); 94 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
95 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), 95 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
96 lock, "wrong CPU"); 96 lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
98 lock->owner_cpu = -1; 98 lock->owner_cpu = -1;
99} 99}
100 100
101static void __spin_lock_debug(spinlock_t *lock) 101static void __spin_lock_debug(raw_spinlock_t *lock)
102{ 102{
103 u64 i; 103 u64 i;
104 u64 loops = loops_per_jiffy * HZ; 104 u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
106 106
107 for (;;) { 107 for (;;) {
108 for (i = 0; i < loops; i++) { 108 for (i = 0; i < loops; i++) {
109 if (__raw_spin_trylock(&lock->raw_lock)) 109 if (arch_spin_trylock(&lock->raw_lock))
110 return; 110 return;
111 __delay(1); 111 __delay(1);
112 } 112 }
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
125 } 125 }
126} 126}
127 127
128void _raw_spin_lock(spinlock_t *lock) 128void do_raw_spin_lock(raw_spinlock_t *lock)
129{ 129{
130 debug_spin_lock_before(lock); 130 debug_spin_lock_before(lock);
131 if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) 131 if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
132 __spin_lock_debug(lock); 132 __spin_lock_debug(lock);
133 debug_spin_lock_after(lock); 133 debug_spin_lock_after(lock);
134} 134}
135 135
136int _raw_spin_trylock(spinlock_t *lock) 136int do_raw_spin_trylock(raw_spinlock_t *lock)
137{ 137{
138 int ret = __raw_spin_trylock(&lock->raw_lock); 138 int ret = arch_spin_trylock(&lock->raw_lock);
139 139
140 if (ret) 140 if (ret)
141 debug_spin_lock_after(lock); 141 debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
148 return ret; 148 return ret;
149} 149}
150 150
151void _raw_spin_unlock(spinlock_t *lock) 151void do_raw_spin_unlock(raw_spinlock_t *lock)
152{ 152{
153 debug_spin_unlock(lock); 153 debug_spin_unlock(lock);
154 __raw_spin_unlock(&lock->raw_lock); 154 arch_spin_unlock(&lock->raw_lock);
155} 155}
156 156
157static void rwlock_bug(rwlock_t *lock, const char *msg) 157static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
176 176
177 for (;;) { 177 for (;;) {
178 for (i = 0; i < loops; i++) { 178 for (i = 0; i < loops; i++) {
179 if (__raw_read_trylock(&lock->raw_lock)) 179 if (arch_read_trylock(&lock->raw_lock))
180 return; 180 return;
181 __delay(1); 181 __delay(1);
182 } 182 }
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
193} 193}
194#endif 194#endif
195 195
196void _raw_read_lock(rwlock_t *lock) 196void do_raw_read_lock(rwlock_t *lock)
197{ 197{
198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
199 __raw_read_lock(&lock->raw_lock); 199 arch_read_lock(&lock->raw_lock);
200} 200}
201 201
202int _raw_read_trylock(rwlock_t *lock) 202int do_raw_read_trylock(rwlock_t *lock)
203{ 203{
204 int ret = __raw_read_trylock(&lock->raw_lock); 204 int ret = arch_read_trylock(&lock->raw_lock);
205 205
206#ifndef CONFIG_SMP 206#ifndef CONFIG_SMP
207 /* 207 /*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
212 return ret; 212 return ret;
213} 213}
214 214
215void _raw_read_unlock(rwlock_t *lock) 215void do_raw_read_unlock(rwlock_t *lock)
216{ 216{
217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
218 __raw_read_unlock(&lock->raw_lock); 218 arch_read_unlock(&lock->raw_lock);
219} 219}
220 220
221static inline void debug_write_lock_before(rwlock_t *lock) 221static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
251 251
252 for (;;) { 252 for (;;) {
253 for (i = 0; i < loops; i++) { 253 for (i = 0; i < loops; i++) {
254 if (__raw_write_trylock(&lock->raw_lock)) 254 if (arch_write_trylock(&lock->raw_lock))
255 return; 255 return;
256 __delay(1); 256 __delay(1);
257 } 257 }
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
268} 268}
269#endif 269#endif
270 270
271void _raw_write_lock(rwlock_t *lock) 271void do_raw_write_lock(rwlock_t *lock)
272{ 272{
273 debug_write_lock_before(lock); 273 debug_write_lock_before(lock);
274 __raw_write_lock(&lock->raw_lock); 274 arch_write_lock(&lock->raw_lock);
275 debug_write_lock_after(lock); 275 debug_write_lock_after(lock);
276} 276}
277 277
278int _raw_write_trylock(rwlock_t *lock) 278int do_raw_write_trylock(rwlock_t *lock)
279{ 279{
280 int ret = __raw_write_trylock(&lock->raw_lock); 280 int ret = arch_write_trylock(&lock->raw_lock);
281 281
282 if (ret) 282 if (ret)
283 debug_write_lock_after(lock); 283 debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
290 return ret; 290 return ret;
291} 291}
292 292
293void _raw_write_unlock(rwlock_t *lock) 293void do_raw_write_unlock(rwlock_t *lock)
294{ 294{
295 debug_write_unlock(lock); 295 debug_write_unlock(lock);
296 __raw_write_unlock(&lock->raw_lock); 296 arch_write_unlock(&lock->raw_lock);
297} 297}
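The renames in this file reflect the naming layers introduced by the series: spinlock_t wraps raw_spinlock_t, which wraps the architecture's arch_spinlock_t; the debug entry points become do_raw_spin_*() while the low-level primitives become arch_spin_*(). A rough, simplified sketch of where a call lands when spinlock debugging is enabled (example_lock is hypothetical):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);        /* hypothetical lock */

static void example(void)
{
        /*
         * raw_spin_lock() operates on raw_spinlock_t; with
         * CONFIG_DEBUG_SPINLOCK it reaches do_raw_spin_lock() above,
         * which performs the sanity checks and then acquires the
         * embedded arch_spinlock_t via arch_spin_trylock().
         */
        raw_spin_lock(&example_lock);
        raw_spin_unlock(&example_lock);
}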
diff --git a/lib/string.c b/lib/string.c
index e96421ab9a9a..afce96af3afd 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -338,20 +338,34 @@ EXPORT_SYMBOL(strnchr);
338#endif 338#endif
339 339
340/** 340/**
341 * strstrip - Removes leading and trailing whitespace from @s. 341 * skip_spaces - Removes leading whitespace from @s.
342 * @s: The string to be stripped.
343 *
344 * Returns a pointer to the first non-whitespace character in @s.
345 */
346char *skip_spaces(const char *str)
347{
348 while (isspace(*str))
349 ++str;
350 return (char *)str;
351}
352EXPORT_SYMBOL(skip_spaces);
353
354/**
355 * strim - Removes leading and trailing whitespace from @s.
342 * @s: The string to be stripped. 356 * @s: The string to be stripped.
343 * 357 *
344 * Note that the first trailing whitespace is replaced with a %NUL-terminator 358 * Note that the first trailing whitespace is replaced with a %NUL-terminator
345 * in the given string @s. Returns a pointer to the first non-whitespace 359 * in the given string @s. Returns a pointer to the first non-whitespace
346 * character in @s. 360 * character in @s.
347 */ 361 */
348char *strstrip(char *s) 362char *strim(char *s)
349{ 363{
350 size_t size; 364 size_t size;
351 char *end; 365 char *end;
352 366
367 s = skip_spaces(s);
353 size = strlen(s); 368 size = strlen(s);
354
355 if (!size) 369 if (!size)
356 return s; 370 return s;
357 371
@@ -360,12 +374,9 @@ char *strstrip(char *s)
360 end--; 374 end--;
361 *(end + 1) = '\0'; 375 *(end + 1) = '\0';
362 376
363 while (*s && isspace(*s))
364 s++;
365
366 return s; 377 return s;
367} 378}
368EXPORT_SYMBOL(strstrip); 379EXPORT_SYMBOL(strim);
369 380
370#ifndef __HAVE_ARCH_STRLEN 381#ifndef __HAVE_ARCH_STRLEN
371/** 382/**
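The hunk above splits the old strstrip() into two exported helpers: skip_spaces(), which only advances past leading whitespace, and strim(), which also terminates the string before its trailing whitespace. A small, hypothetical usage sketch:

#include <linux/string.h>
#include <linux/kernel.h>

static void example(void)
{
        char buf[] = "  hello world  ";

        /* skip_spaces(): returns a pointer past the leading blanks only. */
        pr_info("'%s'\n", skip_spaces(buf));     /* 'hello world  ' */

        /* strim() (renamed from strstrip()): trims both ends in place;
         * the first trailing blank is overwritten with '\0'. */
        pr_info("'%s'\n", strim(buf));           /* 'hello world' */
}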
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5bc01803f8f8..437eedb5a53b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -549,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
549 dma_mask = hwdev->coherent_dma_mask; 549 dma_mask = hwdev->coherent_dma_mask;
550 550
551 ret = (void *)__get_free_pages(flags, order); 551 ret = (void *)__get_free_pages(flags, order);
552 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { 552 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
553 /* 553 /*
554 * The allocated memory isn't reachable by the device. 554 * The allocated memory isn't reachable by the device.
555 */ 555 */
@@ -571,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
571 dev_addr = swiotlb_virt_to_bus(hwdev, ret); 571 dev_addr = swiotlb_virt_to_bus(hwdev, ret);
572 572
573 /* Confirm address can be DMA'd by device */ 573 /* Confirm address can be DMA'd by device */
574 if (dev_addr + size > dma_mask) { 574 if (dev_addr + size - 1 > dma_mask) {
575 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 575 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
576 (unsigned long long)dma_mask, 576 (unsigned long long)dma_mask,
577 (unsigned long long)dev_addr); 577 (unsigned long long)dev_addr);
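The two hunks above fix an off-by-one in the coherent-allocation mask check: a buffer is usable when the bus address of its last byte fits under the DMA mask, whereas dev_addr + size points one past the end and wrongly rejects a buffer ending exactly at the mask boundary. An illustrative check with made-up numbers (not from the commit):

#include <linux/types.h>

static bool fits_under_mask(u64 dev_addr, u64 size, u64 dma_mask)
{
        /*
         * e.g. dma_mask = 0xffffffff (32-bit), dev_addr = 0xfffff000,
         * size = 0x1000: the old "dev_addr + size > dma_mask" test saw
         * 0x100000000 > 0xffffffff and rejected a usable buffer;
         * testing the last byte's address accepts it.
         */
        return dev_addr + size - 1 <= dma_mask;
}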
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6438cd5599ee..735343fc857a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -9,7 +9,7 @@
9 * Wirzenius wrote this portably, Torvalds fucked it up :-) 9 * Wirzenius wrote this portably, Torvalds fucked it up :-)
10 */ 10 */
11 11
12/* 12/*
13 * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> 13 * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
14 * - changed to provide snprintf and vsnprintf functions 14 * - changed to provide snprintf and vsnprintf functions
15 * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> 15 * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
@@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp)
47} 47}
48 48
49/** 49/**
50 * simple_strtoul - convert a string to an unsigned long 50 * simple_strtoull - convert a string to an unsigned long long
51 * @cp: The start of the string 51 * @cp: The start of the string
52 * @endp: A pointer to the end of the parsed string will be placed here 52 * @endp: A pointer to the end of the parsed string will be placed here
53 * @base: The number base to use 53 * @base: The number base to use
54 */ 54 */
55unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) 55unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
56{ 56{
57 unsigned long result = 0; 57 unsigned long long result = 0;
58 58
59 if (!base) 59 if (!base)
60 base = simple_guess_base(cp); 60 base = simple_guess_base(cp);
@@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
71 result = result * base + value; 71 result = result * base + value;
72 cp++; 72 cp++;
73 } 73 }
74
75 if (endp) 74 if (endp)
76 *endp = (char *)cp; 75 *endp = (char *)cp;
76
77 return result; 77 return result;
78} 78}
79EXPORT_SYMBOL(simple_strtoul); 79EXPORT_SYMBOL(simple_strtoull);
80 80
81/** 81/**
82 * simple_strtol - convert a string to a signed long 82 * simple_strtoul - convert a string to an unsigned long
83 * @cp: The start of the string 83 * @cp: The start of the string
84 * @endp: A pointer to the end of the parsed string will be placed here 84 * @endp: A pointer to the end of the parsed string will be placed here
85 * @base: The number base to use 85 * @base: The number base to use
86 */ 86 */
87long simple_strtol(const char *cp, char **endp, unsigned int base) 87unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
88{ 88{
89 if(*cp == '-') 89 return simple_strtoull(cp, endp, base);
90 return -simple_strtoul(cp + 1, endp, base);
91 return simple_strtoul(cp, endp, base);
92} 90}
93EXPORT_SYMBOL(simple_strtol); 91EXPORT_SYMBOL(simple_strtoul);
94 92
95/** 93/**
96 * simple_strtoull - convert a string to an unsigned long long 94 * simple_strtol - convert a string to a signed long
97 * @cp: The start of the string 95 * @cp: The start of the string
98 * @endp: A pointer to the end of the parsed string will be placed here 96 * @endp: A pointer to the end of the parsed string will be placed here
99 * @base: The number base to use 97 * @base: The number base to use
100 */ 98 */
101unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) 99long simple_strtol(const char *cp, char **endp, unsigned int base)
102{ 100{
103 unsigned long long result = 0; 101 if (*cp == '-')
104 102 return -simple_strtoul(cp + 1, endp, base);
105 if (!base)
106 base = simple_guess_base(cp);
107
108 if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
109 cp += 2;
110
111 while (isxdigit(*cp)) {
112 unsigned int value;
113
114 value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
115 if (value >= base)
116 break;
117 result = result * base + value;
118 cp++;
119 }
120 103
121 if (endp) 104 return simple_strtoul(cp, endp, base);
122 *endp = (char *)cp;
123 return result;
124} 105}
125EXPORT_SYMBOL(simple_strtoull); 106EXPORT_SYMBOL(simple_strtol);
126 107
127/** 108/**
128 * simple_strtoll - convert a string to a signed long long 109 * simple_strtoll - convert a string to a signed long long
@@ -132,8 +113,9 @@ EXPORT_SYMBOL(simple_strtoull);
132 */ 113 */
133long long simple_strtoll(const char *cp, char **endp, unsigned int base) 114long long simple_strtoll(const char *cp, char **endp, unsigned int base)
134{ 115{
135 if(*cp=='-') 116 if (*cp == '-')
136 return -simple_strtoull(cp + 1, endp, base); 117 return -simple_strtoull(cp + 1, endp, base);
118
137 return simple_strtoull(cp, endp, base); 119 return simple_strtoull(cp, endp, base);
138} 120}
139 121
@@ -173,6 +155,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
173 val = simple_strtoul(cp, &tail, base); 155 val = simple_strtoul(cp, &tail, base);
174 if (tail == cp) 156 if (tail == cp)
175 return -EINVAL; 157 return -EINVAL;
158
176 if ((*tail == '\0') || 159 if ((*tail == '\0') ||
177 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { 160 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
178 *res = val; 161 *res = val;
@@ -285,10 +268,11 @@ EXPORT_SYMBOL(strict_strtoll);
285 268
286static int skip_atoi(const char **s) 269static int skip_atoi(const char **s)
287{ 270{
288 int i=0; 271 int i = 0;
289 272
290 while (isdigit(**s)) 273 while (isdigit(**s))
291 i = i*10 + *((*s)++) - '0'; 274 i = i*10 + *((*s)++) - '0';
275
292 return i; 276 return i;
293} 277}
294 278
@@ -302,7 +286,7 @@ static int skip_atoi(const char **s)
302/* Formats correctly any integer in [0,99999]. 286/* Formats correctly any integer in [0,99999].
303 * Outputs from one to five digits depending on input. 287 * Outputs from one to five digits depending on input.
304 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ 288 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
305static char* put_dec_trunc(char *buf, unsigned q) 289static char *put_dec_trunc(char *buf, unsigned q)
306{ 290{
307 unsigned d3, d2, d1, d0; 291 unsigned d3, d2, d1, d0;
308 d1 = (q>>4) & 0xf; 292 d1 = (q>>4) & 0xf;
@@ -331,14 +315,15 @@ static char* put_dec_trunc(char *buf, unsigned q)
331 d3 = d3 - 10*q; 315 d3 = d3 - 10*q;
332 *buf++ = d3 + '0'; /* next digit */ 316 *buf++ = d3 + '0'; /* next digit */
333 if (q != 0) 317 if (q != 0)
334 *buf++ = q + '0'; /* most sign. digit */ 318 *buf++ = q + '0'; /* most sign. digit */
335 } 319 }
336 } 320 }
337 } 321 }
322
338 return buf; 323 return buf;
339} 324}
340/* Same with if's removed. Always emits five digits */ 325/* Same with if's removed. Always emits five digits */
341static char* put_dec_full(char *buf, unsigned q) 326static char *put_dec_full(char *buf, unsigned q)
342{ 327{
343 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ 328 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
344 /* but anyway, gcc produces better code with full-sized ints */ 329 /* but anyway, gcc produces better code with full-sized ints */
@@ -347,14 +332,15 @@ static char* put_dec_full(char *buf, unsigned q)
347 d2 = (q>>8) & 0xf; 332 d2 = (q>>8) & 0xf;
348 d3 = (q>>12); 333 d3 = (q>>12);
349 334
350 /* Possible ways to approx. divide by 10 */ 335 /*
351 /* gcc -O2 replaces multiply with shifts and adds */ 336 * Possible ways to approx. divide by 10
352 // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) 337 * gcc -O2 replaces multiply with shifts and adds
353 // (x * 0x67) >> 10: 1100111 338 * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
354 // (x * 0x34) >> 9: 110100 - same 339 * (x * 0x67) >> 10: 1100111
355 // (x * 0x1a) >> 8: 11010 - same 340 * (x * 0x34) >> 9: 110100 - same
356 // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) 341 * (x * 0x1a) >> 8: 11010 - same
357 342 * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
343 */
358 d0 = 6*(d3 + d2 + d1) + (q & 0xf); 344 d0 = 6*(d3 + d2 + d1) + (q & 0xf);
359 q = (d0 * 0xcd) >> 11; 345 q = (d0 * 0xcd) >> 11;
360 d0 = d0 - 10*q; 346 d0 = d0 - 10*q;
@@ -375,10 +361,11 @@ static char* put_dec_full(char *buf, unsigned q)
375 d3 = d3 - 10*q; 361 d3 = d3 - 10*q;
376 *buf++ = d3 + '0'; 362 *buf++ = d3 + '0';
377 *buf++ = q + '0'; 363 *buf++ = q + '0';
364
378 return buf; 365 return buf;
379} 366}
380/* No inlining helps gcc to use registers better */ 367/* No inlining helps gcc to use registers better */
381static noinline char* put_dec(char *buf, unsigned long long num) 368static noinline char *put_dec(char *buf, unsigned long long num)
382{ 369{
383 while (1) { 370 while (1) {
384 unsigned rem; 371 unsigned rem;
@@ -448,9 +435,9 @@ static char *number(char *buf, char *end, unsigned long long num,
448 spec.flags &= ~ZEROPAD; 435 spec.flags &= ~ZEROPAD;
449 sign = 0; 436 sign = 0;
450 if (spec.flags & SIGN) { 437 if (spec.flags & SIGN) {
451 if ((signed long long) num < 0) { 438 if ((signed long long)num < 0) {
452 sign = '-'; 439 sign = '-';
453 num = - (signed long long) num; 440 num = -(signed long long)num;
454 spec.field_width--; 441 spec.field_width--;
455 } else if (spec.flags & PLUS) { 442 } else if (spec.flags & PLUS) {
456 sign = '+'; 443 sign = '+';
@@ -478,7 +465,9 @@ static char *number(char *buf, char *end, unsigned long long num,
478 else if (spec.base != 10) { /* 8 or 16 */ 465 else if (spec.base != 10) { /* 8 or 16 */
479 int mask = spec.base - 1; 466 int mask = spec.base - 1;
480 int shift = 3; 467 int shift = 3;
481 if (spec.base == 16) shift = 4; 468
469 if (spec.base == 16)
470 shift = 4;
482 do { 471 do {
483 tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 472 tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
484 num >>= shift; 473 num >>= shift;
@@ -493,7 +482,7 @@ static char *number(char *buf, char *end, unsigned long long num,
493 /* leading space padding */ 482 /* leading space padding */
494 spec.field_width -= spec.precision; 483 spec.field_width -= spec.precision;
495 if (!(spec.flags & (ZEROPAD+LEFT))) { 484 if (!(spec.flags & (ZEROPAD+LEFT))) {
496 while(--spec.field_width >= 0) { 485 while (--spec.field_width >= 0) {
497 if (buf < end) 486 if (buf < end)
498 *buf = ' '; 487 *buf = ' ';
499 ++buf; 488 ++buf;
@@ -543,15 +532,16 @@ static char *number(char *buf, char *end, unsigned long long num,
543 *buf = ' '; 532 *buf = ' ';
544 ++buf; 533 ++buf;
545 } 534 }
535
546 return buf; 536 return buf;
547} 537}
548 538
549static char *string(char *buf, char *end, char *s, struct printf_spec spec) 539static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
550{ 540{
551 int len, i; 541 int len, i;
552 542
553 if ((unsigned long)s < PAGE_SIZE) 543 if ((unsigned long)s < PAGE_SIZE)
554 s = "<NULL>"; 544 s = "(null)";
555 545
556 len = strnlen(s, spec.precision); 546 len = strnlen(s, spec.precision);
557 547
@@ -572,6 +562,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
572 *buf = ' '; 562 *buf = ' ';
573 ++buf; 563 ++buf;
574 } 564 }
565
575 return buf; 566 return buf;
576} 567}
577 568
@@ -585,11 +576,13 @@ static char *symbol_string(char *buf, char *end, void *ptr,
585 sprint_symbol(sym, value); 576 sprint_symbol(sym, value);
586 else 577 else
587 kallsyms_lookup(value, NULL, NULL, NULL, sym); 578 kallsyms_lookup(value, NULL, NULL, NULL, sym);
579
588 return string(buf, end, sym, spec); 580 return string(buf, end, sym, spec);
589#else 581#else
590 spec.field_width = 2*sizeof(void *); 582 spec.field_width = 2 * sizeof(void *);
591 spec.flags |= SPECIAL | SMALL | ZEROPAD; 583 spec.flags |= SPECIAL | SMALL | ZEROPAD;
592 spec.base = 16; 584 spec.base = 16;
585
593 return number(buf, end, value, spec); 586 return number(buf, end, value, spec);
594#endif 587#endif
595} 588}
@@ -718,22 +711,19 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
718 if (i < 3) 711 if (i < 3)
719 *p++ = '.'; 712 *p++ = '.';
720 } 713 }
721
722 *p = '\0'; 714 *p = '\0';
715
723 return p; 716 return p;
724} 717}
725 718
726static char *ip6_compressed_string(char *p, const char *addr) 719static char *ip6_compressed_string(char *p, const char *addr)
727{ 720{
728 int i; 721 int i, j, range;
729 int j;
730 int range;
731 unsigned char zerolength[8]; 722 unsigned char zerolength[8];
732 int longest = 1; 723 int longest = 1;
733 int colonpos = -1; 724 int colonpos = -1;
734 u16 word; 725 u16 word;
735 u8 hi; 726 u8 hi, lo;
736 u8 lo;
737 bool needcolon = false; 727 bool needcolon = false;
738 bool useIPv4; 728 bool useIPv4;
739 struct in6_addr in6; 729 struct in6_addr in6;
@@ -787,8 +777,9 @@ static char *ip6_compressed_string(char *p, const char *addr)
787 p = pack_hex_byte(p, hi); 777 p = pack_hex_byte(p, hi);
788 else 778 else
789 *p++ = hex_asc_lo(hi); 779 *p++ = hex_asc_lo(hi);
780 p = pack_hex_byte(p, lo);
790 } 781 }
791 if (hi || lo > 0x0f) 782 else if (lo > 0x0f)
792 p = pack_hex_byte(p, lo); 783 p = pack_hex_byte(p, lo);
793 else 784 else
794 *p++ = hex_asc_lo(lo); 785 *p++ = hex_asc_lo(lo);
@@ -800,22 +791,23 @@ static char *ip6_compressed_string(char *p, const char *addr)
800 *p++ = ':'; 791 *p++ = ':';
801 p = ip4_string(p, &in6.s6_addr[12], false); 792 p = ip4_string(p, &in6.s6_addr[12], false);
802 } 793 }
803
804 *p = '\0'; 794 *p = '\0';
795
805 return p; 796 return p;
806} 797}
807 798
808static char *ip6_string(char *p, const char *addr, const char *fmt) 799static char *ip6_string(char *p, const char *addr, const char *fmt)
809{ 800{
810 int i; 801 int i;
802
811 for (i = 0; i < 8; i++) { 803 for (i = 0; i < 8; i++) {
812 p = pack_hex_byte(p, *addr++); 804 p = pack_hex_byte(p, *addr++);
813 p = pack_hex_byte(p, *addr++); 805 p = pack_hex_byte(p, *addr++);
814 if (fmt[0] == 'I' && i != 7) 806 if (fmt[0] == 'I' && i != 7)
815 *p++ = ':'; 807 *p++ = ':';
816 } 808 }
817
818 *p = '\0'; 809 *p = '\0';
810
819 return p; 811 return p;
820} 812}
821 813
@@ -842,6 +834,52 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
842 return string(buf, end, ip4_addr, spec); 834 return string(buf, end, ip4_addr, spec);
843} 835}
844 836
837static char *uuid_string(char *buf, char *end, const u8 *addr,
838 struct printf_spec spec, const char *fmt)
839{
840 char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
841 char *p = uuid;
842 int i;
843 static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
844 static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
845 const u8 *index = be;
846 bool uc = false;
847
848 switch (*(++fmt)) {
849 case 'L':
850 uc = true; /* fall-through */
851 case 'l':
852 index = le;
853 break;
854 case 'B':
855 uc = true;
856 break;
857 }
858
859 for (i = 0; i < 16; i++) {
860 p = pack_hex_byte(p, addr[index[i]]);
861 switch (i) {
862 case 3:
863 case 5:
864 case 7:
865 case 9:
866 *p++ = '-';
867 break;
868 }
869 }
870
871 *p = 0;
872
873 if (uc) {
874 p = uuid;
875 do {
876 *p = toupper(*p);
877 } while (*(++p));
878 }
879
880 return string(buf, end, uuid, spec);
881}
882
845/* 883/*
846 * Show a '%p' thing. A kernel extension is that the '%p' is followed 884 * Show a '%p' thing. A kernel extension is that the '%p' is followed
847 * by an extra set of alphanumeric characters that are extended format 885 * by an extra set of alphanumeric characters that are extended format
@@ -866,6 +904,18 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
866 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 904 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
867 * - 'I6c' for IPv6 addresses printed as specified by 905 * - 'I6c' for IPv6 addresses printed as specified by
868 * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt 906 * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
907 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
908 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
909 * Options for %pU are:
910 * b big endian lower case hex (default)
911 * B big endian UPPER case hex
912 * l little endian lower case hex
913 * L little endian UPPER case hex
914 * big endian output byte order is:
915 * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
916 * little endian output byte order is:
917 * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
918 *
869 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 919 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
870 * function pointers are really function descriptors, which contain a 920 * function pointers are really function descriptors, which contain a
871 * pointer to the real address. 921 * pointer to the real address.
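The comment block above documents the new %pU extension handled by uuid_string(); a hedged usage sketch with a made-up UUID (the expected output follows from the byte orders listed above):

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical 16-byte UUID, only to illustrate the %pU variants. */
static const u8 example_uuid[16] = {
        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
};

static void example(void)
{
        /* big endian, lower case (the default):
         *   12345678-9abc-def0-1234-56789abcdef0 */
        pr_info("uuid: %pUb\n", example_uuid);

        /* little endian, UPPER case:
         *   78563412-BC9A-F0DE-1234-56789ABCDEF0 */
        pr_info("uuid: %pUL\n", example_uuid);
}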
@@ -880,9 +930,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
880 case 'F': 930 case 'F':
881 case 'f': 931 case 'f':
882 ptr = dereference_function_descriptor(ptr); 932 ptr = dereference_function_descriptor(ptr);
883 case 's':
884 /* Fallthrough */ 933 /* Fallthrough */
885 case 'S': 934 case 'S':
935 case 's':
886 return symbol_string(buf, end, ptr, spec, *fmt); 936 return symbol_string(buf, end, ptr, spec, *fmt);
887 case 'R': 937 case 'R':
888 case 'r': 938 case 'r':
@@ -906,6 +956,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
906 return ip4_addr_string(buf, end, ptr, spec, fmt); 956 return ip4_addr_string(buf, end, ptr, spec, fmt);
907 } 957 }
908 break; 958 break;
959 case 'U':
960 return uuid_string(buf, end, ptr, spec, fmt);
909 } 961 }
910 spec.flags |= SMALL; 962 spec.flags |= SMALL;
911 if (spec.field_width == -1) { 963 if (spec.field_width == -1) {
@@ -1023,8 +1075,8 @@ precision:
1023qualifier: 1075qualifier:
1024 /* get the conversion qualifier */ 1076 /* get the conversion qualifier */
1025 spec->qualifier = -1; 1077 spec->qualifier = -1;
1026 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1078 if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
1027 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { 1079 TOLOWER(*fmt) == 'z' || *fmt == 't') {
1028 spec->qualifier = *fmt++; 1080 spec->qualifier = *fmt++;
1029 if (unlikely(spec->qualifier == *fmt)) { 1081 if (unlikely(spec->qualifier == *fmt)) {
1030 if (spec->qualifier == 'l') { 1082 if (spec->qualifier == 'l') {
@@ -1091,7 +1143,7 @@ qualifier:
1091 spec->type = FORMAT_TYPE_LONG; 1143 spec->type = FORMAT_TYPE_LONG;
1092 else 1144 else
1093 spec->type = FORMAT_TYPE_ULONG; 1145 spec->type = FORMAT_TYPE_ULONG;
1094 } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { 1146 } else if (TOLOWER(spec->qualifier) == 'z') {
1095 spec->type = FORMAT_TYPE_SIZE_T; 1147 spec->type = FORMAT_TYPE_SIZE_T;
1096 } else if (spec->qualifier == 't') { 1148 } else if (spec->qualifier == 't') {
1097 spec->type = FORMAT_TYPE_PTRDIFF; 1149 spec->type = FORMAT_TYPE_PTRDIFF;
@@ -1144,8 +1196,7 @@ qualifier:
1144int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 1196int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1145{ 1197{
1146 unsigned long long num; 1198 unsigned long long num;
1147 char *str, *end, c; 1199 char *str, *end;
1148 int read;
1149 struct printf_spec spec = {0}; 1200 struct printf_spec spec = {0};
1150 1201
1151 /* Reject out-of-range values early. Large positive sizes are 1202 /* Reject out-of-range values early. Large positive sizes are
@@ -1164,8 +1215,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1164 1215
1165 while (*fmt) { 1216 while (*fmt) {
1166 const char *old_fmt = fmt; 1217 const char *old_fmt = fmt;
1167 1218 int read = format_decode(fmt, &spec);
1168 read = format_decode(fmt, &spec);
1169 1219
1170 fmt += read; 1220 fmt += read;
1171 1221
@@ -1189,7 +1239,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1189 spec.precision = va_arg(args, int); 1239 spec.precision = va_arg(args, int);
1190 break; 1240 break;
1191 1241
1192 case FORMAT_TYPE_CHAR: 1242 case FORMAT_TYPE_CHAR: {
1243 char c;
1244
1193 if (!(spec.flags & LEFT)) { 1245 if (!(spec.flags & LEFT)) {
1194 while (--spec.field_width > 0) { 1246 while (--spec.field_width > 0) {
1195 if (str < end) 1247 if (str < end)
@@ -1208,6 +1260,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1208 ++str; 1260 ++str;
1209 } 1261 }
1210 break; 1262 break;
1263 }
1211 1264
1212 case FORMAT_TYPE_STR: 1265 case FORMAT_TYPE_STR:
1213 str = string(str, end, va_arg(args, char *), spec); 1266 str = string(str, end, va_arg(args, char *), spec);
@@ -1238,8 +1291,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1238 if (qualifier == 'l') { 1291 if (qualifier == 'l') {
1239 long *ip = va_arg(args, long *); 1292 long *ip = va_arg(args, long *);
1240 *ip = (str - buf); 1293 *ip = (str - buf);
1241 } else if (qualifier == 'Z' || 1294 } else if (TOLOWER(qualifier) == 'z') {
1242 qualifier == 'z') {
1243 size_t *ip = va_arg(args, size_t *); 1295 size_t *ip = va_arg(args, size_t *);
1244 *ip = (str - buf); 1296 *ip = (str - buf);
1245 } else { 1297 } else {
@@ -1322,7 +1374,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
1322{ 1374{
1323 int i; 1375 int i;
1324 1376
1325 i=vsnprintf(buf,size,fmt,args); 1377 i = vsnprintf(buf, size, fmt, args);
1378
1326 return (i >= size) ? (size - 1) : i; 1379 return (i >= size) ? (size - 1) : i;
1327} 1380}
1328EXPORT_SYMBOL(vscnprintf); 1381EXPORT_SYMBOL(vscnprintf);
@@ -1341,14 +1394,15 @@ EXPORT_SYMBOL(vscnprintf);
1341 * 1394 *
1342 * See the vsnprintf() documentation for format string extensions over C99. 1395 * See the vsnprintf() documentation for format string extensions over C99.
1343 */ 1396 */
1344int snprintf(char * buf, size_t size, const char *fmt, ...) 1397int snprintf(char *buf, size_t size, const char *fmt, ...)
1345{ 1398{
1346 va_list args; 1399 va_list args;
1347 int i; 1400 int i;
1348 1401
1349 va_start(args, fmt); 1402 va_start(args, fmt);
1350 i=vsnprintf(buf,size,fmt,args); 1403 i = vsnprintf(buf, size, fmt, args);
1351 va_end(args); 1404 va_end(args);
1405
1352 return i; 1406 return i;
1353} 1407}
1354EXPORT_SYMBOL(snprintf); 1408EXPORT_SYMBOL(snprintf);
@@ -1364,7 +1418,7 @@ EXPORT_SYMBOL(snprintf);
1364 * the trailing '\0'. If @size is <= 0 the function returns 0. 1418 * the trailing '\0'. If @size is <= 0 the function returns 0.
1365 */ 1419 */
1366 1420
1367int scnprintf(char * buf, size_t size, const char *fmt, ...) 1421int scnprintf(char *buf, size_t size, const char *fmt, ...)
1368{ 1422{
1369 va_list args; 1423 va_list args;
1370 int i; 1424 int i;
@@ -1372,6 +1426,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...)
1372 va_start(args, fmt); 1426 va_start(args, fmt);
1373 i = vsnprintf(buf, size, fmt, args); 1427 i = vsnprintf(buf, size, fmt, args);
1374 va_end(args); 1428 va_end(args);
1429
1375 return (i >= size) ? (size - 1) : i; 1430 return (i >= size) ? (size - 1) : i;
1376} 1431}
1377EXPORT_SYMBOL(scnprintf); 1432EXPORT_SYMBOL(scnprintf);
@@ -1409,14 +1464,15 @@ EXPORT_SYMBOL(vsprintf);
1409 * 1464 *
1410 * See the vsnprintf() documentation for format string extensions over C99. 1465 * See the vsnprintf() documentation for format string extensions over C99.
1411 */ 1466 */
1412int sprintf(char * buf, const char *fmt, ...) 1467int sprintf(char *buf, const char *fmt, ...)
1413{ 1468{
1414 va_list args; 1469 va_list args;
1415 int i; 1470 int i;
1416 1471
1417 va_start(args, fmt); 1472 va_start(args, fmt);
1418 i=vsnprintf(buf, INT_MAX, fmt, args); 1473 i = vsnprintf(buf, INT_MAX, fmt, args);
1419 va_end(args); 1474 va_end(args);
1475
1420 return i; 1476 return i;
1421} 1477}
1422EXPORT_SYMBOL(sprintf); 1478EXPORT_SYMBOL(sprintf);
@@ -1449,7 +1505,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
1449{ 1505{
1450 struct printf_spec spec = {0}; 1506 struct printf_spec spec = {0};
1451 char *str, *end; 1507 char *str, *end;
1452 int read;
1453 1508
1454 str = (char *)bin_buf; 1509 str = (char *)bin_buf;
1455 end = (char *)(bin_buf + size); 1510 end = (char *)(bin_buf + size);
@@ -1474,14 +1529,15 @@ do { \
1474 str += sizeof(type); \ 1529 str += sizeof(type); \
1475} while (0) 1530} while (0)
1476 1531
1477
1478 while (*fmt) { 1532 while (*fmt) {
1479 read = format_decode(fmt, &spec); 1533 int read = format_decode(fmt, &spec);
1480 1534
1481 fmt += read; 1535 fmt += read;
1482 1536
1483 switch (spec.type) { 1537 switch (spec.type) {
1484 case FORMAT_TYPE_NONE: 1538 case FORMAT_TYPE_NONE:
1539 case FORMAT_TYPE_INVALID:
1540 case FORMAT_TYPE_PERCENT_CHAR:
1485 break; 1541 break;
1486 1542
1487 case FORMAT_TYPE_WIDTH: 1543 case FORMAT_TYPE_WIDTH:
@@ -1496,13 +1552,14 @@ do { \
1496 case FORMAT_TYPE_STR: { 1552 case FORMAT_TYPE_STR: {
1497 const char *save_str = va_arg(args, char *); 1553 const char *save_str = va_arg(args, char *);
1498 size_t len; 1554 size_t len;
1555
1499 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE 1556 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
1500 || (unsigned long)save_str < PAGE_SIZE) 1557 || (unsigned long)save_str < PAGE_SIZE)
1501 save_str = "<NULL>"; 1558 save_str = "(null)";
1502 len = strlen(save_str); 1559 len = strlen(save_str) + 1;
1503 if (str + len + 1 < end) 1560 if (str + len < end)
1504 memcpy(str, save_str, len + 1); 1561 memcpy(str, save_str, len);
1505 str += len + 1; 1562 str += len;
1506 break; 1563 break;
1507 } 1564 }
1508 1565
@@ -1513,19 +1570,13 @@ do { \
1513 fmt++; 1570 fmt++;
1514 break; 1571 break;
1515 1572
1516 case FORMAT_TYPE_PERCENT_CHAR:
1517 break;
1518
1519 case FORMAT_TYPE_INVALID:
1520 break;
1521
1522 case FORMAT_TYPE_NRCHARS: { 1573 case FORMAT_TYPE_NRCHARS: {
1523 /* skip %n 's argument */ 1574 /* skip %n 's argument */
1524 int qualifier = spec.qualifier; 1575 int qualifier = spec.qualifier;
1525 void *skip_arg; 1576 void *skip_arg;
1526 if (qualifier == 'l') 1577 if (qualifier == 'l')
1527 skip_arg = va_arg(args, long *); 1578 skip_arg = va_arg(args, long *);
1528 else if (qualifier == 'Z' || qualifier == 'z') 1579 else if (TOLOWER(qualifier) == 'z')
1529 skip_arg = va_arg(args, size_t *); 1580 skip_arg = va_arg(args, size_t *);
1530 else 1581 else
1531 skip_arg = va_arg(args, int *); 1582 skip_arg = va_arg(args, int *);
@@ -1561,8 +1612,8 @@ do { \
1561 } 1612 }
1562 } 1613 }
1563 } 1614 }
1564 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1565 1615
1616 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1566#undef save_arg 1617#undef save_arg
1567} 1618}
1568EXPORT_SYMBOL_GPL(vbin_printf); 1619EXPORT_SYMBOL_GPL(vbin_printf);
@@ -1591,11 +1642,9 @@ EXPORT_SYMBOL_GPL(vbin_printf);
1591 */ 1642 */
1592int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) 1643int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1593{ 1644{
1594 unsigned long long num;
1595 char *str, *end, c;
1596 const char *args = (const char *)bin_buf;
1597
1598 struct printf_spec spec = {0}; 1645 struct printf_spec spec = {0};
1646 char *str, *end;
1647 const char *args = (const char *)bin_buf;
1599 1648
1600 if (WARN_ON_ONCE((int) size < 0)) 1649 if (WARN_ON_ONCE((int) size < 0))
1601 return 0; 1650 return 0;
@@ -1625,10 +1674,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1625 } 1674 }
1626 1675
1627 while (*fmt) { 1676 while (*fmt) {
1628 int read;
1629 const char *old_fmt = fmt; 1677 const char *old_fmt = fmt;
1630 1678 int read = format_decode(fmt, &spec);
1631 read = format_decode(fmt, &spec);
1632 1679
1633 fmt += read; 1680 fmt += read;
1634 1681
@@ -1652,7 +1699,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1652 spec.precision = get_arg(int); 1699 spec.precision = get_arg(int);
1653 break; 1700 break;
1654 1701
1655 case FORMAT_TYPE_CHAR: 1702 case FORMAT_TYPE_CHAR: {
1703 char c;
1704
1656 if (!(spec.flags & LEFT)) { 1705 if (!(spec.flags & LEFT)) {
1657 while (--spec.field_width > 0) { 1706 while (--spec.field_width > 0) {
1658 if (str < end) 1707 if (str < end)
@@ -1670,11 +1719,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1670 ++str; 1719 ++str;
1671 } 1720 }
1672 break; 1721 break;
1722 }
1673 1723
1674 case FORMAT_TYPE_STR: { 1724 case FORMAT_TYPE_STR: {
1675 const char *str_arg = args; 1725 const char *str_arg = args;
1676 size_t len = strlen(str_arg); 1726 args += strlen(str_arg) + 1;
1677 args += len + 1;
1678 str = string(str, end, (char *)str_arg, spec); 1727 str = string(str, end, (char *)str_arg, spec);
1679 break; 1728 break;
1680 } 1729 }
@@ -1686,11 +1735,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1686 break; 1735 break;
1687 1736
1688 case FORMAT_TYPE_PERCENT_CHAR: 1737 case FORMAT_TYPE_PERCENT_CHAR:
1689 if (str < end)
1690 *str = '%';
1691 ++str;
1692 break;
1693
1694 case FORMAT_TYPE_INVALID: 1738 case FORMAT_TYPE_INVALID:
1695 if (str < end) 1739 if (str < end)
1696 *str = '%'; 1740 *str = '%';
@@ -1701,15 +1745,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1701 /* skip */ 1745 /* skip */
1702 break; 1746 break;
1703 1747
1704 default: 1748 default: {
1749 unsigned long long num;
1750
1705 switch (spec.type) { 1751 switch (spec.type) {
1706 1752
1707 case FORMAT_TYPE_LONG_LONG: 1753 case FORMAT_TYPE_LONG_LONG:
1708 num = get_arg(long long); 1754 num = get_arg(long long);
1709 break; 1755 break;
1710 case FORMAT_TYPE_ULONG: 1756 case FORMAT_TYPE_ULONG:
1711 num = get_arg(unsigned long);
1712 break;
1713 case FORMAT_TYPE_LONG: 1757 case FORMAT_TYPE_LONG:
1714 num = get_arg(unsigned long); 1758 num = get_arg(unsigned long);
1715 break; 1759 break;
@@ -1739,8 +1783,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1739 } 1783 }
1740 1784
1741 str = number(str, end, num, spec); 1785 str = number(str, end, num, spec);
1742 } 1786 } /* default: */
1743 } 1787 } /* switch(spec.type) */
1788 } /* while(*fmt) */
1744 1789
1745 if (size > 0) { 1790 if (size > 0) {
1746 if (str < end) 1791 if (str < end)
@@ -1774,6 +1819,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
1774 va_start(args, fmt); 1819 va_start(args, fmt);
1775 ret = vbin_printf(bin_buf, size, fmt, args); 1820 ret = vbin_printf(bin_buf, size, fmt, args);
1776 va_end(args); 1821 va_end(args);
1822
1777 return ret; 1823 return ret;
1778} 1824}
1779EXPORT_SYMBOL_GPL(bprintf); 1825EXPORT_SYMBOL_GPL(bprintf);
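bprintf()/vbin_printf() pack the arguments into a u32 buffer, and bstr_printf() later expands them against the same format string; the FORMAT_TYPE_STR change above stores each string with its terminating NUL so the reader can step over it with strlen() + 1. A round-trip sketch (kernel context assumed):

    #include <linux/kernel.h>

    static void bin_printf_example(void)
    {
            u32 bin[64];
            char out[128];

            /* Capture the arguments now... */
            bprintf(bin, ARRAY_SIZE(bin), "pid %d comm %s", 42, "modprobe");

            /* ...and format them later, against the very same format string. */
            bstr_printf(out, sizeof(out), "pid %d comm %s", bin);
    }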
@@ -1786,27 +1832,23 @@ EXPORT_SYMBOL_GPL(bprintf);
1786 * @fmt: format of buffer 1832 * @fmt: format of buffer
1787 * @args: arguments 1833 * @args: arguments
1788 */ 1834 */
1789int vsscanf(const char * buf, const char * fmt, va_list args) 1835int vsscanf(const char *buf, const char *fmt, va_list args)
1790{ 1836{
1791 const char *str = buf; 1837 const char *str = buf;
1792 char *next; 1838 char *next;
1793 char digit; 1839 char digit;
1794 int num = 0; 1840 int num = 0;
1795 int qualifier; 1841 int qualifier, base, field_width;
1796 int base; 1842 bool is_sign;
1797 int field_width;
1798 int is_sign = 0;
1799 1843
1800 while(*fmt && *str) { 1844 while (*fmt && *str) {
1801 /* skip any white space in format */ 1845 /* skip any white space in format */
1802 /* white space in format matches any amount of 1846 /* white space in format matches any amount of
1803 * white space, including none, in the input. 1847 * white space, including none, in the input.
1804 */ 1848 */
1805 if (isspace(*fmt)) { 1849 if (isspace(*fmt)) {
1806 while (isspace(*fmt)) 1850 fmt = skip_spaces(++fmt);
1807 ++fmt; 1851 str = skip_spaces(str);
1808 while (isspace(*str))
1809 ++str;
1810 } 1852 }
1811 1853
1812 /* anything that is not a conversion must match exactly */ 1854 /* anything that is not a conversion must match exactly */
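The open-coded isspace() loops are replaced by skip_spaces(), a lib/string.c helper. A sketch of its assumed behaviour: advance past leading whitespace and return the first non-space character.

    #include <linux/ctype.h>

    /* Sketch of the helper relied on above (assumed semantics). */
    char *skip_spaces(const char *str)
    {
            while (isspace(*str))
                    ++str;
            return (char *)str;
    }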
@@ -1819,7 +1861,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1819 if (!*fmt) 1861 if (!*fmt)
1820 break; 1862 break;
1821 ++fmt; 1863 ++fmt;
1822 1864
1823 /* skip this conversion. 1865 /* skip this conversion.
1824 * advance both strings to next white space 1866 * advance both strings to next white space
1825 */ 1867 */
@@ -1838,8 +1880,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1838 1880
1839 /* get conversion qualifier */ 1881 /* get conversion qualifier */
1840 qualifier = -1; 1882 qualifier = -1;
1841 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1883 if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
1842 *fmt == 'Z' || *fmt == 'z') { 1884 TOLOWER(*fmt) == 'z') {
1843 qualifier = *fmt++; 1885 qualifier = *fmt++;
1844 if (unlikely(qualifier == *fmt)) { 1886 if (unlikely(qualifier == *fmt)) {
1845 if (qualifier == 'h') { 1887 if (qualifier == 'h') {
@@ -1851,16 +1893,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1851 } 1893 }
1852 } 1894 }
1853 } 1895 }
1854 base = 10;
1855 is_sign = 0;
1856 1896
1857 if (!*fmt || !*str) 1897 if (!*fmt || !*str)
1858 break; 1898 break;
1859 1899
1860 switch(*fmt++) { 1900 base = 10;
1901 is_sign = 0;
1902
1903 switch (*fmt++) {
1861 case 'c': 1904 case 'c':
1862 { 1905 {
1863 char *s = (char *) va_arg(args,char*); 1906 char *s = (char *)va_arg(args, char*);
1864 if (field_width == -1) 1907 if (field_width == -1)
1865 field_width = 1; 1908 field_width = 1;
1866 do { 1909 do {
@@ -1871,17 +1914,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1871 continue; 1914 continue;
1872 case 's': 1915 case 's':
1873 { 1916 {
1874 char *s = (char *) va_arg(args, char *); 1917 char *s = (char *)va_arg(args, char *);
1875 if(field_width == -1) 1918 if (field_width == -1)
1876 field_width = INT_MAX; 1919 field_width = INT_MAX;
1877 /* first, skip leading white space in buffer */ 1920 /* first, skip leading white space in buffer */
1878 while (isspace(*str)) 1921 str = skip_spaces(str);
1879 str++;
1880 1922
1881 /* now copy until next white space */ 1923 /* now copy until next white space */
1882 while (*str && !isspace(*str) && field_width--) { 1924 while (*str && !isspace(*str) && field_width--)
1883 *s++ = *str++; 1925 *s++ = *str++;
1884 }
1885 *s = '\0'; 1926 *s = '\0';
1886 num++; 1927 num++;
1887 } 1928 }
@@ -1889,7 +1930,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1889 case 'n': 1930 case 'n':
1890 /* return number of characters read so far */ 1931 /* return number of characters read so far */
1891 { 1932 {
1892 int *i = (int *)va_arg(args,int*); 1933 int *i = (int *)va_arg(args, int*);
1893 *i = str - buf; 1934 *i = str - buf;
1894 } 1935 }
1895 continue; 1936 continue;
@@ -1901,14 +1942,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1901 base = 16; 1942 base = 16;
1902 break; 1943 break;
1903 case 'i': 1944 case 'i':
1904 base = 0; 1945 base = 0;
1905 case 'd': 1946 case 'd':
1906 is_sign = 1; 1947 is_sign = 1;
1907 case 'u': 1948 case 'u':
1908 break; 1949 break;
1909 case '%': 1950 case '%':
1910 /* looking for '%' in str */ 1951 /* looking for '%' in str */
1911 if (*str++ != '%') 1952 if (*str++ != '%')
1912 return num; 1953 return num;
1913 continue; 1954 continue;
1914 default: 1955 default:
@@ -1919,71 +1960,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1919 /* have some sort of integer conversion. 1960 /* have some sort of integer conversion.
1920 * first, skip white space in buffer. 1961 * first, skip white space in buffer.
1921 */ 1962 */
1922 while (isspace(*str)) 1963 str = skip_spaces(str);
1923 str++;
1924 1964
1925 digit = *str; 1965 digit = *str;
1926 if (is_sign && digit == '-') 1966 if (is_sign && digit == '-')
1927 digit = *(str + 1); 1967 digit = *(str + 1);
1928 1968
1929 if (!digit 1969 if (!digit
1930 || (base == 16 && !isxdigit(digit)) 1970 || (base == 16 && !isxdigit(digit))
1931 || (base == 10 && !isdigit(digit)) 1971 || (base == 10 && !isdigit(digit))
1932 || (base == 8 && (!isdigit(digit) || digit > '7')) 1972 || (base == 8 && (!isdigit(digit) || digit > '7'))
1933 || (base == 0 && !isdigit(digit))) 1973 || (base == 0 && !isdigit(digit)))
1934 break; 1974 break;
1935 1975
1936 switch(qualifier) { 1976 switch (qualifier) {
1937 case 'H': /* that's 'hh' in format */ 1977 case 'H': /* that's 'hh' in format */
1938 if (is_sign) { 1978 if (is_sign) {
1939 signed char *s = (signed char *) va_arg(args,signed char *); 1979 signed char *s = (signed char *)va_arg(args, signed char *);
1940 *s = (signed char) simple_strtol(str,&next,base); 1980 *s = (signed char)simple_strtol(str, &next, base);
1941 } else { 1981 } else {
1942 unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); 1982 unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
1943 *s = (unsigned char) simple_strtoul(str, &next, base); 1983 *s = (unsigned char)simple_strtoul(str, &next, base);
1944 } 1984 }
1945 break; 1985 break;
1946 case 'h': 1986 case 'h':
1947 if (is_sign) { 1987 if (is_sign) {
1948 short *s = (short *) va_arg(args,short *); 1988 short *s = (short *)va_arg(args, short *);
1949 *s = (short) simple_strtol(str,&next,base); 1989 *s = (short)simple_strtol(str, &next, base);
1950 } else { 1990 } else {
1951 unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); 1991 unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
1952 *s = (unsigned short) simple_strtoul(str, &next, base); 1992 *s = (unsigned short)simple_strtoul(str, &next, base);
1953 } 1993 }
1954 break; 1994 break;
1955 case 'l': 1995 case 'l':
1956 if (is_sign) { 1996 if (is_sign) {
1957 long *l = (long *) va_arg(args,long *); 1997 long *l = (long *)va_arg(args, long *);
1958 *l = simple_strtol(str,&next,base); 1998 *l = simple_strtol(str, &next, base);
1959 } else { 1999 } else {
1960 unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); 2000 unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
1961 *l = simple_strtoul(str,&next,base); 2001 *l = simple_strtoul(str, &next, base);
1962 } 2002 }
1963 break; 2003 break;
1964 case 'L': 2004 case 'L':
1965 if (is_sign) { 2005 if (is_sign) {
1966 long long *l = (long long*) va_arg(args,long long *); 2006 long long *l = (long long *)va_arg(args, long long *);
1967 *l = simple_strtoll(str,&next,base); 2007 *l = simple_strtoll(str, &next, base);
1968 } else { 2008 } else {
1969 unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); 2009 unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
1970 *l = simple_strtoull(str,&next,base); 2010 *l = simple_strtoull(str, &next, base);
1971 } 2011 }
1972 break; 2012 break;
1973 case 'Z': 2013 case 'Z':
1974 case 'z': 2014 case 'z':
1975 { 2015 {
1976 size_t *s = (size_t*) va_arg(args,size_t*); 2016 size_t *s = (size_t *)va_arg(args, size_t *);
1977 *s = (size_t) simple_strtoul(str,&next,base); 2017 *s = (size_t)simple_strtoul(str, &next, base);
1978 } 2018 }
1979 break; 2019 break;
1980 default: 2020 default:
1981 if (is_sign) { 2021 if (is_sign) {
1982 int *i = (int *) va_arg(args, int*); 2022 int *i = (int *)va_arg(args, int *);
1983 *i = (int) simple_strtol(str,&next,base); 2023 *i = (int)simple_strtol(str, &next, base);
1984 } else { 2024 } else {
1985 unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); 2025 unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
1986 *i = (unsigned int) simple_strtoul(str,&next,base); 2026 *i = (unsigned int)simple_strtoul(str, &next, base);
1987 } 2027 }
1988 break; 2028 break;
1989 } 2029 }
@@ -2014,14 +2054,15 @@ EXPORT_SYMBOL(vsscanf);
2014 * @fmt: formatting of buffer 2054 * @fmt: formatting of buffer
2015 * @...: resulting arguments 2055 * @...: resulting arguments
2016 */ 2056 */
2017int sscanf(const char * buf, const char * fmt, ...) 2057int sscanf(const char *buf, const char *fmt, ...)
2018{ 2058{
2019 va_list args; 2059 va_list args;
2020 int i; 2060 int i;
2021 2061
2022 va_start(args,fmt); 2062 va_start(args, fmt);
2023 i = vsscanf(buf,fmt,args); 2063 i = vsscanf(buf, fmt, args);
2024 va_end(args); 2064 va_end(args);
2065
2025 return i; 2066 return i;
2026} 2067}
2027EXPORT_SYMBOL(sscanf); 2068EXPORT_SYMBOL(sscanf);
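With the qualifier handling above, the kernel's sscanf() accepts the hh/h/l/L/z size modifiers on integer conversions. A small usage sketch:

    #include <linux/kernel.h>
    #include <linux/errno.h>

    static int parse_example(const char *buf)
    {
            int cpu;
            size_t bytes;

            /* "%zu" takes the 'z' qualifier path and stores through a size_t *. */
            if (sscanf(buf, "cpu %d size %zu", &cpu, &bytes) != 2)
                    return -EINVAL;

            pr_info("cpu=%d bytes=%zu\n", cpu, bytes);
            return 0;
    }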
diff --git a/mm/Kconfig b/mm/Kconfig
index 44cf6f0a3a6d..2310984591ed 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -158,11 +158,13 @@ config PAGEFLAGS_EXTENDED
158# Default to 4 for wider testing, though 8 might be more appropriate. 158# Default to 4 for wider testing, though 8 might be more appropriate.
159# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock. 159# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
160# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes. 160# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
161# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
161# 162#
162config SPLIT_PTLOCK_CPUS 163config SPLIT_PTLOCK_CPUS
163 int 164 int
164 default "4096" if ARM && !CPU_CACHE_VIPT 165 default "999999" if ARM && !CPU_CACHE_VIPT
165 default "4096" if PARISC && !PA20 166 default "999999" if PARISC && !PA20
167 default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC
166 default "4" 168 default "4"
167 169
168# 170#
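In effect, "999999" disables the split page-table lock on those configurations: USE_SPLIT_PTLOCKS (in mm_types.h) compares NR_CPUS against SPLIT_PTLOCK_CPUS, and no system reaches that threshold, so the single mm-wide page_table_lock is kept whenever spinlock debugging, or the listed architecture constraints, would enlarge struct page.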
@@ -200,14 +202,6 @@ config VIRT_TO_BUS
200 def_bool y 202 def_bool y
201 depends on !ARCH_NO_VIRT_TO_BUS 203 depends on !ARCH_NO_VIRT_TO_BUS
202 204
203config HAVE_MLOCK
204 bool
205 default y if MMU=y
206
207config HAVE_MLOCKED_PAGE_BIT
208 bool
209 default y if HAVE_MLOCK=y
210
211config MMU_NOTIFIER 205config MMU_NOTIFIER
212 bool 206 bool
213 207
@@ -218,7 +212,7 @@ config KSM
218 Enable Kernel Samepage Merging: KSM periodically scans those areas 212 Enable Kernel Samepage Merging: KSM periodically scans those areas
219 of an application's address space that an app has advised may be 213 of an application's address space that an app has advised may be
220 mergeable. When it finds pages of identical content, it replaces 214 mergeable. When it finds pages of identical content, it replaces
221 the many instances by a single resident page with that content, so 215 the many instances by a single page with that content, so
222 saving memory until one or another app needs to modify the content. 216 saving memory until one or another app needs to modify the content.
223 Recommended for use with KVM, or with other duplicative applications. 217 Recommended for use with KVM, or with other duplicative applications.
224 See Documentation/vm/ksm.txt for more information: KSM is inactive 218 See Documentation/vm/ksm.txt for more information: KSM is inactive
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d1dc23cc7f10..7d1486875e1c 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -432,8 +432,8 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
432 return mark_bootmem(start, end, 1, flags); 432 return mark_bootmem(start, end, 1, flags);
433} 433}
434 434
435static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx, 435static unsigned long __init align_idx(struct bootmem_data *bdata,
436 unsigned long step) 436 unsigned long idx, unsigned long step)
437{ 437{
438 unsigned long base = bdata->node_min_pfn; 438 unsigned long base = bdata->node_min_pfn;
439 439
@@ -445,8 +445,8 @@ static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
445 return ALIGN(base + idx, step) - base; 445 return ALIGN(base + idx, step) - base;
446} 446}
447 447
448static unsigned long align_off(struct bootmem_data *bdata, unsigned long off, 448static unsigned long __init align_off(struct bootmem_data *bdata,
449 unsigned long align) 449 unsigned long off, unsigned long align)
450{ 450{
451 unsigned long base = PFN_PHYS(bdata->node_min_pfn); 451 unsigned long base = PFN_PHYS(bdata->node_min_pfn);
452 452
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5d7601b02874..65f38c218207 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,6 +24,7 @@
24#include <asm/io.h> 24#include <asm/io.h>
25 25
26#include <linux/hugetlb.h> 26#include <linux/hugetlb.h>
27#include <linux/node.h>
27#include "internal.h" 28#include "internal.h"
28 29
29const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; 30const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
@@ -622,42 +623,66 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
622} 623}
623 624
624/* 625/*
625 * Use a helper variable to find the next node and then 626 * common helper functions for hstate_next_node_to_{alloc|free}.
626 * copy it back to next_nid_to_alloc afterwards: 627 * We may have allocated or freed a huge page based on a different
627 * otherwise there's a window in which a racer might 628 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
628 * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node. 629 * be outside of *nodes_allowed. Ensure that we use an allowed
629 * But we don't need to use a spin_lock here: it really 630 * node for alloc or free.
630 * doesn't matter if occasionally a racer chooses the
631 * same nid as we do. Move nid forward in the mask even
632 * if we just successfully allocated a hugepage so that
633 * the next caller gets hugepages on the next node.
634 */ 631 */
635static int hstate_next_node_to_alloc(struct hstate *h) 632static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
636{ 633{
637 int next_nid; 634 nid = next_node(nid, *nodes_allowed);
638 next_nid = next_node(h->next_nid_to_alloc, node_online_map); 635 if (nid == MAX_NUMNODES)
639 if (next_nid == MAX_NUMNODES) 636 nid = first_node(*nodes_allowed);
640 next_nid = first_node(node_online_map); 637 VM_BUG_ON(nid >= MAX_NUMNODES);
641 h->next_nid_to_alloc = next_nid; 638
642 return next_nid; 639 return nid;
640}
641
642static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
643{
644 if (!node_isset(nid, *nodes_allowed))
645 nid = next_node_allowed(nid, nodes_allowed);
646 return nid;
647}
648
649/*
650 * returns the previously saved node ["this node"] from which to
651 * allocate a persistent huge page for the pool and advance the
652 * next node from which to allocate, handling wrap at end of node
653 * mask.
654 */
655static int hstate_next_node_to_alloc(struct hstate *h,
656 nodemask_t *nodes_allowed)
657{
658 int nid;
659
660 VM_BUG_ON(!nodes_allowed);
661
662 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
663 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
664
665 return nid;
643} 666}
644 667
645static int alloc_fresh_huge_page(struct hstate *h) 668static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
646{ 669{
647 struct page *page; 670 struct page *page;
648 int start_nid; 671 int start_nid;
649 int next_nid; 672 int next_nid;
650 int ret = 0; 673 int ret = 0;
651 674
652 start_nid = h->next_nid_to_alloc; 675 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
653 next_nid = start_nid; 676 next_nid = start_nid;
654 677
655 do { 678 do {
656 page = alloc_fresh_huge_page_node(h, next_nid); 679 page = alloc_fresh_huge_page_node(h, next_nid);
657 if (page) 680 if (page) {
658 ret = 1; 681 ret = 1;
659 next_nid = hstate_next_node_to_alloc(h); 682 break;
660 } while (!page && next_nid != start_nid); 683 }
684 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
685 } while (next_nid != start_nid);
661 686
662 if (ret) 687 if (ret)
663 count_vm_event(HTLB_BUDDY_PGALLOC); 688 count_vm_event(HTLB_BUDDY_PGALLOC);
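Worked example (hypothetical values): with nodes_allowed = {0,2,3} and h->next_nid_to_alloc == 1, get_valid_node_allowed() finds node 1 outside the mask and advances to node 2; hstate_next_node_to_alloc() then returns 2 and saves next_node_allowed(2) == 3 for the next caller. After node 3, next_node() hits MAX_NUMNODES and the helper wraps to first_node(), i.e. node 0, so allocation stays round-robin within the allowed mask.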
@@ -668,17 +693,21 @@ static int alloc_fresh_huge_page(struct hstate *h)
668} 693}
669 694
670/* 695/*
671 * helper for free_pool_huge_page() - find next node 696 * helper for free_pool_huge_page() - return the previously saved
672 * from which to free a huge page 697 * node ["this node"] from which to free a huge page. Advance the
698 * next node id whether or not we find a free huge page to free so
699 * that the next attempt to free addresses the next node.
673 */ 700 */
674static int hstate_next_node_to_free(struct hstate *h) 701static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
675{ 702{
676 int next_nid; 703 int nid;
677 next_nid = next_node(h->next_nid_to_free, node_online_map); 704
678 if (next_nid == MAX_NUMNODES) 705 VM_BUG_ON(!nodes_allowed);
679 next_nid = first_node(node_online_map); 706
680 h->next_nid_to_free = next_nid; 707 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
681 return next_nid; 708 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
709
710 return nid;
682} 711}
683 712
684/* 713/*
@@ -687,13 +716,14 @@ static int hstate_next_node_to_free(struct hstate *h)
687 * balanced over allowed nodes. 716 * balanced over allowed nodes.
688 * Called with hugetlb_lock locked. 717 * Called with hugetlb_lock locked.
689 */ 718 */
690static int free_pool_huge_page(struct hstate *h, bool acct_surplus) 719static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
720 bool acct_surplus)
691{ 721{
692 int start_nid; 722 int start_nid;
693 int next_nid; 723 int next_nid;
694 int ret = 0; 724 int ret = 0;
695 725
696 start_nid = h->next_nid_to_free; 726 start_nid = hstate_next_node_to_free(h, nodes_allowed);
697 next_nid = start_nid; 727 next_nid = start_nid;
698 728
699 do { 729 do {
@@ -715,9 +745,10 @@ static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
715 } 745 }
716 update_and_free_page(h, page); 746 update_and_free_page(h, page);
717 ret = 1; 747 ret = 1;
748 break;
718 } 749 }
719 next_nid = hstate_next_node_to_free(h); 750 next_nid = hstate_next_node_to_free(h, nodes_allowed);
720 } while (!ret && next_nid != start_nid); 751 } while (next_nid != start_nid);
721 752
722 return ret; 753 return ret;
723} 754}
@@ -911,14 +942,14 @@ static void return_unused_surplus_pages(struct hstate *h,
911 942
912 /* 943 /*
913 * We want to release as many surplus pages as possible, spread 944 * We want to release as many surplus pages as possible, spread
914 * evenly across all nodes. Iterate across all nodes until we 945 * evenly across all nodes with memory. Iterate across these nodes
915 * can no longer free unreserved surplus pages. This occurs when 946 * until we can no longer free unreserved surplus pages. This occurs
916 * the nodes with surplus pages have no free pages. 947 * when the nodes with surplus pages have no free pages.
917 * free_pool_huge_page() will balance the frees across the 948 * free_pool_huge_page() will balance the freed pages across the
918 * on-line nodes for us and will handle the hstate accounting. 949 * on-line nodes with memory and will handle the hstate accounting.
919 */ 950 */
920 while (nr_pages--) { 951 while (nr_pages--) {
921 if (!free_pool_huge_page(h, 1)) 952 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
922 break; 953 break;
923 } 954 }
924} 955}
@@ -1022,16 +1053,16 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1022int __weak alloc_bootmem_huge_page(struct hstate *h) 1053int __weak alloc_bootmem_huge_page(struct hstate *h)
1023{ 1054{
1024 struct huge_bootmem_page *m; 1055 struct huge_bootmem_page *m;
1025 int nr_nodes = nodes_weight(node_online_map); 1056 int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1026 1057
1027 while (nr_nodes) { 1058 while (nr_nodes) {
1028 void *addr; 1059 void *addr;
1029 1060
1030 addr = __alloc_bootmem_node_nopanic( 1061 addr = __alloc_bootmem_node_nopanic(
1031 NODE_DATA(h->next_nid_to_alloc), 1062 NODE_DATA(hstate_next_node_to_alloc(h,
1063 &node_states[N_HIGH_MEMORY])),
1032 huge_page_size(h), huge_page_size(h), 0); 1064 huge_page_size(h), huge_page_size(h), 0);
1033 1065
1034 hstate_next_node_to_alloc(h);
1035 if (addr) { 1066 if (addr) {
1036 /* 1067 /*
1037 * Use the beginning of the huge page to store the 1068 * Use the beginning of the huge page to store the
@@ -1084,7 +1115,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1084 if (h->order >= MAX_ORDER) { 1115 if (h->order >= MAX_ORDER) {
1085 if (!alloc_bootmem_huge_page(h)) 1116 if (!alloc_bootmem_huge_page(h))
1086 break; 1117 break;
1087 } else if (!alloc_fresh_huge_page(h)) 1118 } else if (!alloc_fresh_huge_page(h,
1119 &node_states[N_HIGH_MEMORY]))
1088 break; 1120 break;
1089 } 1121 }
1090 h->max_huge_pages = i; 1122 h->max_huge_pages = i;
@@ -1126,14 +1158,15 @@ static void __init report_hugepages(void)
1126} 1158}
1127 1159
1128#ifdef CONFIG_HIGHMEM 1160#ifdef CONFIG_HIGHMEM
1129static void try_to_free_low(struct hstate *h, unsigned long count) 1161static void try_to_free_low(struct hstate *h, unsigned long count,
1162 nodemask_t *nodes_allowed)
1130{ 1163{
1131 int i; 1164 int i;
1132 1165
1133 if (h->order >= MAX_ORDER) 1166 if (h->order >= MAX_ORDER)
1134 return; 1167 return;
1135 1168
1136 for (i = 0; i < MAX_NUMNODES; ++i) { 1169 for_each_node_mask(i, *nodes_allowed) {
1137 struct page *page, *next; 1170 struct page *page, *next;
1138 struct list_head *freel = &h->hugepage_freelists[i]; 1171 struct list_head *freel = &h->hugepage_freelists[i];
1139 list_for_each_entry_safe(page, next, freel, lru) { 1172 list_for_each_entry_safe(page, next, freel, lru) {
@@ -1149,7 +1182,8 @@ static void try_to_free_low(struct hstate *h, unsigned long count)
1149 } 1182 }
1150} 1183}
1151#else 1184#else
1152static inline void try_to_free_low(struct hstate *h, unsigned long count) 1185static inline void try_to_free_low(struct hstate *h, unsigned long count,
1186 nodemask_t *nodes_allowed)
1153{ 1187{
1154} 1188}
1155#endif 1189#endif
@@ -1159,7 +1193,8 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
1159 * balanced by operating on them in a round-robin fashion. 1193 * balanced by operating on them in a round-robin fashion.
1160 * Returns 1 if an adjustment was made. 1194 * Returns 1 if an adjustment was made.
1161 */ 1195 */
1162static int adjust_pool_surplus(struct hstate *h, int delta) 1196static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1197 int delta)
1163{ 1198{
1164 int start_nid, next_nid; 1199 int start_nid, next_nid;
1165 int ret = 0; 1200 int ret = 0;
@@ -1167,29 +1202,33 @@ static int adjust_pool_surplus(struct hstate *h, int delta)
1167 VM_BUG_ON(delta != -1 && delta != 1); 1202 VM_BUG_ON(delta != -1 && delta != 1);
1168 1203
1169 if (delta < 0) 1204 if (delta < 0)
1170 start_nid = h->next_nid_to_alloc; 1205 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1171 else 1206 else
1172 start_nid = h->next_nid_to_free; 1207 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1173 next_nid = start_nid; 1208 next_nid = start_nid;
1174 1209
1175 do { 1210 do {
1176 int nid = next_nid; 1211 int nid = next_nid;
1177 if (delta < 0) { 1212 if (delta < 0) {
1178 next_nid = hstate_next_node_to_alloc(h);
1179 /* 1213 /*
1180 * To shrink on this node, there must be a surplus page 1214 * To shrink on this node, there must be a surplus page
1181 */ 1215 */
1182 if (!h->surplus_huge_pages_node[nid]) 1216 if (!h->surplus_huge_pages_node[nid]) {
1217 next_nid = hstate_next_node_to_alloc(h,
1218 nodes_allowed);
1183 continue; 1219 continue;
1220 }
1184 } 1221 }
1185 if (delta > 0) { 1222 if (delta > 0) {
1186 next_nid = hstate_next_node_to_free(h);
1187 /* 1223 /*
1188 * Surplus cannot exceed the total number of pages 1224 * Surplus cannot exceed the total number of pages
1189 */ 1225 */
1190 if (h->surplus_huge_pages_node[nid] >= 1226 if (h->surplus_huge_pages_node[nid] >=
1191 h->nr_huge_pages_node[nid]) 1227 h->nr_huge_pages_node[nid]) {
1228 next_nid = hstate_next_node_to_free(h,
1229 nodes_allowed);
1192 continue; 1230 continue;
1231 }
1193 } 1232 }
1194 1233
1195 h->surplus_huge_pages += delta; 1234 h->surplus_huge_pages += delta;
@@ -1202,7 +1241,8 @@ static int adjust_pool_surplus(struct hstate *h, int delta)
1202} 1241}
1203 1242
1204#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 1243#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1205static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) 1244static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1245 nodemask_t *nodes_allowed)
1206{ 1246{
1207 unsigned long min_count, ret; 1247 unsigned long min_count, ret;
1208 1248
@@ -1222,7 +1262,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
1222 */ 1262 */
1223 spin_lock(&hugetlb_lock); 1263 spin_lock(&hugetlb_lock);
1224 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 1264 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1225 if (!adjust_pool_surplus(h, -1)) 1265 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1226 break; 1266 break;
1227 } 1267 }
1228 1268
@@ -1233,11 +1273,14 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
1233 * and reducing the surplus. 1273 * and reducing the surplus.
1234 */ 1274 */
1235 spin_unlock(&hugetlb_lock); 1275 spin_unlock(&hugetlb_lock);
1236 ret = alloc_fresh_huge_page(h); 1276 ret = alloc_fresh_huge_page(h, nodes_allowed);
1237 spin_lock(&hugetlb_lock); 1277 spin_lock(&hugetlb_lock);
1238 if (!ret) 1278 if (!ret)
1239 goto out; 1279 goto out;
1240 1280
1281 /* Bail for signals. Probably ctrl-c from user */
1282 if (signal_pending(current))
1283 goto out;
1241 } 1284 }
1242 1285
1243 /* 1286 /*
@@ -1257,13 +1300,13 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
1257 */ 1300 */
1258 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 1301 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1259 min_count = max(count, min_count); 1302 min_count = max(count, min_count);
1260 try_to_free_low(h, min_count); 1303 try_to_free_low(h, min_count, nodes_allowed);
1261 while (min_count < persistent_huge_pages(h)) { 1304 while (min_count < persistent_huge_pages(h)) {
1262 if (!free_pool_huge_page(h, 0)) 1305 if (!free_pool_huge_page(h, nodes_allowed, 0))
1263 break; 1306 break;
1264 } 1307 }
1265 while (count < persistent_huge_pages(h)) { 1308 while (count < persistent_huge_pages(h)) {
1266 if (!adjust_pool_surplus(h, 1)) 1309 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1267 break; 1310 break;
1268 } 1311 }
1269out: 1312out:
@@ -1282,43 +1325,117 @@ out:
1282static struct kobject *hugepages_kobj; 1325static struct kobject *hugepages_kobj;
1283static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1326static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1284 1327
1285static struct hstate *kobj_to_hstate(struct kobject *kobj) 1328static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1329
1330static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1286{ 1331{
1287 int i; 1332 int i;
1333
1288 for (i = 0; i < HUGE_MAX_HSTATE; i++) 1334 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1289 if (hstate_kobjs[i] == kobj) 1335 if (hstate_kobjs[i] == kobj) {
1336 if (nidp)
1337 *nidp = NUMA_NO_NODE;
1290 return &hstates[i]; 1338 return &hstates[i];
1291 BUG(); 1339 }
1292 return NULL; 1340
1341 return kobj_to_node_hstate(kobj, nidp);
1293} 1342}
1294 1343
1295static ssize_t nr_hugepages_show(struct kobject *kobj, 1344static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1296 struct kobj_attribute *attr, char *buf) 1345 struct kobj_attribute *attr, char *buf)
1297{ 1346{
1298 struct hstate *h = kobj_to_hstate(kobj); 1347 struct hstate *h;
1299 return sprintf(buf, "%lu\n", h->nr_huge_pages); 1348 unsigned long nr_huge_pages;
1349 int nid;
1350
1351 h = kobj_to_hstate(kobj, &nid);
1352 if (nid == NUMA_NO_NODE)
1353 nr_huge_pages = h->nr_huge_pages;
1354 else
1355 nr_huge_pages = h->nr_huge_pages_node[nid];
1356
1357 return sprintf(buf, "%lu\n", nr_huge_pages);
1300} 1358}
1301static ssize_t nr_hugepages_store(struct kobject *kobj, 1359static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1302 struct kobj_attribute *attr, const char *buf, size_t count) 1360 struct kobject *kobj, struct kobj_attribute *attr,
1361 const char *buf, size_t len)
1303{ 1362{
1304 int err; 1363 int err;
1305 unsigned long input; 1364 int nid;
1306 struct hstate *h = kobj_to_hstate(kobj); 1365 unsigned long count;
1366 struct hstate *h;
1367 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1307 1368
1308 err = strict_strtoul(buf, 10, &input); 1369 err = strict_strtoul(buf, 10, &count);
1309 if (err) 1370 if (err)
1310 return 0; 1371 return 0;
1311 1372
1312 h->max_huge_pages = set_max_huge_pages(h, input); 1373 h = kobj_to_hstate(kobj, &nid);
1374 if (nid == NUMA_NO_NODE) {
1375 /*
1376 * global hstate attribute
1377 */
1378 if (!(obey_mempolicy &&
1379 init_nodemask_of_mempolicy(nodes_allowed))) {
1380 NODEMASK_FREE(nodes_allowed);
1381 nodes_allowed = &node_states[N_HIGH_MEMORY];
1382 }
1383 } else if (nodes_allowed) {
1384 /*
1385 * per node hstate attribute: adjust count to global,
1386 * but restrict alloc/free to the specified node.
1387 */
1388 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1389 init_nodemask_of_node(nodes_allowed, nid);
1390 } else
1391 nodes_allowed = &node_states[N_HIGH_MEMORY];
1392
1393 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1313 1394
1314 return count; 1395 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1396 NODEMASK_FREE(nodes_allowed);
1397
1398 return len;
1399}
1400
1401static ssize_t nr_hugepages_show(struct kobject *kobj,
1402 struct kobj_attribute *attr, char *buf)
1403{
1404 return nr_hugepages_show_common(kobj, attr, buf);
1405}
1406
1407static ssize_t nr_hugepages_store(struct kobject *kobj,
1408 struct kobj_attribute *attr, const char *buf, size_t len)
1409{
1410 return nr_hugepages_store_common(false, kobj, attr, buf, len);
1315} 1411}
1316HSTATE_ATTR(nr_hugepages); 1412HSTATE_ATTR(nr_hugepages);
1317 1413
1414#ifdef CONFIG_NUMA
1415
1416/*
1417 * hstate attribute for optionally mempolicy-based constraint on persistent
1418 * huge page alloc/free.
1419 */
1420static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1421 struct kobj_attribute *attr, char *buf)
1422{
1423 return nr_hugepages_show_common(kobj, attr, buf);
1424}
1425
1426static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1427 struct kobj_attribute *attr, const char *buf, size_t len)
1428{
1429 return nr_hugepages_store_common(true, kobj, attr, buf, len);
1430}
1431HSTATE_ATTR(nr_hugepages_mempolicy);
1432#endif
1433
1434
1318static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1435static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1319 struct kobj_attribute *attr, char *buf) 1436 struct kobj_attribute *attr, char *buf)
1320{ 1437{
1321 struct hstate *h = kobj_to_hstate(kobj); 1438 struct hstate *h = kobj_to_hstate(kobj, NULL);
1322 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1439 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1323} 1440}
1324static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1441static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
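Usage sketch (paths assumed from this series): writing the global /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages adjusts the pool across all nodes with memory, or across the writing task's mempolicy nodes via nr_hugepages_mempolicy, while writing a per-node copy such as /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages adds the delta to the global count but constrains the allocation or freeing to node 1, as nr_hugepages_store_common() does above.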
@@ -1326,7 +1443,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1326{ 1443{
1327 int err; 1444 int err;
1328 unsigned long input; 1445 unsigned long input;
1329 struct hstate *h = kobj_to_hstate(kobj); 1446 struct hstate *h = kobj_to_hstate(kobj, NULL);
1330 1447
1331 err = strict_strtoul(buf, 10, &input); 1448 err = strict_strtoul(buf, 10, &input);
1332 if (err) 1449 if (err)
@@ -1343,15 +1460,24 @@ HSTATE_ATTR(nr_overcommit_hugepages);
1343static ssize_t free_hugepages_show(struct kobject *kobj, 1460static ssize_t free_hugepages_show(struct kobject *kobj,
1344 struct kobj_attribute *attr, char *buf) 1461 struct kobj_attribute *attr, char *buf)
1345{ 1462{
1346 struct hstate *h = kobj_to_hstate(kobj); 1463 struct hstate *h;
1347 return sprintf(buf, "%lu\n", h->free_huge_pages); 1464 unsigned long free_huge_pages;
1465 int nid;
1466
1467 h = kobj_to_hstate(kobj, &nid);
1468 if (nid == NUMA_NO_NODE)
1469 free_huge_pages = h->free_huge_pages;
1470 else
1471 free_huge_pages = h->free_huge_pages_node[nid];
1472
1473 return sprintf(buf, "%lu\n", free_huge_pages);
1348} 1474}
1349HSTATE_ATTR_RO(free_hugepages); 1475HSTATE_ATTR_RO(free_hugepages);
1350 1476
1351static ssize_t resv_hugepages_show(struct kobject *kobj, 1477static ssize_t resv_hugepages_show(struct kobject *kobj,
1352 struct kobj_attribute *attr, char *buf) 1478 struct kobj_attribute *attr, char *buf)
1353{ 1479{
1354 struct hstate *h = kobj_to_hstate(kobj); 1480 struct hstate *h = kobj_to_hstate(kobj, NULL);
1355 return sprintf(buf, "%lu\n", h->resv_huge_pages); 1481 return sprintf(buf, "%lu\n", h->resv_huge_pages);
1356} 1482}
1357HSTATE_ATTR_RO(resv_hugepages); 1483HSTATE_ATTR_RO(resv_hugepages);
@@ -1359,8 +1485,17 @@ HSTATE_ATTR_RO(resv_hugepages);
1359static ssize_t surplus_hugepages_show(struct kobject *kobj, 1485static ssize_t surplus_hugepages_show(struct kobject *kobj,
1360 struct kobj_attribute *attr, char *buf) 1486 struct kobj_attribute *attr, char *buf)
1361{ 1487{
1362 struct hstate *h = kobj_to_hstate(kobj); 1488 struct hstate *h;
1363 return sprintf(buf, "%lu\n", h->surplus_huge_pages); 1489 unsigned long surplus_huge_pages;
1490 int nid;
1491
1492 h = kobj_to_hstate(kobj, &nid);
1493 if (nid == NUMA_NO_NODE)
1494 surplus_huge_pages = h->surplus_huge_pages;
1495 else
1496 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1497
1498 return sprintf(buf, "%lu\n", surplus_huge_pages);
1364} 1499}
1365HSTATE_ATTR_RO(surplus_hugepages); 1500HSTATE_ATTR_RO(surplus_hugepages);
1366 1501
@@ -1370,6 +1505,9 @@ static struct attribute *hstate_attrs[] = {
1370 &free_hugepages_attr.attr, 1505 &free_hugepages_attr.attr,
1371 &resv_hugepages_attr.attr, 1506 &resv_hugepages_attr.attr,
1372 &surplus_hugepages_attr.attr, 1507 &surplus_hugepages_attr.attr,
1508#ifdef CONFIG_NUMA
1509 &nr_hugepages_mempolicy_attr.attr,
1510#endif
1373 NULL, 1511 NULL,
1374}; 1512};
1375 1513
@@ -1377,19 +1515,21 @@ static struct attribute_group hstate_attr_group = {
1377 .attrs = hstate_attrs, 1515 .attrs = hstate_attrs,
1378}; 1516};
1379 1517
1380static int __init hugetlb_sysfs_add_hstate(struct hstate *h) 1518static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
1519 struct kobject *parent,
1520 struct kobject **hstate_kobjs,
1521 struct attribute_group *hstate_attr_group)
1381{ 1522{
1382 int retval; 1523 int retval;
1524 int hi = h - hstates;
1383 1525
1384 hstate_kobjs[h - hstates] = kobject_create_and_add(h->name, 1526 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1385 hugepages_kobj); 1527 if (!hstate_kobjs[hi])
1386 if (!hstate_kobjs[h - hstates])
1387 return -ENOMEM; 1528 return -ENOMEM;
1388 1529
1389 retval = sysfs_create_group(hstate_kobjs[h - hstates], 1530 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1390 &hstate_attr_group);
1391 if (retval) 1531 if (retval)
1392 kobject_put(hstate_kobjs[h - hstates]); 1532 kobject_put(hstate_kobjs[hi]);
1393 1533
1394 return retval; 1534 return retval;
1395} 1535}
@@ -1404,17 +1544,184 @@ static void __init hugetlb_sysfs_init(void)
1404 return; 1544 return;
1405 1545
1406 for_each_hstate(h) { 1546 for_each_hstate(h) {
1407 err = hugetlb_sysfs_add_hstate(h); 1547 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1548 hstate_kobjs, &hstate_attr_group);
1408 if (err) 1549 if (err)
1409 printk(KERN_ERR "Hugetlb: Unable to add hstate %s", 1550 printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1410 h->name); 1551 h->name);
1411 } 1552 }
1412} 1553}
1413 1554
1555#ifdef CONFIG_NUMA
1556
1557/*
1558 * node_hstate/s - associate per node hstate attributes, via their kobjects,
1559 * with node sysdevs in node_devices[] using a parallel array. The array
1560 * index of a node sysdev or _hstate == node id.
1561 * This is here to avoid any static dependency of the node sysdev driver, in
1562 * the base kernel, on the hugetlb module.
1563 */
1564struct node_hstate {
1565 struct kobject *hugepages_kobj;
1566 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1567};
1568struct node_hstate node_hstates[MAX_NUMNODES];
1569
1570/*
1571 * A subset of global hstate attributes for node sysdevs
1572 */
1573static struct attribute *per_node_hstate_attrs[] = {
1574 &nr_hugepages_attr.attr,
1575 &free_hugepages_attr.attr,
1576 &surplus_hugepages_attr.attr,
1577 NULL,
1578};
1579
1580static struct attribute_group per_node_hstate_attr_group = {
1581 .attrs = per_node_hstate_attrs,
1582};
1583
1584/*
1585 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1586 * Returns node id via non-NULL nidp.
1587 */
1588static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1589{
1590 int nid;
1591
1592 for (nid = 0; nid < nr_node_ids; nid++) {
1593 struct node_hstate *nhs = &node_hstates[nid];
1594 int i;
1595 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1596 if (nhs->hstate_kobjs[i] == kobj) {
1597 if (nidp)
1598 *nidp = nid;
1599 return &hstates[i];
1600 }
1601 }
1602
1603 BUG();
1604 return NULL;
1605}
1606
1607/*
1608 * Unregister hstate attributes from a single node sysdev.
1609 * No-op if no hstate attributes attached.
1610 */
1611void hugetlb_unregister_node(struct node *node)
1612{
1613 struct hstate *h;
1614 struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1615
1616 if (!nhs->hugepages_kobj)
1617 return; /* no hstate attributes */
1618
1619 for_each_hstate(h)
1620 if (nhs->hstate_kobjs[h - hstates]) {
1621 kobject_put(nhs->hstate_kobjs[h - hstates]);
1622 nhs->hstate_kobjs[h - hstates] = NULL;
1623 }
1624
1625 kobject_put(nhs->hugepages_kobj);
1626 nhs->hugepages_kobj = NULL;
1627}
1628
1629/*
1630 * hugetlb module exit: unregister hstate attributes from node sysdevs
1631 * that have them.
1632 */
1633static void hugetlb_unregister_all_nodes(void)
1634{
1635 int nid;
1636
1637 /*
1638 * disable node sysdev registrations.
1639 */
1640 register_hugetlbfs_with_node(NULL, NULL);
1641
1642 /*
1643 * remove hstate attributes from any nodes that have them.
1644 */
1645 for (nid = 0; nid < nr_node_ids; nid++)
1646 hugetlb_unregister_node(&node_devices[nid]);
1647}
1648
1649/*
1650 * Register hstate attributes for a single node sysdev.
1651 * No-op if attributes already registered.
1652 */
1653void hugetlb_register_node(struct node *node)
1654{
1655 struct hstate *h;
1656 struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1657 int err;
1658
1659 if (nhs->hugepages_kobj)
1660 return; /* already allocated */
1661
1662 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1663 &node->sysdev.kobj);
1664 if (!nhs->hugepages_kobj)
1665 return;
1666
1667 for_each_hstate(h) {
1668 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1669 nhs->hstate_kobjs,
1670 &per_node_hstate_attr_group);
1671 if (err) {
1672 printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1673 " for node %d\n",
1674 h->name, node->sysdev.id);
1675 hugetlb_unregister_node(node);
1676 break;
1677 }
1678 }
1679}
1680
1681/*
1682 * hugetlb init time: register hstate attributes for all registered node
1683 * sysdevs of nodes that have memory. All on-line nodes should have
1684 * registered their associated sysdev by this time.
1685 */
1686static void hugetlb_register_all_nodes(void)
1687{
1688 int nid;
1689
1690 for_each_node_state(nid, N_HIGH_MEMORY) {
1691 struct node *node = &node_devices[nid];
1692 if (node->sysdev.id == nid)
1693 hugetlb_register_node(node);
1694 }
1695
1696 /*
1697 * Let the node sysdev driver know we're here so it can
1698 * [un]register hstate attributes on node hotplug.
1699 */
1700 register_hugetlbfs_with_node(hugetlb_register_node,
1701 hugetlb_unregister_node);
1702}
1703#else /* !CONFIG_NUMA */
1704
1705static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1706{
1707 BUG();
1708 if (nidp)
1709 *nidp = -1;
1710 return NULL;
1711}
1712
1713static void hugetlb_unregister_all_nodes(void) { }
1714
1715static void hugetlb_register_all_nodes(void) { }
1716
1717#endif
1718
1414static void __exit hugetlb_exit(void) 1719static void __exit hugetlb_exit(void)
1415{ 1720{
1416 struct hstate *h; 1721 struct hstate *h;
1417 1722
1723 hugetlb_unregister_all_nodes();
1724
1418 for_each_hstate(h) { 1725 for_each_hstate(h) {
1419 kobject_put(hstate_kobjs[h - hstates]); 1726 kobject_put(hstate_kobjs[h - hstates]);
1420 } 1727 }
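Example of the hotplug flow implied above: when a node with memory comes online, the node sysdev driver invokes the hugetlb_register_node() callback handed to it via register_hugetlbfs_with_node(), creating the per-node hugepages/ kobject and its attribute group; node removal goes through hugetlb_unregister_node(), which drops those kobjects again.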
@@ -1449,6 +1756,8 @@ static int __init hugetlb_init(void)
1449 1756
1450 hugetlb_sysfs_init(); 1757 hugetlb_sysfs_init();
1451 1758
1759 hugetlb_register_all_nodes();
1760
1452 return 0; 1761 return 0;
1453} 1762}
1454module_init(hugetlb_init); 1763module_init(hugetlb_init);
@@ -1472,8 +1781,8 @@ void __init hugetlb_add_hstate(unsigned order)
1472 h->free_huge_pages = 0; 1781 h->free_huge_pages = 0;
1473 for (i = 0; i < MAX_NUMNODES; ++i) 1782 for (i = 0; i < MAX_NUMNODES; ++i)
1474 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 1783 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1475 h->next_nid_to_alloc = first_node(node_online_map); 1784 h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1476 h->next_nid_to_free = first_node(node_online_map); 1785 h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1477 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1786 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1478 huge_page_size(h)/1024); 1787 huge_page_size(h)/1024);
1479 1788
@@ -1536,9 +1845,9 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
1536} 1845}
1537 1846
1538#ifdef CONFIG_SYSCTL 1847#ifdef CONFIG_SYSCTL
1539int hugetlb_sysctl_handler(struct ctl_table *table, int write, 1848static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1540 void __user *buffer, 1849 struct ctl_table *table, int write,
1541 size_t *length, loff_t *ppos) 1850 void __user *buffer, size_t *length, loff_t *ppos)
1542{ 1851{
1543 struct hstate *h = &default_hstate; 1852 struct hstate *h = &default_hstate;
1544 unsigned long tmp; 1853 unsigned long tmp;
@@ -1550,12 +1859,40 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1550 table->maxlen = sizeof(unsigned long); 1859 table->maxlen = sizeof(unsigned long);
1551 proc_doulongvec_minmax(table, write, buffer, length, ppos); 1860 proc_doulongvec_minmax(table, write, buffer, length, ppos);
1552 1861
1553 if (write) 1862 if (write) {
1554 h->max_huge_pages = set_max_huge_pages(h, tmp); 1863 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1864 GFP_KERNEL | __GFP_NORETRY);
1865 if (!(obey_mempolicy &&
1866 init_nodemask_of_mempolicy(nodes_allowed))) {
1867 NODEMASK_FREE(nodes_allowed);
1868 nodes_allowed = &node_states[N_HIGH_MEMORY];
1869 }
1870 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1871
1872 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1873 NODEMASK_FREE(nodes_allowed);
1874 }
1555 1875
1556 return 0; 1876 return 0;
1557} 1877}
1558 1878
1879int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1880 void __user *buffer, size_t *length, loff_t *ppos)
1881{
1882
1883 return hugetlb_sysctl_handler_common(false, table, write,
1884 buffer, length, ppos);
1885}
1886
1887#ifdef CONFIG_NUMA
1888int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1889 void __user *buffer, size_t *length, loff_t *ppos)
1890{
1891 return hugetlb_sysctl_handler_common(true, table, write,
1892 buffer, length, ppos);
1893}
1894#endif /* CONFIG_NUMA */
1895
1559int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 1896int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1560 void __user *buffer, 1897 void __user *buffer,
1561 size_t *length, loff_t *ppos) 1898 size_t *length, loff_t *ppos)
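Usage sketch (sysctl name assumed, not shown in this hunk): `sysctl vm.nr_hugepages=64` keeps the old behaviour of spreading the pool over all nodes with memory, while something like `numactl -m 2 sysctl vm.nr_hugepages_mempolicy=64` grows the pool only on node 2, because hugetlb_mempolicy_sysctl_handler() derives nodes_allowed from the calling task's mempolicy.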
@@ -1903,6 +2240,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
1903 + (vma->vm_pgoff >> PAGE_SHIFT); 2240 + (vma->vm_pgoff >> PAGE_SHIFT);
1904 mapping = (struct address_space *)page_private(page); 2241 mapping = (struct address_space *)page_private(page);
1905 2242
2243 /*
2244 * Take the mapping lock for the duration of the table walk. As
2245 * this mapping should be shared between all the VMAs,
2246 * __unmap_hugepage_range() is called as the lock is already held
2247 */
2248 spin_lock(&mapping->i_mmap_lock);
1906 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 2249 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1907 /* Do not unmap the current VMA */ 2250 /* Do not unmap the current VMA */
1908 if (iter_vma == vma) 2251 if (iter_vma == vma)
@@ -1916,10 +2259,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
1916 * from the time of fork. This would look like data corruption 2259 * from the time of fork. This would look like data corruption
1917 */ 2260 */
1918 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 2261 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1919 unmap_hugepage_range(iter_vma, 2262 __unmap_hugepage_range(iter_vma,
1920 address, address + huge_page_size(h), 2263 address, address + huge_page_size(h),
1921 page); 2264 page);
1922 } 2265 }
2266 spin_unlock(&mapping->i_mmap_lock);
1923 2267
1924 return 1; 2268 return 1;
1925} 2269}
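
The switch from unmap_hugepage_range() to __unmap_hugepage_range() follows the kernel's double-underscore convention: the plain function takes the lock itself, while the __ variant assumes the caller already holds it, which is now the case because i_mmap_lock is held across the whole prio-tree walk. A small sketch of that convention, with made-up names and a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lock-free-of-responsibility variant: caller must hold map_lock. */
    static void __unmap_one(int id)
    {
        printf("unmapping vma %d (lock held by caller)\n", id);
    }

    /* Convenience wrapper for callers that do not hold the lock. */
    static void unmap_one(int id)
    {
        pthread_mutex_lock(&map_lock);
        __unmap_one(id);
        pthread_mutex_unlock(&map_lock);
    }

    int main(void)
    {
        unmap_one(1);                 /* one-off caller */

        /* A walker that holds the lock across the whole loop must call
         * the __ variant, or it would deadlock on the non-recursive lock. */
        pthread_mutex_lock(&map_lock);
        for (int id = 2; id <= 4; id++)
            __unmap_one(id);
        pthread_mutex_unlock(&map_lock);
        return 0;
    }
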
@@ -1959,6 +2303,9 @@ retry_avoidcopy:
1959 outside_reserve = 1; 2303 outside_reserve = 1;
1960 2304
1961 page_cache_get(old_page); 2305 page_cache_get(old_page);
2306
2307 /* Drop page_table_lock as buddy allocator may be called */
2308 spin_unlock(&mm->page_table_lock);
1962 new_page = alloc_huge_page(vma, address, outside_reserve); 2309 new_page = alloc_huge_page(vma, address, outside_reserve);
1963 2310
1964 if (IS_ERR(new_page)) { 2311 if (IS_ERR(new_page)) {
@@ -1976,19 +2323,25 @@ retry_avoidcopy:
1976 if (unmap_ref_private(mm, vma, old_page, address)) { 2323 if (unmap_ref_private(mm, vma, old_page, address)) {
1977 BUG_ON(page_count(old_page) != 1); 2324 BUG_ON(page_count(old_page) != 1);
1978 BUG_ON(huge_pte_none(pte)); 2325 BUG_ON(huge_pte_none(pte));
2326 spin_lock(&mm->page_table_lock);
1979 goto retry_avoidcopy; 2327 goto retry_avoidcopy;
1980 } 2328 }
1981 WARN_ON_ONCE(1); 2329 WARN_ON_ONCE(1);
1982 } 2330 }
1983 2331
2332 /* Caller expects lock to be held */
2333 spin_lock(&mm->page_table_lock);
1984 return -PTR_ERR(new_page); 2334 return -PTR_ERR(new_page);
1985 } 2335 }
1986 2336
1987 spin_unlock(&mm->page_table_lock);
1988 copy_huge_page(new_page, old_page, address, vma); 2337 copy_huge_page(new_page, old_page, address, vma);
1989 __SetPageUptodate(new_page); 2338 __SetPageUptodate(new_page);
1990 spin_lock(&mm->page_table_lock);
1991 2339
2340 /*
2341 * Retake the page_table_lock to check for racing updates
2342 * before the page tables are altered
2343 */
2344 spin_lock(&mm->page_table_lock);
1992 ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2345 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
1993 if (likely(pte_same(huge_ptep_get(ptep), pte))) { 2346 if (likely(pte_same(huge_ptep_get(ptep), pte))) {
1994 /* Break COW */ 2347 /* Break COW */
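
The COW path above drops mm->page_table_lock before calling the page allocator (which may sleep), retakes it on every exit path because the caller expects it held, and revalidates the pte with pte_same() before touching the page tables, retrying if it lost a race. A self-contained sketch of that drop-lock / do blocking work / relock / revalidate shape, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long slot;            /* stands in for the pte */

    /* Called with table_lock held; returns with it held again. */
    static int cow_one(unsigned long old)
    {
        void *copy;

        /* Drop the lock: the allocation below may block. */
        pthread_mutex_unlock(&table_lock);
        copy = malloc(4096);
        pthread_mutex_lock(&table_lock);   /* caller expects lock held */

        if (!copy)
            return -1;

        /* Revalidate: someone may have changed the slot while we slept. */
        if (slot != old) {
            free(copy);
            printf("raced, retry\n");
            return 1;
        }
        slot = (unsigned long)copy;        /* install the new mapping */
        printf("installed copy\n");
        return 0;
    }

    int main(void)
    {
        pthread_mutex_lock(&table_lock);
        cow_one(slot);
        pthread_mutex_unlock(&table_lock);
        free((void *)slot);
        return 0;
    }
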
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb8..4fe67a162cb4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,7 +63,7 @@ static inline unsigned long page_order(struct page *page)
63 return page_private(page); 63 return page_private(page);
64} 64}
65 65
66#ifdef CONFIG_HAVE_MLOCK 66#ifdef CONFIG_MMU
67extern long mlock_vma_pages_range(struct vm_area_struct *vma, 67extern long mlock_vma_pages_range(struct vm_area_struct *vma,
68 unsigned long start, unsigned long end); 68 unsigned long start, unsigned long end);
69extern void munlock_vma_pages_range(struct vm_area_struct *vma, 69extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -72,21 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
72{ 72{
73 munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); 73 munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
74} 74}
75#endif
76
77/*
78 * unevictable_migrate_page() called only from migrate_page_copy() to
79 * migrate unevictable flag to new page.
80 * Note that the old page has been isolated from the LRU lists at this
81 * point so we don't need to worry about LRU statistics.
82 */
83static inline void unevictable_migrate_page(struct page *new, struct page *old)
84{
85 if (TestClearPageUnevictable(old))
86 SetPageUnevictable(new);
87}
88 75
89#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
90/* 76/*
91 * Called only in fault path via page_evictable() for a new page 77 * Called only in fault path via page_evictable() for a new page
92 * to determine if it's being mapped into a LOCKED vma. 78 * to determine if it's being mapped into a LOCKED vma.
@@ -107,9 +93,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
107} 93}
108 94
109/* 95/*
110 * must be called with vma's mmap_sem held for read, and page locked. 96 * must be called with vma's mmap_sem held for read or write, and page locked.
111 */ 97 */
112extern void mlock_vma_page(struct page *page); 98extern void mlock_vma_page(struct page *page);
99extern void munlock_vma_page(struct page *page);
113 100
114/* 101/*
115 * Clear the page's PageMlocked(). This can be useful in a situation where 102 * Clear the page's PageMlocked(). This can be useful in a situation where
@@ -144,7 +131,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
144 } 131 }
145} 132}
146 133
147#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ 134#else /* !CONFIG_MMU */
148static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) 135static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
149{ 136{
150 return 0; 137 return 0;
@@ -153,7 +140,7 @@ static inline void clear_page_mlock(struct page *page) { }
153static inline void mlock_vma_page(struct page *page) { } 140static inline void mlock_vma_page(struct page *page) { }
154static inline void mlock_migrate_page(struct page *new, struct page *old) { } 141static inline void mlock_migrate_page(struct page *new, struct page *old) { }
155 142
156#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ 143#endif /* !CONFIG_MMU */
157 144
158/* 145/*
159 * Return the mem_map entry representing the 'offset' subpage within 146 * Return the mem_map entry representing the 'offset' subpage within
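
The mm/internal.h change keys the mlock helpers off CONFIG_MMU and keeps the usual pattern: real declarations when the option is set, empty static inline stubs otherwise, so call sites need no #ifdefs. A tiny sketch of the same pattern; HAVE_FEATURE and feature_mark() are invented names for illustration.

    #include <stdio.h>

    /* Build with -DHAVE_FEATURE to get the real implementation,
     * without it to get the no-op stub.  Callers stay #ifdef-free. */
    #ifdef HAVE_FEATURE
    static int feature_count;

    static inline void feature_mark(int page)
    {
        feature_count++;
        printf("marked page %d (total %d)\n", page, feature_count);
    }
    #else /* !HAVE_FEATURE */
    static inline void feature_mark(int page) { (void)page; }
    #endif /* !HAVE_FEATURE */

    int main(void)
    {
        /* Same call site either way; the stub compiles away to nothing. */
        feature_mark(42);
        return 0;
    }
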
diff --git a/mm/ksm.c b/mm/ksm.c
index 5575f8628fef..56a0da1f9979 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -29,11 +29,13 @@
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/rbtree.h> 31#include <linux/rbtree.h>
32#include <linux/memory.h>
32#include <linux/mmu_notifier.h> 33#include <linux/mmu_notifier.h>
33#include <linux/swap.h> 34#include <linux/swap.h>
34#include <linux/ksm.h> 35#include <linux/ksm.h>
35 36
36#include <asm/tlbflush.h> 37#include <asm/tlbflush.h>
38#include "internal.h"
37 39
38/* 40/*
39 * A few notes about the KSM scanning process, 41 * A few notes about the KSM scanning process,
@@ -79,13 +81,13 @@
79 * struct mm_slot - ksm information per mm that is being scanned 81 * struct mm_slot - ksm information per mm that is being scanned
80 * @link: link to the mm_slots hash list 82 * @link: link to the mm_slots hash list
81 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head 83 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
82 * @rmap_list: head for this mm_slot's list of rmap_items 84 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
83 * @mm: the mm that this information is valid for 85 * @mm: the mm that this information is valid for
84 */ 86 */
85struct mm_slot { 87struct mm_slot {
86 struct hlist_node link; 88 struct hlist_node link;
87 struct list_head mm_list; 89 struct list_head mm_list;
88 struct list_head rmap_list; 90 struct rmap_item *rmap_list;
89 struct mm_struct *mm; 91 struct mm_struct *mm;
90}; 92};
91 93
@@ -93,7 +95,7 @@ struct mm_slot {
93 * struct ksm_scan - cursor for scanning 95 * struct ksm_scan - cursor for scanning
94 * @mm_slot: the current mm_slot we are scanning 96 * @mm_slot: the current mm_slot we are scanning
95 * @address: the next address inside that to be scanned 97 * @address: the next address inside that to be scanned
96 * @rmap_item: the current rmap that we are scanning inside the rmap_list 98 * @rmap_list: link to the next rmap to be scanned in the rmap_list
97 * @seqnr: count of completed full scans (needed when removing unstable node) 99 * @seqnr: count of completed full scans (needed when removing unstable node)
98 * 100 *
99 * There is only the one ksm_scan instance of this cursor structure. 101 * There is only the one ksm_scan instance of this cursor structure.
@@ -101,37 +103,51 @@ struct mm_slot {
101struct ksm_scan { 103struct ksm_scan {
102 struct mm_slot *mm_slot; 104 struct mm_slot *mm_slot;
103 unsigned long address; 105 unsigned long address;
104 struct rmap_item *rmap_item; 106 struct rmap_item **rmap_list;
105 unsigned long seqnr; 107 unsigned long seqnr;
106}; 108};
107 109
108/** 110/**
111 * struct stable_node - node of the stable rbtree
112 * @node: rb node of this ksm page in the stable tree
113 * @hlist: hlist head of rmap_items using this ksm page
114 * @kpfn: page frame number of this ksm page
115 */
116struct stable_node {
117 struct rb_node node;
118 struct hlist_head hlist;
119 unsigned long kpfn;
120};
121
122/**
109 * struct rmap_item - reverse mapping item for virtual addresses 123 * struct rmap_item - reverse mapping item for virtual addresses
110 * @link: link into mm_slot's rmap_list (rmap_list is per mm) 124 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
125 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
111 * @mm: the memory structure this rmap_item is pointing into 126 * @mm: the memory structure this rmap_item is pointing into
112 * @address: the virtual address this rmap_item tracks (+ flags in low bits) 127 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
113 * @oldchecksum: previous checksum of the page at that virtual address 128 * @oldchecksum: previous checksum of the page at that virtual address
114 * @node: rb_node of this rmap_item in either unstable or stable tree 129 * @node: rb node of this rmap_item in the unstable tree
115 * @next: next rmap_item hanging off the same node of the stable tree 130 * @head: pointer to stable_node heading this list in the stable tree
116 * @prev: previous rmap_item hanging off the same node of the stable tree 131 * @hlist: link into hlist of rmap_items hanging off that stable_node
117 */ 132 */
118struct rmap_item { 133struct rmap_item {
119 struct list_head link; 134 struct rmap_item *rmap_list;
135 struct anon_vma *anon_vma; /* when stable */
120 struct mm_struct *mm; 136 struct mm_struct *mm;
121 unsigned long address; /* + low bits used for flags below */ 137 unsigned long address; /* + low bits used for flags below */
138 unsigned int oldchecksum; /* when unstable */
122 union { 139 union {
123 unsigned int oldchecksum; /* when unstable */ 140 struct rb_node node; /* when node of unstable tree */
124 struct rmap_item *next; /* when stable */ 141 struct { /* when listed from stable tree */
125 }; 142 struct stable_node *head;
126 union { 143 struct hlist_node hlist;
127 struct rb_node node; /* when tree node */ 144 };
128 struct rmap_item *prev; /* in stable list */
129 }; 145 };
130}; 146};
131 147
132#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ 148#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
133#define NODE_FLAG 0x100 /* is a node of unstable or stable tree */ 149#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
134#define STABLE_FLAG 0x200 /* is a node or list item of stable tree */ 150#define STABLE_FLAG 0x200 /* is listed from the stable tree */
135 151
136/* The stable and unstable tree heads */ 152/* The stable and unstable tree heads */
137static struct rb_root root_stable_tree = RB_ROOT; 153static struct rb_root root_stable_tree = RB_ROOT;
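
The reworked rmap_item overlays its unstable-tree rb_node and its stable-tree linkage in a union, since an item is only ever in one of those states, and records which state it is in via flag bits (UNSTABLE_FLAG, STABLE_FLAG, seqnr) packed into the low bits of the page-aligned address field. A small sketch of packing flags into the low bits of an aligned value; struct item and its cookie fields are illustrative only.

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define PAGE_MASK   (~(PAGE_SIZE - 1))

    #define UNSTABLE_BIT 0x100UL    /* low bits are free: addr is page aligned */
    #define STABLE_BIT   0x200UL

    struct item {
        unsigned long address;      /* page address + flag bits */
        union {                     /* the two roles never overlap in time */
            int unstable_cookie;    /* meaningful while UNSTABLE_BIT is set */
            int stable_cookie;      /* meaningful while STABLE_BIT is set */
        };
    };

    int main(void)
    {
        struct item it = { .address = 0x7f0000042000UL };

        assert((it.address & ~PAGE_MASK) == 0);     /* aligned, low bits free */

        it.address |= STABLE_BIT;                   /* tag the item's state */
        it.stable_cookie = 7;

        if (it.address & STABLE_BIT)
            printf("stable item for page %#lx, cookie %d\n",
                   it.address & PAGE_MASK, it.stable_cookie);

        it.address &= PAGE_MASK;                    /* drop the flags again */
        return 0;
    }
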
@@ -148,6 +164,7 @@ static struct ksm_scan ksm_scan = {
148}; 164};
149 165
150static struct kmem_cache *rmap_item_cache; 166static struct kmem_cache *rmap_item_cache;
167static struct kmem_cache *stable_node_cache;
151static struct kmem_cache *mm_slot_cache; 168static struct kmem_cache *mm_slot_cache;
152 169
153/* The number of nodes in the stable tree */ 170/* The number of nodes in the stable tree */
@@ -162,9 +179,6 @@ static unsigned long ksm_pages_unshared;
162/* The number of rmap_items in use: to calculate pages_volatile */ 179/* The number of rmap_items in use: to calculate pages_volatile */
163static unsigned long ksm_rmap_items; 180static unsigned long ksm_rmap_items;
164 181
165/* Limit on the number of unswappable pages used */
166static unsigned long ksm_max_kernel_pages;
167
168/* Number of pages ksmd should scan in one batch */ 182/* Number of pages ksmd should scan in one batch */
169static unsigned int ksm_thread_pages_to_scan = 100; 183static unsigned int ksm_thread_pages_to_scan = 100;
170 184
@@ -190,13 +204,19 @@ static int __init ksm_slab_init(void)
190 if (!rmap_item_cache) 204 if (!rmap_item_cache)
191 goto out; 205 goto out;
192 206
207 stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
208 if (!stable_node_cache)
209 goto out_free1;
210
193 mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0); 211 mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
194 if (!mm_slot_cache) 212 if (!mm_slot_cache)
195 goto out_free; 213 goto out_free2;
196 214
197 return 0; 215 return 0;
198 216
199out_free: 217out_free2:
218 kmem_cache_destroy(stable_node_cache);
219out_free1:
200 kmem_cache_destroy(rmap_item_cache); 220 kmem_cache_destroy(rmap_item_cache);
201out: 221out:
202 return -ENOMEM; 222 return -ENOMEM;
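
ksm_slab_init() now sets up three caches, so the error path grows a second label; each failure point jumps to the label that frees exactly what was already allocated, in reverse order. The same goto ladder, sketched with malloc() standing in for kmem_cache creation:

    #include <stdio.h>
    #include <stdlib.h>

    static void *cache_a, *cache_b, *cache_c;

    /* Allocate three resources; on any failure, unwind in reverse order. */
    static int setup(void)
    {
        cache_a = malloc(64);
        if (!cache_a)
            goto out;

        cache_b = malloc(64);
        if (!cache_b)
            goto out_free1;

        cache_c = malloc(64);
        if (!cache_c)
            goto out_free2;

        return 0;

    out_free2:
        free(cache_b);
    out_free1:
        free(cache_a);
    out:
        return -1;
    }

    int main(void)
    {
        if (setup())
            return 1;
        printf("all caches ready\n");
        free(cache_c);
        free(cache_b);
        free(cache_a);
        return 0;
    }
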
@@ -205,6 +225,7 @@ out:
205static void __init ksm_slab_free(void) 225static void __init ksm_slab_free(void)
206{ 226{
207 kmem_cache_destroy(mm_slot_cache); 227 kmem_cache_destroy(mm_slot_cache);
228 kmem_cache_destroy(stable_node_cache);
208 kmem_cache_destroy(rmap_item_cache); 229 kmem_cache_destroy(rmap_item_cache);
209 mm_slot_cache = NULL; 230 mm_slot_cache = NULL;
210} 231}
@@ -226,6 +247,16 @@ static inline void free_rmap_item(struct rmap_item *rmap_item)
226 kmem_cache_free(rmap_item_cache, rmap_item); 247 kmem_cache_free(rmap_item_cache, rmap_item);
227} 248}
228 249
250static inline struct stable_node *alloc_stable_node(void)
251{
252 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
253}
254
255static inline void free_stable_node(struct stable_node *stable_node)
256{
257 kmem_cache_free(stable_node_cache, stable_node);
258}
259
229static inline struct mm_slot *alloc_mm_slot(void) 260static inline struct mm_slot *alloc_mm_slot(void)
230{ 261{
231 if (!mm_slot_cache) /* initialization failed */ 262 if (!mm_slot_cache) /* initialization failed */
@@ -275,7 +306,6 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
275 bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) 306 bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
276 % MM_SLOTS_HASH_HEADS]; 307 % MM_SLOTS_HASH_HEADS];
277 mm_slot->mm = mm; 308 mm_slot->mm = mm;
278 INIT_LIST_HEAD(&mm_slot->rmap_list);
279 hlist_add_head(&mm_slot->link, bucket); 309 hlist_add_head(&mm_slot->link, bucket);
280} 310}
281 311
@@ -284,6 +314,25 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
284 return rmap_item->address & STABLE_FLAG; 314 return rmap_item->address & STABLE_FLAG;
285} 315}
286 316
317static void hold_anon_vma(struct rmap_item *rmap_item,
318 struct anon_vma *anon_vma)
319{
320 rmap_item->anon_vma = anon_vma;
321 atomic_inc(&anon_vma->ksm_refcount);
322}
323
324static void drop_anon_vma(struct rmap_item *rmap_item)
325{
326 struct anon_vma *anon_vma = rmap_item->anon_vma;
327
328 if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
329 int empty = list_empty(&anon_vma->head);
330 spin_unlock(&anon_vma->lock);
331 if (empty)
332 anon_vma_free(anon_vma);
333 }
334}
335
287/* 336/*
288 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's 337 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
289 * page tables after it has passed through ksm_exit() - which, if necessary, 338 * page tables after it has passed through ksm_exit() - which, if necessary,
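
hold_anon_vma()/drop_anon_vma() above pin the anon_vma with a KSM refcount and free it only when the last KSM reference is dropped and the anon_vma's VMA list is already empty; atomic_dec_and_lock() lets the kernel take the spinlock only on that final put. A simplified mutex-based sketch of free-on-last-put-if-empty (struct shared and its fields are illustrative, not the kernel layout):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct shared {
        pthread_mutex_t lock;
        int refcount;               /* extra references held by scanners */
        int list_entries;           /* users still linked on the object */
    };

    static void hold(struct shared *s)
    {
        pthread_mutex_lock(&s->lock);
        s->refcount++;
        pthread_mutex_unlock(&s->lock);
    }

    /* Free the object only when we dropped the last reference and no
     * user is linked any more.  (The kernel version only takes the lock
     * when the count actually hits zero, via atomic_dec_and_lock().) */
    static void put(struct shared *s)
    {
        pthread_mutex_lock(&s->lock);
        int last = (--s->refcount == 0);
        int empty = (s->list_entries == 0);
        pthread_mutex_unlock(&s->lock);

        if (last && empty) {
            printf("last put, freeing\n");
            free(s);
        }
    }

    int main(void)
    {
        struct shared *s = calloc(1, sizeof(*s));
        pthread_mutex_init(&s->lock, NULL);
        hold(s);
        hold(s);
        put(s);
        put(s);        /* second put is the last one: object is freed */
        return 0;
    }
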
@@ -356,10 +405,18 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
356 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; 405 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
357} 406}
358 407
359static void break_cow(struct mm_struct *mm, unsigned long addr) 408static void break_cow(struct rmap_item *rmap_item)
360{ 409{
410 struct mm_struct *mm = rmap_item->mm;
411 unsigned long addr = rmap_item->address;
361 struct vm_area_struct *vma; 412 struct vm_area_struct *vma;
362 413
414 /*
415 * It is not an accident that whenever we want to break COW
416 * to undo, we also need to drop a reference to the anon_vma.
417 */
418 drop_anon_vma(rmap_item);
419
363 down_read(&mm->mmap_sem); 420 down_read(&mm->mmap_sem);
364 if (ksm_test_exit(mm)) 421 if (ksm_test_exit(mm))
365 goto out; 422 goto out;
@@ -403,21 +460,77 @@ out: page = NULL;
403 return page; 460 return page;
404} 461}
405 462
463static void remove_node_from_stable_tree(struct stable_node *stable_node)
464{
465 struct rmap_item *rmap_item;
466 struct hlist_node *hlist;
467
468 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
469 if (rmap_item->hlist.next)
470 ksm_pages_sharing--;
471 else
472 ksm_pages_shared--;
473 drop_anon_vma(rmap_item);
474 rmap_item->address &= PAGE_MASK;
475 cond_resched();
476 }
477
478 rb_erase(&stable_node->node, &root_stable_tree);
479 free_stable_node(stable_node);
480}
481
406/* 482/*
407 * get_ksm_page: checks if the page at the virtual address in rmap_item 483 * get_ksm_page: checks if the page indicated by the stable node
408 * is still PageKsm, in which case we can trust the content of the page, 484 * is still its ksm page, despite having held no reference to it.
409 * and it returns the gotten page; but NULL if the page has been zapped. 485 * In which case we can trust the content of the page, and it
486 * returns the gotten page; but if the page has now been zapped,
487 * remove the stale node from the stable tree and return NULL.
488 *
489 * You would expect the stable_node to hold a reference to the ksm page.
490 * But if it increments the page's count, swapping out has to wait for
491 * ksmd to come around again before it can free the page, which may take
492 * seconds or even minutes: much too unresponsive. So instead we use a
493 * "keyhole reference": access to the ksm page from the stable node peeps
494 * out through its keyhole to see if that page still holds the right key,
495 * pointing back to this stable node. This relies on freeing a PageAnon
496 * page to reset its page->mapping to NULL, and relies on no other use of
497 * a page to put something that might look like our key in page->mapping.
498 *
499 * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
500 * but this is different - made simpler by ksm_thread_mutex being held, but
501 * interesting for assuming that no other use of the struct page could ever
502 * put our expected_mapping into page->mapping (or a field of the union which
503 * coincides with page->mapping). The RCU calls are not for KSM at all, but
504 * to keep the page_count protocol described with page_cache_get_speculative.
505 *
506 * Note: it is possible that get_ksm_page() will return NULL one moment,
507 * then page the next, if the page is in between page_freeze_refs() and
508 * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
509 * is on its way to being freed; but it is an anomaly to bear in mind.
410 */ 510 */
411static struct page *get_ksm_page(struct rmap_item *rmap_item) 511static struct page *get_ksm_page(struct stable_node *stable_node)
412{ 512{
413 struct page *page; 513 struct page *page;
414 514 void *expected_mapping;
415 page = get_mergeable_page(rmap_item); 515
416 if (page && !PageKsm(page)) { 516 page = pfn_to_page(stable_node->kpfn);
517 expected_mapping = (void *)stable_node +
518 (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
519 rcu_read_lock();
520 if (page->mapping != expected_mapping)
521 goto stale;
522 if (!get_page_unless_zero(page))
523 goto stale;
524 if (page->mapping != expected_mapping) {
417 put_page(page); 525 put_page(page);
418 page = NULL; 526 goto stale;
419 } 527 }
528 rcu_read_unlock();
420 return page; 529 return page;
530stale:
531 rcu_read_unlock();
532 remove_node_from_stable_tree(stable_node);
533 return NULL;
421} 534}
422 535
423/* 536/*
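
The comment block above describes the keyhole reference in get_ksm_page(): check that page->mapping still points back to the stable node, take a reference only if the page count is not already zero, then re-check the mapping to be sure the page was not freed and reused in the window. The kernel does this under RCU with the page_count protocol; the sketch below only shows the order of the checks, using C11 atomics and made-up names.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page { _Atomic int count; void *mapping; };

    /* Take a reference only if the count is not already zero. */
    static bool get_page_unless_zero(struct page *p)
    {
        int c = atomic_load(&p->count);
        while (c != 0)
            if (atomic_compare_exchange_weak(&p->count, &c, c + 1))
                return true;
        return false;
    }

    /* Keyhole lookup: the key must match both before and after taking the
     * reference, otherwise the page was freed (and maybe reused) under us. */
    static struct page *get_keyed_page(struct page *p, void *expected_key)
    {
        if (p->mapping != expected_key)
            return NULL;                    /* already gone */
        if (!get_page_unless_zero(p))
            return NULL;                    /* count hit zero: being freed */
        if (p->mapping != expected_key) {   /* re-check after the get */
            atomic_fetch_sub(&p->count, 1); /* put the stale reference back */
            return NULL;
        }
        return p;
    }

    int main(void)
    {
        int key;
        struct page pg = { .mapping = &key };
        atomic_store(&pg.count, 1);

        if (get_keyed_page(&pg, &key))
            printf("got a stable reference\n");
        return 0;
    }
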
@@ -426,35 +539,29 @@ static struct page *get_ksm_page(struct rmap_item *rmap_item)
426 */ 539 */
427static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) 540static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
428{ 541{
429 if (in_stable_tree(rmap_item)) { 542 if (rmap_item->address & STABLE_FLAG) {
430 struct rmap_item *next_item = rmap_item->next; 543 struct stable_node *stable_node;
431 544 struct page *page;
432 if (rmap_item->address & NODE_FLAG) {
433 if (next_item) {
434 rb_replace_node(&rmap_item->node,
435 &next_item->node,
436 &root_stable_tree);
437 next_item->address |= NODE_FLAG;
438 ksm_pages_sharing--;
439 } else {
440 rb_erase(&rmap_item->node, &root_stable_tree);
441 ksm_pages_shared--;
442 }
443 } else {
444 struct rmap_item *prev_item = rmap_item->prev;
445 545
446 BUG_ON(prev_item->next != rmap_item); 546 stable_node = rmap_item->head;
447 prev_item->next = next_item; 547 page = get_ksm_page(stable_node);
448 if (next_item) { 548 if (!page)
449 BUG_ON(next_item->prev != rmap_item); 549 goto out;
450 next_item->prev = rmap_item->prev; 550
451 } 551 lock_page(page);
552 hlist_del(&rmap_item->hlist);
553 unlock_page(page);
554 put_page(page);
555
556 if (stable_node->hlist.first)
452 ksm_pages_sharing--; 557 ksm_pages_sharing--;
453 } 558 else
559 ksm_pages_shared--;
454 560
455 rmap_item->next = NULL; 561 drop_anon_vma(rmap_item);
562 rmap_item->address &= PAGE_MASK;
456 563
457 } else if (rmap_item->address & NODE_FLAG) { 564 } else if (rmap_item->address & UNSTABLE_FLAG) {
458 unsigned char age; 565 unsigned char age;
459 /* 566 /*
460 * Usually ksmd can and must skip the rb_erase, because 567 * Usually ksmd can and must skip the rb_erase, because
@@ -467,24 +574,21 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
467 BUG_ON(age > 1); 574 BUG_ON(age > 1);
468 if (!age) 575 if (!age)
469 rb_erase(&rmap_item->node, &root_unstable_tree); 576 rb_erase(&rmap_item->node, &root_unstable_tree);
577
470 ksm_pages_unshared--; 578 ksm_pages_unshared--;
579 rmap_item->address &= PAGE_MASK;
471 } 580 }
472 581out:
473 rmap_item->address &= PAGE_MASK;
474
475 cond_resched(); /* we're called from many long loops */ 582 cond_resched(); /* we're called from many long loops */
476} 583}
477 584
478static void remove_trailing_rmap_items(struct mm_slot *mm_slot, 585static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
479 struct list_head *cur) 586 struct rmap_item **rmap_list)
480{ 587{
481 struct rmap_item *rmap_item; 588 while (*rmap_list) {
482 589 struct rmap_item *rmap_item = *rmap_list;
483 while (cur != &mm_slot->rmap_list) { 590 *rmap_list = rmap_item->rmap_list;
484 rmap_item = list_entry(cur, struct rmap_item, link);
485 cur = cur->next;
486 remove_rmap_item_from_tree(rmap_item); 591 remove_rmap_item_from_tree(rmap_item);
487 list_del(&rmap_item->link);
488 free_rmap_item(rmap_item); 592 free_rmap_item(rmap_item);
489 } 593 }
490} 594}
@@ -550,7 +654,7 @@ static int unmerge_and_remove_all_rmap_items(void)
550 goto error; 654 goto error;
551 } 655 }
552 656
553 remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next); 657 remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
554 658
555 spin_lock(&ksm_mmlist_lock); 659 spin_lock(&ksm_mmlist_lock);
556 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, 660 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -646,7 +750,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
646 * Check that no O_DIRECT or similar I/O is in progress on the 750 * Check that no O_DIRECT or similar I/O is in progress on the
647 * page 751 * page
648 */ 752 */
649 if ((page_mapcount(page) + 2 + swapped) != page_count(page)) { 753 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
650 set_pte_at_notify(mm, addr, ptep, entry); 754 set_pte_at_notify(mm, addr, ptep, entry);
651 goto out_unlock; 755 goto out_unlock;
652 } 756 }
@@ -664,15 +768,15 @@ out:
664 768
665/** 769/**
666 * replace_page - replace page in vma by new ksm page 770 * replace_page - replace page in vma by new ksm page
667 * @vma: vma that holds the pte pointing to oldpage 771 * @vma: vma that holds the pte pointing to page
668 * @oldpage: the page we are replacing by newpage 772 * @page: the page we are replacing by kpage
669 * @newpage: the ksm page we replace oldpage by 773 * @kpage: the ksm page we replace page by
670 * @orig_pte: the original value of the pte 774 * @orig_pte: the original value of the pte
671 * 775 *
672 * Returns 0 on success, -EFAULT on failure. 776 * Returns 0 on success, -EFAULT on failure.
673 */ 777 */
674static int replace_page(struct vm_area_struct *vma, struct page *oldpage, 778static int replace_page(struct vm_area_struct *vma, struct page *page,
675 struct page *newpage, pte_t orig_pte) 779 struct page *kpage, pte_t orig_pte)
676{ 780{
677 struct mm_struct *mm = vma->vm_mm; 781 struct mm_struct *mm = vma->vm_mm;
678 pgd_t *pgd; 782 pgd_t *pgd;
@@ -681,12 +785,9 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
681 pte_t *ptep; 785 pte_t *ptep;
682 spinlock_t *ptl; 786 spinlock_t *ptl;
683 unsigned long addr; 787 unsigned long addr;
684 pgprot_t prot;
685 int err = -EFAULT; 788 int err = -EFAULT;
686 789
687 prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE); 790 addr = page_address_in_vma(page, vma);
688
689 addr = page_address_in_vma(oldpage, vma);
690 if (addr == -EFAULT) 791 if (addr == -EFAULT)
691 goto out; 792 goto out;
692 793
@@ -708,15 +809,15 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
708 goto out; 809 goto out;
709 } 810 }
710 811
711 get_page(newpage); 812 get_page(kpage);
712 page_add_ksm_rmap(newpage); 813 page_add_anon_rmap(kpage, vma, addr);
713 814
714 flush_cache_page(vma, addr, pte_pfn(*ptep)); 815 flush_cache_page(vma, addr, pte_pfn(*ptep));
715 ptep_clear_flush(vma, addr, ptep); 816 ptep_clear_flush(vma, addr, ptep);
716 set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot)); 817 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
717 818
718 page_remove_rmap(oldpage); 819 page_remove_rmap(page);
719 put_page(oldpage); 820 put_page(page);
720 821
721 pte_unmap_unlock(ptep, ptl); 822 pte_unmap_unlock(ptep, ptl);
722 err = 0; 823 err = 0;
@@ -726,32 +827,27 @@ out:
726 827
727/* 828/*
728 * try_to_merge_one_page - take two pages and merge them into one 829 * try_to_merge_one_page - take two pages and merge them into one
729 * @vma: the vma that hold the pte pointing into oldpage 830 * @vma: the vma that holds the pte pointing to page
730 * @oldpage: the page that we want to replace with newpage 831 * @page: the PageAnon page that we want to replace with kpage
731 * @newpage: the page that we want to map instead of oldpage 832 * @kpage: the PageKsm page that we want to map instead of page,
732 * 833 * or NULL the first time when we want to use page as kpage.
733 * Note:
734 * oldpage should be a PageAnon page, while newpage should be a PageKsm page,
735 * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm.
736 * 834 *
737 * This function returns 0 if the pages were merged, -EFAULT otherwise. 835 * This function returns 0 if the pages were merged, -EFAULT otherwise.
738 */ 836 */
739static int try_to_merge_one_page(struct vm_area_struct *vma, 837static int try_to_merge_one_page(struct vm_area_struct *vma,
740 struct page *oldpage, 838 struct page *page, struct page *kpage)
741 struct page *newpage)
742{ 839{
743 pte_t orig_pte = __pte(0); 840 pte_t orig_pte = __pte(0);
744 int err = -EFAULT; 841 int err = -EFAULT;
745 842
843 if (page == kpage) /* ksm page forked */
844 return 0;
845
746 if (!(vma->vm_flags & VM_MERGEABLE)) 846 if (!(vma->vm_flags & VM_MERGEABLE))
747 goto out; 847 goto out;
748 848 if (!PageAnon(page))
749 if (!PageAnon(oldpage))
750 goto out; 849 goto out;
751 850
752 get_page(newpage);
753 get_page(oldpage);
754
755 /* 851 /*
756 * We need the page lock to read a stable PageSwapCache in 852 * We need the page lock to read a stable PageSwapCache in
757 * write_protect_page(). We use trylock_page() instead of 853 * write_protect_page(). We use trylock_page() instead of
@@ -759,26 +855,39 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
759 * prefer to continue scanning and merging different pages, 855 * prefer to continue scanning and merging different pages,
760 * then come back to this page when it is unlocked. 856 * then come back to this page when it is unlocked.
761 */ 857 */
762 if (!trylock_page(oldpage)) 858 if (!trylock_page(page))
763 goto out_putpage; 859 goto out;
764 /* 860 /*
765 * If this anonymous page is mapped only here, its pte may need 861 * If this anonymous page is mapped only here, its pte may need
766 * to be write-protected. If it's mapped elsewhere, all of its 862 * to be write-protected. If it's mapped elsewhere, all of its
767 * ptes are necessarily already write-protected. But in either 863 * ptes are necessarily already write-protected. But in either
768 * case, we need to lock and check page_count is not raised. 864 * case, we need to lock and check page_count is not raised.
769 */ 865 */
770 if (write_protect_page(vma, oldpage, &orig_pte)) { 866 if (write_protect_page(vma, page, &orig_pte) == 0) {
771 unlock_page(oldpage); 867 if (!kpage) {
772 goto out_putpage; 868 /*
869 * While we hold page lock, upgrade page from
870 * PageAnon+anon_vma to PageKsm+NULL stable_node:
871 * stable_tree_insert() will update stable_node.
872 */
873 set_page_stable_node(page, NULL);
874 mark_page_accessed(page);
875 err = 0;
876 } else if (pages_identical(page, kpage))
877 err = replace_page(vma, page, kpage, orig_pte);
773 } 878 }
774 unlock_page(oldpage);
775 879
776 if (pages_identical(oldpage, newpage)) 880 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
777 err = replace_page(vma, oldpage, newpage, orig_pte); 881 munlock_vma_page(page);
882 if (!PageMlocked(kpage)) {
883 unlock_page(page);
884 lock_page(kpage);
885 mlock_vma_page(kpage);
886 page = kpage; /* for final unlock */
887 }
888 }
778 889
779out_putpage: 890 unlock_page(page);
780 put_page(oldpage);
781 put_page(newpage);
782out: 891out:
783 return err; 892 return err;
784} 893}
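
try_to_merge_one_page() uses trylock_page() instead of lock_page(): if the page is busy right now, ksmd prefers to keep scanning and come back to this page on a later pass rather than block. A sketch of trylock-and-defer with a pthread mutex standing in for the page lock; the scanner names are illustrative.

    #include <pthread.h>
    #include <stdio.h>

    struct work { pthread_mutex_t lock; int id; };

    /* Scanner pass: never block on a busy item, just skip it for now. */
    static int scan_once(struct work *items, int n)
    {
        int skipped = 0;

        for (int i = 0; i < n; i++) {
            if (pthread_mutex_trylock(&items[i].lock) != 0) {
                skipped++;              /* busy: retry on the next pass */
                continue;
            }
            printf("processed item %d\n", items[i].id);
            pthread_mutex_unlock(&items[i].lock);
        }
        return skipped;
    }

    int main(void)
    {
        struct work items[2] = { { PTHREAD_MUTEX_INITIALIZER, 0 },
                                 { PTHREAD_MUTEX_INITIALIZER, 1 } };

        pthread_mutex_lock(&items[1].lock);     /* someone else holds it */
        printf("skipped %d item(s)\n", scan_once(items, 2));
        pthread_mutex_unlock(&items[1].lock);
        return 0;
    }
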
@@ -786,26 +895,31 @@ out:
786/* 895/*
787 * try_to_merge_with_ksm_page - like try_to_merge_two_pages, 896 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
788 * but no new kernel page is allocated: kpage must already be a ksm page. 897 * but no new kernel page is allocated: kpage must already be a ksm page.
898 *
899 * This function returns 0 if the pages were merged, -EFAULT otherwise.
789 */ 900 */
790static int try_to_merge_with_ksm_page(struct mm_struct *mm1, 901static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
791 unsigned long addr1, 902 struct page *page, struct page *kpage)
792 struct page *page1,
793 struct page *kpage)
794{ 903{
904 struct mm_struct *mm = rmap_item->mm;
795 struct vm_area_struct *vma; 905 struct vm_area_struct *vma;
796 int err = -EFAULT; 906 int err = -EFAULT;
797 907
798 down_read(&mm1->mmap_sem); 908 down_read(&mm->mmap_sem);
799 if (ksm_test_exit(mm1)) 909 if (ksm_test_exit(mm))
910 goto out;
911 vma = find_vma(mm, rmap_item->address);
912 if (!vma || vma->vm_start > rmap_item->address)
800 goto out; 913 goto out;
801 914
802 vma = find_vma(mm1, addr1); 915 err = try_to_merge_one_page(vma, page, kpage);
803 if (!vma || vma->vm_start > addr1) 916 if (err)
804 goto out; 917 goto out;
805 918
806 err = try_to_merge_one_page(vma, page1, kpage); 919 /* Must get reference to anon_vma while still holding mmap_sem */
920 hold_anon_vma(rmap_item, vma->anon_vma);
807out: 921out:
808 up_read(&mm1->mmap_sem); 922 up_read(&mm->mmap_sem);
809 return err; 923 return err;
810} 924}
811 925
@@ -813,109 +927,73 @@ out:
813 * try_to_merge_two_pages - take two identical pages and prepare them 927 * try_to_merge_two_pages - take two identical pages and prepare them
814 * to be merged into one page. 928 * to be merged into one page.
815 * 929 *
816 * This function returns 0 if we successfully mapped two identical pages 930 * This function returns the kpage if we successfully merged two identical
817 * into one page, -EFAULT otherwise. 931 * pages into one ksm page, NULL otherwise.
818 * 932 *
819 * Note that this function allocates a new kernel page: if one of the pages 933 * Note that this function upgrades page to ksm page: if one of the pages
820 * is already a ksm page, try_to_merge_with_ksm_page should be used. 934 * is already a ksm page, try_to_merge_with_ksm_page should be used.
821 */ 935 */
822static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1, 936static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
823 struct page *page1, struct mm_struct *mm2, 937 struct page *page,
824 unsigned long addr2, struct page *page2) 938 struct rmap_item *tree_rmap_item,
939 struct page *tree_page)
825{ 940{
826 struct vm_area_struct *vma; 941 int err;
827 struct page *kpage;
828 int err = -EFAULT;
829
830 /*
831 * The number of nodes in the stable tree
832 * is the number of kernel pages that we hold.
833 */
834 if (ksm_max_kernel_pages &&
835 ksm_max_kernel_pages <= ksm_pages_shared)
836 return err;
837
838 kpage = alloc_page(GFP_HIGHUSER);
839 if (!kpage)
840 return err;
841
842 down_read(&mm1->mmap_sem);
843 if (ksm_test_exit(mm1)) {
844 up_read(&mm1->mmap_sem);
845 goto out;
846 }
847 vma = find_vma(mm1, addr1);
848 if (!vma || vma->vm_start > addr1) {
849 up_read(&mm1->mmap_sem);
850 goto out;
851 }
852
853 copy_user_highpage(kpage, page1, addr1, vma);
854 err = try_to_merge_one_page(vma, page1, kpage);
855 up_read(&mm1->mmap_sem);
856 942
943 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
857 if (!err) { 944 if (!err) {
858 err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage); 945 err = try_to_merge_with_ksm_page(tree_rmap_item,
946 tree_page, page);
859 /* 947 /*
860 * If that fails, we have a ksm page with only one pte 948 * If that fails, we have a ksm page with only one pte
861 * pointing to it: so break it. 949 * pointing to it: so break it.
862 */ 950 */
863 if (err) 951 if (err)
864 break_cow(mm1, addr1); 952 break_cow(rmap_item);
865 } 953 }
866out: 954 return err ? NULL : page;
867 put_page(kpage);
868 return err;
869} 955}
870 956
871/* 957/*
872 * stable_tree_search - search page inside the stable tree 958 * stable_tree_search - search for page inside the stable tree
873 * @page: the page that we are searching identical pages to.
874 * @page2: pointer into identical page that we are holding inside the stable
875 * tree that we have found.
876 * @rmap_item: the reverse mapping item
877 * 959 *
878 * This function checks if there is a page inside the stable tree 960 * This function checks if there is a page inside the stable tree
879 * with identical content to the page that we are scanning right now. 961 * with identical content to the page that we are scanning right now.
880 * 962 *
881 * This function return rmap_item pointer to the identical item if found, 963 * This function returns the stable tree node of identical content if found,
882 * NULL otherwise. 964 * NULL otherwise.
883 */ 965 */
884static struct rmap_item *stable_tree_search(struct page *page, 966static struct page *stable_tree_search(struct page *page)
885 struct page **page2,
886 struct rmap_item *rmap_item)
887{ 967{
888 struct rb_node *node = root_stable_tree.rb_node; 968 struct rb_node *node = root_stable_tree.rb_node;
969 struct stable_node *stable_node;
970
971 stable_node = page_stable_node(page);
972 if (stable_node) { /* ksm page forked */
973 get_page(page);
974 return page;
975 }
889 976
890 while (node) { 977 while (node) {
891 struct rmap_item *tree_rmap_item, *next_rmap_item; 978 struct page *tree_page;
892 int ret; 979 int ret;
893 980
894 tree_rmap_item = rb_entry(node, struct rmap_item, node); 981 cond_resched();
895 while (tree_rmap_item) { 982 stable_node = rb_entry(node, struct stable_node, node);
896 BUG_ON(!in_stable_tree(tree_rmap_item)); 983 tree_page = get_ksm_page(stable_node);
897 cond_resched(); 984 if (!tree_page)
898 page2[0] = get_ksm_page(tree_rmap_item);
899 if (page2[0])
900 break;
901 next_rmap_item = tree_rmap_item->next;
902 remove_rmap_item_from_tree(tree_rmap_item);
903 tree_rmap_item = next_rmap_item;
904 }
905 if (!tree_rmap_item)
906 return NULL; 985 return NULL;
907 986
908 ret = memcmp_pages(page, page2[0]); 987 ret = memcmp_pages(page, tree_page);
909 988
910 if (ret < 0) { 989 if (ret < 0) {
911 put_page(page2[0]); 990 put_page(tree_page);
912 node = node->rb_left; 991 node = node->rb_left;
913 } else if (ret > 0) { 992 } else if (ret > 0) {
914 put_page(page2[0]); 993 put_page(tree_page);
915 node = node->rb_right; 994 node = node->rb_right;
916 } else { 995 } else
917 return tree_rmap_item; 996 return tree_page;
918 }
919 } 997 }
920 998
921 return NULL; 999 return NULL;
@@ -925,38 +1003,26 @@ static struct rmap_item *stable_tree_search(struct page *page,
925 * stable_tree_insert - insert rmap_item pointing to new ksm page 1003 * stable_tree_insert - insert rmap_item pointing to new ksm page
926 * into the stable tree. 1004 * into the stable tree.
927 * 1005 *
928 * @page: the page that we are searching identical page to inside the stable 1006 * This function returns the stable tree node just allocated on success,
929 * tree. 1007 * NULL otherwise.
930 * @rmap_item: pointer to the reverse mapping item.
931 *
932 * This function returns rmap_item if success, NULL otherwise.
933 */ 1008 */
934static struct rmap_item *stable_tree_insert(struct page *page, 1009static struct stable_node *stable_tree_insert(struct page *kpage)
935 struct rmap_item *rmap_item)
936{ 1010{
937 struct rb_node **new = &root_stable_tree.rb_node; 1011 struct rb_node **new = &root_stable_tree.rb_node;
938 struct rb_node *parent = NULL; 1012 struct rb_node *parent = NULL;
1013 struct stable_node *stable_node;
939 1014
940 while (*new) { 1015 while (*new) {
941 struct rmap_item *tree_rmap_item, *next_rmap_item;
942 struct page *tree_page; 1016 struct page *tree_page;
943 int ret; 1017 int ret;
944 1018
945 tree_rmap_item = rb_entry(*new, struct rmap_item, node); 1019 cond_resched();
946 while (tree_rmap_item) { 1020 stable_node = rb_entry(*new, struct stable_node, node);
947 BUG_ON(!in_stable_tree(tree_rmap_item)); 1021 tree_page = get_ksm_page(stable_node);
948 cond_resched(); 1022 if (!tree_page)
949 tree_page = get_ksm_page(tree_rmap_item);
950 if (tree_page)
951 break;
952 next_rmap_item = tree_rmap_item->next;
953 remove_rmap_item_from_tree(tree_rmap_item);
954 tree_rmap_item = next_rmap_item;
955 }
956 if (!tree_rmap_item)
957 return NULL; 1023 return NULL;
958 1024
959 ret = memcmp_pages(page, tree_page); 1025 ret = memcmp_pages(kpage, tree_page);
960 put_page(tree_page); 1026 put_page(tree_page);
961 1027
962 parent = *new; 1028 parent = *new;
@@ -974,22 +1040,24 @@ static struct rmap_item *stable_tree_insert(struct page *page,
974 } 1040 }
975 } 1041 }
976 1042
977 rmap_item->address |= NODE_FLAG | STABLE_FLAG; 1043 stable_node = alloc_stable_node();
978 rmap_item->next = NULL; 1044 if (!stable_node)
979 rb_link_node(&rmap_item->node, parent, new); 1045 return NULL;
980 rb_insert_color(&rmap_item->node, &root_stable_tree);
981 1046
982 ksm_pages_shared++; 1047 rb_link_node(&stable_node->node, parent, new);
983 return rmap_item; 1048 rb_insert_color(&stable_node->node, &root_stable_tree);
1049
1050 INIT_HLIST_HEAD(&stable_node->hlist);
1051
1052 stable_node->kpfn = page_to_pfn(kpage);
1053 set_page_stable_node(kpage, stable_node);
1054
1055 return stable_node;
984} 1056}
985 1057
986/* 1058/*
987 * unstable_tree_search_insert - search and insert items into the unstable tree. 1059 * unstable_tree_search_insert - search for identical page,
988 * 1060 * else insert rmap_item into the unstable tree.
989 * @page: the page that we are going to search for identical page or to insert
990 * into the unstable tree
991 * @page2: pointer into identical page that was found inside the unstable tree
992 * @rmap_item: the reverse mapping item of page
993 * 1061 *
994 * This function searches for a page in the unstable tree identical to the 1062 * This function searches for a page in the unstable tree identical to the
995 * page currently being scanned; and if no identical page is found in the 1063 * page currently being scanned; and if no identical page is found in the
@@ -1001,47 +1069,50 @@ static struct rmap_item *stable_tree_insert(struct page *page,
1001 * This function does both searching and inserting, because they share 1069 * This function does both searching and inserting, because they share
1002 * the same walking algorithm in an rbtree. 1070 * the same walking algorithm in an rbtree.
1003 */ 1071 */
1004static struct rmap_item *unstable_tree_search_insert(struct page *page, 1072static
1005 struct page **page2, 1073struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
1006 struct rmap_item *rmap_item) 1074 struct page *page,
1075 struct page **tree_pagep)
1076
1007{ 1077{
1008 struct rb_node **new = &root_unstable_tree.rb_node; 1078 struct rb_node **new = &root_unstable_tree.rb_node;
1009 struct rb_node *parent = NULL; 1079 struct rb_node *parent = NULL;
1010 1080
1011 while (*new) { 1081 while (*new) {
1012 struct rmap_item *tree_rmap_item; 1082 struct rmap_item *tree_rmap_item;
1083 struct page *tree_page;
1013 int ret; 1084 int ret;
1014 1085
1015 cond_resched(); 1086 cond_resched();
1016 tree_rmap_item = rb_entry(*new, struct rmap_item, node); 1087 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1017 page2[0] = get_mergeable_page(tree_rmap_item); 1088 tree_page = get_mergeable_page(tree_rmap_item);
1018 if (!page2[0]) 1089 if (!tree_page)
1019 return NULL; 1090 return NULL;
1020 1091
1021 /* 1092 /*
1022 * Don't substitute an unswappable ksm page 1093 * Don't substitute a ksm page for a forked page.
1023 * just for one good swappable forked page.
1024 */ 1094 */
1025 if (page == page2[0]) { 1095 if (page == tree_page) {
1026 put_page(page2[0]); 1096 put_page(tree_page);
1027 return NULL; 1097 return NULL;
1028 } 1098 }
1029 1099
1030 ret = memcmp_pages(page, page2[0]); 1100 ret = memcmp_pages(page, tree_page);
1031 1101
1032 parent = *new; 1102 parent = *new;
1033 if (ret < 0) { 1103 if (ret < 0) {
1034 put_page(page2[0]); 1104 put_page(tree_page);
1035 new = &parent->rb_left; 1105 new = &parent->rb_left;
1036 } else if (ret > 0) { 1106 } else if (ret > 0) {
1037 put_page(page2[0]); 1107 put_page(tree_page);
1038 new = &parent->rb_right; 1108 new = &parent->rb_right;
1039 } else { 1109 } else {
1110 *tree_pagep = tree_page;
1040 return tree_rmap_item; 1111 return tree_rmap_item;
1041 } 1112 }
1042 } 1113 }
1043 1114
1044 rmap_item->address |= NODE_FLAG; 1115 rmap_item->address |= UNSTABLE_FLAG;
1045 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 1116 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1046 rb_link_node(&rmap_item->node, parent, new); 1117 rb_link_node(&rmap_item->node, parent, new);
1047 rb_insert_color(&rmap_item->node, &root_unstable_tree); 1118 rb_insert_color(&rmap_item->node, &root_unstable_tree);
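
Both KSM trees are ordered purely by memcmp() of page contents, so search and insert share one descent: go left or right on the comparison result and, if the walk falls off the tree, link the new node at the exact slot where the search ended (the kernel then rebalances with rb_insert_color()). A minimal sketch of search-or-insert, assuming a plain unbalanced binary tree instead of the kernel rbtree:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE 8                      /* tiny "page" for the demo */

    struct node {
        unsigned char data[PAGE];
        struct node *left, *right;
    };

    /* Search for identical content; if absent, insert at the spot where
     * the search ended.  The kernel does the same walk on an rbtree and
     * then rebalances; this sketch skips the rebalancing. */
    static struct node *search_insert(struct node **root,
                                      const unsigned char *data)
    {
        struct node **link = root;

        while (*link) {
            int ret = memcmp(data, (*link)->data, PAGE);
            if (ret < 0)
                link = &(*link)->left;
            else if (ret > 0)
                link = &(*link)->right;
            else
                return *link;           /* identical content found */
        }

        struct node *new = calloc(1, sizeof(*new));
        memcpy(new->data, data, PAGE);
        *link = new;                    /* hook in at the failed slot */
        return NULL;                    /* no duplicate existed */
    }

    int main(void)
    {
        struct node *root = NULL;
        unsigned char a[PAGE] = "AAAAAAA", b[PAGE] = "BBBBBBB";

        search_insert(&root, a);
        search_insert(&root, b);
        printf("duplicate of a: %s\n",
               search_insert(&root, a) ? "found" : "not found");
        return 0;
    }
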
@@ -1056,18 +1127,16 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
1056 * the same ksm page. 1127 * the same ksm page.
1057 */ 1128 */
1058static void stable_tree_append(struct rmap_item *rmap_item, 1129static void stable_tree_append(struct rmap_item *rmap_item,
1059 struct rmap_item *tree_rmap_item) 1130 struct stable_node *stable_node)
1060{ 1131{
1061 rmap_item->next = tree_rmap_item->next; 1132 rmap_item->head = stable_node;
1062 rmap_item->prev = tree_rmap_item;
1063
1064 if (tree_rmap_item->next)
1065 tree_rmap_item->next->prev = rmap_item;
1066
1067 tree_rmap_item->next = rmap_item;
1068 rmap_item->address |= STABLE_FLAG; 1133 rmap_item->address |= STABLE_FLAG;
1134 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
1069 1135
1070 ksm_pages_sharing++; 1136 if (rmap_item->hlist.next)
1137 ksm_pages_sharing++;
1138 else
1139 ksm_pages_shared++;
1071} 1140}
1072 1141
1073/* 1142/*
@@ -1081,49 +1150,37 @@ static void stable_tree_append(struct rmap_item *rmap_item,
1081 */ 1150 */
1082static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) 1151static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1083{ 1152{
1084 struct page *page2[1];
1085 struct rmap_item *tree_rmap_item; 1153 struct rmap_item *tree_rmap_item;
1154 struct page *tree_page = NULL;
1155 struct stable_node *stable_node;
1156 struct page *kpage;
1086 unsigned int checksum; 1157 unsigned int checksum;
1087 int err; 1158 int err;
1088 1159
1089 if (in_stable_tree(rmap_item)) 1160 remove_rmap_item_from_tree(rmap_item);
1090 remove_rmap_item_from_tree(rmap_item);
1091 1161
1092 /* We first start with searching the page inside the stable tree */ 1162 /* We first start with searching the page inside the stable tree */
1093 tree_rmap_item = stable_tree_search(page, page2, rmap_item); 1163 kpage = stable_tree_search(page);
1094 if (tree_rmap_item) { 1164 if (kpage) {
1095 if (page == page2[0]) /* forked */ 1165 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
1096 err = 0;
1097 else
1098 err = try_to_merge_with_ksm_page(rmap_item->mm,
1099 rmap_item->address,
1100 page, page2[0]);
1101 put_page(page2[0]);
1102
1103 if (!err) { 1166 if (!err) {
1104 /* 1167 /*
1105 * The page was successfully merged: 1168 * The page was successfully merged:
1106 * add its rmap_item to the stable tree. 1169 * add its rmap_item to the stable tree.
1107 */ 1170 */
1108 stable_tree_append(rmap_item, tree_rmap_item); 1171 lock_page(kpage);
1172 stable_tree_append(rmap_item, page_stable_node(kpage));
1173 unlock_page(kpage);
1109 } 1174 }
1175 put_page(kpage);
1110 return; 1176 return;
1111 } 1177 }
1112 1178
1113 /* 1179 /*
1114 * A ksm page might have got here by fork, but its other 1180 * If the hash value of the page has changed from the last time
1115 * references have already been removed from the stable tree. 1181 * we calculated it, this page is changing frequently: therefore we
1116 * Or it might be left over from a break_ksm which failed 1182 * don't want to insert it in the unstable tree, and we don't want
1117 * when the mem_cgroup had reached its limit: try again now. 1183 * to waste our time searching for something identical to it there.
1118 */
1119 if (PageKsm(page))
1120 break_cow(rmap_item->mm, rmap_item->address);
1121
1122 /*
1123 * In case the hash value of the page was changed from the last time we
1124 * have calculated it, this page to be changed frequely, therefore we
1125 * don't want to insert it to the unstable tree, and we don't want to
1126 * waste our time to search if there is something identical to it there.
1127 */ 1184 */
1128 checksum = calc_checksum(page); 1185 checksum = calc_checksum(page);
1129 if (rmap_item->oldchecksum != checksum) { 1186 if (rmap_item->oldchecksum != checksum) {
@@ -1131,21 +1188,27 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1131 return; 1188 return;
1132 } 1189 }
1133 1190
1134 tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item); 1191 tree_rmap_item =
1192 unstable_tree_search_insert(rmap_item, page, &tree_page);
1135 if (tree_rmap_item) { 1193 if (tree_rmap_item) {
1136 err = try_to_merge_two_pages(rmap_item->mm, 1194 kpage = try_to_merge_two_pages(rmap_item, page,
1137 rmap_item->address, page, 1195 tree_rmap_item, tree_page);
1138 tree_rmap_item->mm, 1196 put_page(tree_page);
1139 tree_rmap_item->address, page2[0]);
1140 /* 1197 /*
1141 * As soon as we merge this page, we want to remove the 1198 * As soon as we merge this page, we want to remove the
1142 * rmap_item of the page we have merged with from the unstable 1199 * rmap_item of the page we have merged with from the unstable
1143 * tree, and insert it instead as new node in the stable tree. 1200 * tree, and insert it instead as new node in the stable tree.
1144 */ 1201 */
1145 if (!err) { 1202 if (kpage) {
1146 rb_erase(&tree_rmap_item->node, &root_unstable_tree); 1203 remove_rmap_item_from_tree(tree_rmap_item);
1147 tree_rmap_item->address &= ~NODE_FLAG; 1204
1148 ksm_pages_unshared--; 1205 lock_page(kpage);
1206 stable_node = stable_tree_insert(kpage);
1207 if (stable_node) {
1208 stable_tree_append(tree_rmap_item, stable_node);
1209 stable_tree_append(rmap_item, stable_node);
1210 }
1211 unlock_page(kpage);
1149 1212
1150 /* 1213 /*
1151 * If we fail to insert the page into the stable tree, 1214 * If we fail to insert the page into the stable tree,
@@ -1153,37 +1216,28 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1153 * to a ksm page left outside the stable tree, 1216 * to a ksm page left outside the stable tree,
1154 * in which case we need to break_cow on both. 1217 * in which case we need to break_cow on both.
1155 */ 1218 */
1156 if (stable_tree_insert(page2[0], tree_rmap_item)) 1219 if (!stable_node) {
1157 stable_tree_append(rmap_item, tree_rmap_item); 1220 break_cow(tree_rmap_item);
1158 else { 1221 break_cow(rmap_item);
1159 break_cow(tree_rmap_item->mm,
1160 tree_rmap_item->address);
1161 break_cow(rmap_item->mm, rmap_item->address);
1162 } 1222 }
1163 } 1223 }
1164
1165 put_page(page2[0]);
1166 } 1224 }
1167} 1225}
1168 1226
1169static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, 1227static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
1170 struct list_head *cur, 1228 struct rmap_item **rmap_list,
1171 unsigned long addr) 1229 unsigned long addr)
1172{ 1230{
1173 struct rmap_item *rmap_item; 1231 struct rmap_item *rmap_item;
1174 1232
1175 while (cur != &mm_slot->rmap_list) { 1233 while (*rmap_list) {
1176 rmap_item = list_entry(cur, struct rmap_item, link); 1234 rmap_item = *rmap_list;
1177 if ((rmap_item->address & PAGE_MASK) == addr) { 1235 if ((rmap_item->address & PAGE_MASK) == addr)
1178 if (!in_stable_tree(rmap_item))
1179 remove_rmap_item_from_tree(rmap_item);
1180 return rmap_item; 1236 return rmap_item;
1181 }
1182 if (rmap_item->address > addr) 1237 if (rmap_item->address > addr)
1183 break; 1238 break;
1184 cur = cur->next; 1239 *rmap_list = rmap_item->rmap_list;
1185 remove_rmap_item_from_tree(rmap_item); 1240 remove_rmap_item_from_tree(rmap_item);
1186 list_del(&rmap_item->link);
1187 free_rmap_item(rmap_item); 1241 free_rmap_item(rmap_item);
1188 } 1242 }
1189 1243
@@ -1192,7 +1246,8 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
1192 /* It has already been zeroed */ 1246 /* It has already been zeroed */
1193 rmap_item->mm = mm_slot->mm; 1247 rmap_item->mm = mm_slot->mm;
1194 rmap_item->address = addr; 1248 rmap_item->address = addr;
1195 list_add_tail(&rmap_item->link, cur); 1249 rmap_item->rmap_list = *rmap_list;
1250 *rmap_list = rmap_item;
1196 } 1251 }
1197 return rmap_item; 1252 return rmap_item;
1198} 1253}
@@ -1217,8 +1272,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
1217 spin_unlock(&ksm_mmlist_lock); 1272 spin_unlock(&ksm_mmlist_lock);
1218next_mm: 1273next_mm:
1219 ksm_scan.address = 0; 1274 ksm_scan.address = 0;
1220 ksm_scan.rmap_item = list_entry(&slot->rmap_list, 1275 ksm_scan.rmap_list = &slot->rmap_list;
1221 struct rmap_item, link);
1222 } 1276 }
1223 1277
1224 mm = slot->mm; 1278 mm = slot->mm;
@@ -1244,10 +1298,10 @@ next_mm:
1244 flush_anon_page(vma, *page, ksm_scan.address); 1298 flush_anon_page(vma, *page, ksm_scan.address);
1245 flush_dcache_page(*page); 1299 flush_dcache_page(*page);
1246 rmap_item = get_next_rmap_item(slot, 1300 rmap_item = get_next_rmap_item(slot,
1247 ksm_scan.rmap_item->link.next, 1301 ksm_scan.rmap_list, ksm_scan.address);
1248 ksm_scan.address);
1249 if (rmap_item) { 1302 if (rmap_item) {
1250 ksm_scan.rmap_item = rmap_item; 1303 ksm_scan.rmap_list =
1304 &rmap_item->rmap_list;
1251 ksm_scan.address += PAGE_SIZE; 1305 ksm_scan.address += PAGE_SIZE;
1252 } else 1306 } else
1253 put_page(*page); 1307 put_page(*page);
@@ -1263,14 +1317,13 @@ next_mm:
1263 1317
1264 if (ksm_test_exit(mm)) { 1318 if (ksm_test_exit(mm)) {
1265 ksm_scan.address = 0; 1319 ksm_scan.address = 0;
1266 ksm_scan.rmap_item = list_entry(&slot->rmap_list, 1320 ksm_scan.rmap_list = &slot->rmap_list;
1267 struct rmap_item, link);
1268 } 1321 }
1269 /* 1322 /*
1270 * Nuke all the rmap_items that are above this current rmap: 1323 * Nuke all the rmap_items that are above this current rmap:
1271 * because there were no VM_MERGEABLE vmas with such addresses. 1324 * because there were no VM_MERGEABLE vmas with such addresses.
1272 */ 1325 */
1273 remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next); 1326 remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
1274 1327
1275 spin_lock(&ksm_mmlist_lock); 1328 spin_lock(&ksm_mmlist_lock);
1276 ksm_scan.mm_slot = list_entry(slot->mm_list.next, 1329 ksm_scan.mm_slot = list_entry(slot->mm_list.next,
@@ -1323,14 +1376,6 @@ static void ksm_do_scan(unsigned int scan_npages)
1323 return; 1376 return;
1324 if (!PageKsm(page) || !in_stable_tree(rmap_item)) 1377 if (!PageKsm(page) || !in_stable_tree(rmap_item))
1325 cmp_and_merge_page(page, rmap_item); 1378 cmp_and_merge_page(page, rmap_item);
1326 else if (page_mapcount(page) == 1) {
1327 /*
1328 * Replace now-unshared ksm page by ordinary page.
1329 */
1330 break_cow(rmap_item->mm, rmap_item->address);
1331 remove_rmap_item_from_tree(rmap_item);
1332 rmap_item->oldchecksum = calc_checksum(page);
1333 }
1334 put_page(page); 1379 put_page(page);
1335 } 1380 }
1336} 1381}
@@ -1375,7 +1420,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
1375 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | 1420 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
1376 VM_PFNMAP | VM_IO | VM_DONTEXPAND | 1421 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
1377 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | 1422 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1378 VM_MIXEDMAP | VM_SAO)) 1423 VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
1379 return 0; /* just ignore the advice */ 1424 return 0; /* just ignore the advice */
1380 1425
1381 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 1426 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
@@ -1452,7 +1497,7 @@ void __ksm_exit(struct mm_struct *mm)
1452 spin_lock(&ksm_mmlist_lock); 1497 spin_lock(&ksm_mmlist_lock);
1453 mm_slot = get_mm_slot(mm); 1498 mm_slot = get_mm_slot(mm);
1454 if (mm_slot && ksm_scan.mm_slot != mm_slot) { 1499 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
1455 if (list_empty(&mm_slot->rmap_list)) { 1500 if (!mm_slot->rmap_list) {
1456 hlist_del(&mm_slot->link); 1501 hlist_del(&mm_slot->link);
1457 list_del(&mm_slot->mm_list); 1502 list_del(&mm_slot->mm_list);
1458 easy_to_free = 1; 1503 easy_to_free = 1;
@@ -1473,6 +1518,249 @@ void __ksm_exit(struct mm_struct *mm)
1473 } 1518 }
1474} 1519}
1475 1520
1521struct page *ksm_does_need_to_copy(struct page *page,
1522 struct vm_area_struct *vma, unsigned long address)
1523{
1524 struct page *new_page;
1525
1526 unlock_page(page); /* any racers will COW it, not modify it */
1527
1528 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1529 if (new_page) {
1530 copy_user_highpage(new_page, page, address, vma);
1531
1532 SetPageDirty(new_page);
1533 __SetPageUptodate(new_page);
1534 SetPageSwapBacked(new_page);
1535 __set_page_locked(new_page);
1536
1537 if (page_evictable(new_page, vma))
1538 lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
1539 else
1540 add_page_to_unevictable_list(new_page);
1541 }
1542
1543 page_cache_release(page);
1544 return new_page;
1545}
1546
1547int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
1548 unsigned long *vm_flags)
1549{
1550 struct stable_node *stable_node;
1551 struct rmap_item *rmap_item;
1552 struct hlist_node *hlist;
1553 unsigned int mapcount = page_mapcount(page);
1554 int referenced = 0;
1555 int search_new_forks = 0;
1556
1557 VM_BUG_ON(!PageKsm(page));
1558 VM_BUG_ON(!PageLocked(page));
1559
1560 stable_node = page_stable_node(page);
1561 if (!stable_node)
1562 return 0;
1563again:
1564 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
1565 struct anon_vma *anon_vma = rmap_item->anon_vma;
1566 struct vm_area_struct *vma;
1567
1568 spin_lock(&anon_vma->lock);
1569 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
1570 if (rmap_item->address < vma->vm_start ||
1571 rmap_item->address >= vma->vm_end)
1572 continue;
1573 /*
1574 * Initially we examine only the vma which covers this
1575 * rmap_item; but later, if there is still work to do,
1576 * we examine covering vmas in other mms: in case they
1577 * were forked from the original since ksmd passed.
1578 */
1579 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
1580 continue;
1581
1582 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
1583 continue;
1584
1585 referenced += page_referenced_one(page, vma,
1586 rmap_item->address, &mapcount, vm_flags);
1587 if (!search_new_forks || !mapcount)
1588 break;
1589 }
1590 spin_unlock(&anon_vma->lock);
1591 if (!mapcount)
1592 goto out;
1593 }
1594 if (!search_new_forks++)
1595 goto again;
1596out:
1597 return referenced;
1598}
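
page_referenced_ksm() walks the stable node's rmap_items in two passes controlled by search_new_forks: the first pass only considers the vma belonging to the mm recorded in each rmap_item, and only if the page still appears mapped does a second pass look at the other vmas, which may belong to mms forked after ksmd recorded the item. All of the filtering is done by the single test (rmap_item->mm == vma->vm_mm) == search_new_forks. The same two-pass filter in a stand-alone toy (the item/owner names are made up):

#include <stdio.h>

struct item {
        int owner;
        int value;
};

/*
 * Visit the items whose owner matches "wanted" on the first pass, and
 * everything else on the second: (owner == wanted) == pass skips
 * exactly the half we are not interested in on each pass.
 */
static int sum_two_pass(const struct item *items, int n, int wanted)
{
        int pass, i, sum = 0;

        for (pass = 0; pass < 2; pass++) {
                for (i = 0; i < n; i++) {
                        /* pass 0: skip non-matching; pass 1: skip matching */
                        if ((items[i].owner == wanted) == pass)
                                continue;
                        sum += items[i].value;
                }
        }
        return sum;
}

int main(void)
{
        struct item items[] = { { 1, 10 }, { 2, 20 }, { 1, 30 } };

        /* owner 1's items (10 and 30) are visited before owner 2's */
        printf("%d\n", sum_two_pass(items, 3, 1));
        return 0;
}

try_to_unmap_ksm() and rmap_walk_ksm() below repeat exactly the same pattern.
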
1599
1600int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
1601{
1602 struct stable_node *stable_node;
1603 struct hlist_node *hlist;
1604 struct rmap_item *rmap_item;
1605 int ret = SWAP_AGAIN;
1606 int search_new_forks = 0;
1607
1608 VM_BUG_ON(!PageKsm(page));
1609 VM_BUG_ON(!PageLocked(page));
1610
1611 stable_node = page_stable_node(page);
1612 if (!stable_node)
1613 return SWAP_FAIL;
1614again:
1615 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
1616 struct anon_vma *anon_vma = rmap_item->anon_vma;
1617 struct vm_area_struct *vma;
1618
1619 spin_lock(&anon_vma->lock);
1620 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
1621 if (rmap_item->address < vma->vm_start ||
1622 rmap_item->address >= vma->vm_end)
1623 continue;
1624 /*
1625 * Initially we examine only the vma which covers this
1626 * rmap_item; but later, if there is still work to do,
1627 * we examine covering vmas in other mms: in case they
1628 * were forked from the original since ksmd passed.
1629 */
1630 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
1631 continue;
1632
1633 ret = try_to_unmap_one(page, vma,
1634 rmap_item->address, flags);
1635 if (ret != SWAP_AGAIN || !page_mapped(page)) {
1636 spin_unlock(&anon_vma->lock);
1637 goto out;
1638 }
1639 }
1640 spin_unlock(&anon_vma->lock);
1641 }
1642 if (!search_new_forks++)
1643 goto again;
1644out:
1645 return ret;
1646}
1647
1648#ifdef CONFIG_MIGRATION
1649int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
1650 struct vm_area_struct *, unsigned long, void *), void *arg)
1651{
1652 struct stable_node *stable_node;
1653 struct hlist_node *hlist;
1654 struct rmap_item *rmap_item;
1655 int ret = SWAP_AGAIN;
1656 int search_new_forks = 0;
1657
1658 VM_BUG_ON(!PageKsm(page));
1659 VM_BUG_ON(!PageLocked(page));
1660
1661 stable_node = page_stable_node(page);
1662 if (!stable_node)
1663 return ret;
1664again:
1665 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
1666 struct anon_vma *anon_vma = rmap_item->anon_vma;
1667 struct vm_area_struct *vma;
1668
1669 spin_lock(&anon_vma->lock);
1670 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
1671 if (rmap_item->address < vma->vm_start ||
1672 rmap_item->address >= vma->vm_end)
1673 continue;
1674 /*
1675 * Initially we examine only the vma which covers this
1676 * rmap_item; but later, if there is still work to do,
1677 * we examine covering vmas in other mms: in case they
1678 * were forked from the original since ksmd passed.
1679 */
1680 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
1681 continue;
1682
1683 ret = rmap_one(page, vma, rmap_item->address, arg);
1684 if (ret != SWAP_AGAIN) {
1685 spin_unlock(&anon_vma->lock);
1686 goto out;
1687 }
1688 }
1689 spin_unlock(&anon_vma->lock);
1690 }
1691 if (!search_new_forks++)
1692 goto again;
1693out:
1694 return ret;
1695}
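
rmap_walk_ksm() is the KSM side of a generic reverse-map walk: the caller supplies an rmap_one callback plus an opaque argument, the walker applies it to every vma mapping the page, and the walk stops as soon as the callback returns something other than SWAP_AGAIN; the migration rework in mm/migrate.c further down becomes a client of this interface through rmap_walk(). A compact sketch of that callback-walker shape, with invented names standing in for SWAP_AGAIN and the vma lists:

#include <stdio.h>

enum walk_ret { WALK_AGAIN, WALK_STOP };

struct mapping {
        int id;
};

/* Generic walker: apply "one" to every mapping, stop early on request. */
static enum walk_ret walk_mappings(struct mapping *maps, int n,
                enum walk_ret (*one)(struct mapping *, void *), void *arg)
{
        enum walk_ret ret = WALK_AGAIN;
        int i;

        for (i = 0; i < n; i++) {
                ret = one(&maps[i], arg);
                if (ret != WALK_AGAIN)
                        break;          /* callback asked us to stop */
        }
        return ret;
}

/* Example callback: stop when we find the mapping we are looking for. */
static enum walk_ret find_id(struct mapping *m, void *arg)
{
        return (m->id == *(int *)arg) ? WALK_STOP : WALK_AGAIN;
}

int main(void)
{
        struct mapping maps[] = { { 1 }, { 2 }, { 3 } };
        int wanted = 2;

        printf("%s\n", walk_mappings(maps, 3, find_id, &wanted) == WALK_STOP ?
               "found" : "not found");
        return 0;
}
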
1696
1697void ksm_migrate_page(struct page *newpage, struct page *oldpage)
1698{
1699 struct stable_node *stable_node;
1700
1701 VM_BUG_ON(!PageLocked(oldpage));
1702 VM_BUG_ON(!PageLocked(newpage));
1703 VM_BUG_ON(newpage->mapping != oldpage->mapping);
1704
1705 stable_node = page_stable_node(newpage);
1706 if (stable_node) {
1707 VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
1708 stable_node->kpfn = page_to_pfn(newpage);
1709 }
1710}
1711#endif /* CONFIG_MIGRATION */
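
ksm_migrate_page() only has to keep the stable tree's cached frame number in step with a page that has just been migrated: if the new page carries a stable node, its kpfn is redirected to the new frame. Reduced to its essence this is just "fix up a cached location when the object it points at moves", as in this throwaway sketch (types invented):

#include <stdio.h>

/* A lookup entry that caches the slot its object currently lives in. */
struct index_entry {
        int key;
        int slot;       /* like stable_node->kpfn: where the object is now */
};

/* When the object moves, the cached location must follow it. */
static void entry_migrate(struct index_entry *e, int old_slot, int new_slot)
{
        if (e->slot == old_slot)        /* sanity check, in the spirit of the VM_BUG_ON */
                e->slot = new_slot;
}

int main(void)
{
        struct index_entry e = { .key = 42, .slot = 7 };

        entry_migrate(&e, 7, 19);
        printf("key %d now in slot %d\n", e.key, e.slot);
        return 0;
}
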
1712
1713#ifdef CONFIG_MEMORY_HOTREMOVE
1714static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
1715 unsigned long end_pfn)
1716{
1717 struct rb_node *node;
1718
1719 for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
1720 struct stable_node *stable_node;
1721
1722 stable_node = rb_entry(node, struct stable_node, node);
1723 if (stable_node->kpfn >= start_pfn &&
1724 stable_node->kpfn < end_pfn)
1725 return stable_node;
1726 }
1727 return NULL;
1728}
1729
1730static int ksm_memory_callback(struct notifier_block *self,
1731 unsigned long action, void *arg)
1732{
1733 struct memory_notify *mn = arg;
1734 struct stable_node *stable_node;
1735
1736 switch (action) {
1737 case MEM_GOING_OFFLINE:
1738 /*
1739 * Keep it very simple for now: just lock out ksmd and
1740 * MADV_UNMERGEABLE while any memory is going offline.
1741 */
1742 mutex_lock(&ksm_thread_mutex);
1743 break;
1744
1745 case MEM_OFFLINE:
1746 /*
1747 * Most of the work is done by page migration; but there might
1748 * be a few stable_nodes left over, still pointing to struct
1749 * pages which have been offlined: prune those from the tree.
1750 */
1751 while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
1752 mn->start_pfn + mn->nr_pages)) != NULL)
1753 remove_node_from_stable_tree(stable_node);
1754 /* fallthrough */
1755
1756 case MEM_CANCEL_OFFLINE:
1757 mutex_unlock(&ksm_thread_mutex);
1758 break;
1759 }
1760 return NOTIFY_OK;
1761}
1762#endif /* CONFIG_MEMORY_HOTREMOVE */
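
The hotremove notifier holds ksm_thread_mutex for the whole offline operation: it takes the mutex on MEM_GOING_OFFLINE, prunes any stable_nodes that still point into the offlined range on MEM_OFFLINE, and drops the mutex on both MEM_OFFLINE and MEM_CANCEL_OFFLINE. A minimal model of that "lock across a multi-phase event" notifier, with a pthread mutex standing in for ksm_thread_mutex and made-up event names:

#include <pthread.h>
#include <stdio.h>

enum mem_event { GOING_OFFLINE, OFFLINE, CANCEL_OFFLINE };

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;

static void prune_stale_entries(void)
{
        /* the real code would drop index entries for the vanished memory */
        printf("pruning stale entries\n");
}

/* Notifier: lock out the scanner for the whole offline operation. */
static void memory_event(enum mem_event ev)
{
        switch (ev) {
        case GOING_OFFLINE:
                pthread_mutex_lock(&scan_lock);   /* block scanner and unmerge */
                break;
        case OFFLINE:
                prune_stale_entries();
                /* fall through: unlock in both completion cases */
        case CANCEL_OFFLINE:
                pthread_mutex_unlock(&scan_lock);
                break;
        }
}

int main(void)
{
        memory_event(GOING_OFFLINE);
        memory_event(OFFLINE);          /* or CANCEL_OFFLINE on failure */
        return 0;
}
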
1763
1476#ifdef CONFIG_SYSFS 1764#ifdef CONFIG_SYSFS
1477/* 1765/*
1478 * This all compiles without CONFIG_SYSFS, but is a waste of space. 1766 * This all compiles without CONFIG_SYSFS, but is a waste of space.
@@ -1551,8 +1839,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
1551 /* 1839 /*
1552 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 1840 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
1553 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 1841 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
1554 * breaking COW to free the unswappable pages_shared (but leaves 1842 * breaking COW to free the pages_shared (but leaves mm_slots
1555 * mm_slots on the list for when ksmd may be set running again). 1843 * on the list for when ksmd may be set running again).
1556 */ 1844 */
1557 1845
1558 mutex_lock(&ksm_thread_mutex); 1846 mutex_lock(&ksm_thread_mutex);
@@ -1577,29 +1865,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
1577} 1865}
1578KSM_ATTR(run); 1866KSM_ATTR(run);
1579 1867
1580static ssize_t max_kernel_pages_store(struct kobject *kobj,
1581 struct kobj_attribute *attr,
1582 const char *buf, size_t count)
1583{
1584 int err;
1585 unsigned long nr_pages;
1586
1587 err = strict_strtoul(buf, 10, &nr_pages);
1588 if (err)
1589 return -EINVAL;
1590
1591 ksm_max_kernel_pages = nr_pages;
1592
1593 return count;
1594}
1595
1596static ssize_t max_kernel_pages_show(struct kobject *kobj,
1597 struct kobj_attribute *attr, char *buf)
1598{
1599 return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
1600}
1601KSM_ATTR(max_kernel_pages);
1602
1603static ssize_t pages_shared_show(struct kobject *kobj, 1868static ssize_t pages_shared_show(struct kobject *kobj,
1604 struct kobj_attribute *attr, char *buf) 1869 struct kobj_attribute *attr, char *buf)
1605{ 1870{
@@ -1649,7 +1914,6 @@ static struct attribute *ksm_attrs[] = {
1649 &sleep_millisecs_attr.attr, 1914 &sleep_millisecs_attr.attr,
1650 &pages_to_scan_attr.attr, 1915 &pages_to_scan_attr.attr,
1651 &run_attr.attr, 1916 &run_attr.attr,
1652 &max_kernel_pages_attr.attr,
1653 &pages_shared_attr.attr, 1917 &pages_shared_attr.attr,
1654 &pages_sharing_attr.attr, 1918 &pages_sharing_attr.attr,
1655 &pages_unshared_attr.attr, 1919 &pages_unshared_attr.attr,
@@ -1669,8 +1933,6 @@ static int __init ksm_init(void)
1669 struct task_struct *ksm_thread; 1933 struct task_struct *ksm_thread;
1670 int err; 1934 int err;
1671 1935
1672 ksm_max_kernel_pages = totalram_pages / 4;
1673
1674 err = ksm_slab_init(); 1936 err = ksm_slab_init();
1675 if (err) 1937 if (err)
1676 goto out; 1938 goto out;
@@ -1698,6 +1960,13 @@ static int __init ksm_init(void)
1698 1960
1699#endif /* CONFIG_SYSFS */ 1961#endif /* CONFIG_SYSFS */
1700 1962
1963#ifdef CONFIG_MEMORY_HOTREMOVE
1964 /*
1965 * Choose a high priority since the callback takes ksm_thread_mutex:
1966 * later callbacks could only be taking locks which nest within that.
1967 */
1968 hotplug_memory_notifier(ksm_memory_callback, 100);
1969#endif
1701 return 0; 1970 return 0;
1702 1971
1703out_free2: 1972out_free2:
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c31a310aa146..878808c4fcbe 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -38,6 +38,7 @@
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/mm_inline.h> 39#include <linux/mm_inline.h>
40#include <linux/page_cgroup.h> 40#include <linux/page_cgroup.h>
41#include <linux/cpu.h>
41#include "internal.h" 42#include "internal.h"
42 43
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
@@ -54,7 +55,6 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
54#define do_swap_account (0) 55#define do_swap_account (0)
55#endif 56#endif
56 57
57static DEFINE_MUTEX(memcg_tasklist); /* can be hold under cgroup_mutex */
58#define SOFTLIMIT_EVENTS_THRESH (1000) 58#define SOFTLIMIT_EVENTS_THRESH (1000)
59 59
60/* 60/*
@@ -66,7 +66,7 @@ enum mem_cgroup_stat_index {
66 */ 66 */
67 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ 67 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
68 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 68 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
69 MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */ 69 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
70 MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ 70 MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
71 MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ 71 MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
72 MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */ 72 MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
@@ -275,6 +275,7 @@ enum charge_type {
275static void mem_cgroup_get(struct mem_cgroup *mem); 275static void mem_cgroup_get(struct mem_cgroup *mem);
276static void mem_cgroup_put(struct mem_cgroup *mem); 276static void mem_cgroup_put(struct mem_cgroup *mem);
277static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); 277static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
278static void drain_all_stock_async(void);
278 279
279static struct mem_cgroup_per_zone * 280static struct mem_cgroup_per_zone *
280mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) 281mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -758,7 +759,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
758 task_unlock(task); 759 task_unlock(task);
759 if (!curr) 760 if (!curr)
760 return 0; 761 return 0;
761 if (curr->use_hierarchy) 762 /*
 763 * We should check use_hierarchy of "mem", not "curr". Checking
 764 * use_hierarchy of "curr" here would make this function return true
 765 * if hierarchy is enabled in "curr" and "curr" is a child of "mem" in
 766 * the *cgroup* hierarchy, even if use_hierarchy is disabled in "mem".
767 */
768 if (mem->use_hierarchy)
762 ret = css_is_ancestor(&curr->css, &mem->css); 769 ret = css_is_ancestor(&curr->css, &mem->css);
763 else 770 else
764 ret = (curr == mem); 771 ret = (curr == mem);
@@ -1007,7 +1014,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1007 static char memcg_name[PATH_MAX]; 1014 static char memcg_name[PATH_MAX];
1008 int ret; 1015 int ret;
1009 1016
1010 if (!memcg) 1017 if (!memcg || !p)
1011 return; 1018 return;
1012 1019
1013 1020
@@ -1137,6 +1144,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1137 victim = mem_cgroup_select_victim(root_mem); 1144 victim = mem_cgroup_select_victim(root_mem);
1138 if (victim == root_mem) { 1145 if (victim == root_mem) {
1139 loop++; 1146 loop++;
1147 if (loop >= 1)
1148 drain_all_stock_async();
1140 if (loop >= 2) { 1149 if (loop >= 2) {
1141 /* 1150 /*
1142 * If we have not been able to reclaim 1151 * If we have not been able to reclaim
@@ -1223,7 +1232,7 @@ static void record_last_oom(struct mem_cgroup *mem)
1223 * Currently used to update mapped file statistics, but the routine can be 1232 * Currently used to update mapped file statistics, but the routine can be
1224 * generalized to update other statistics as well. 1233 * generalized to update other statistics as well.
1225 */ 1234 */
1226void mem_cgroup_update_mapped_file_stat(struct page *page, int val) 1235void mem_cgroup_update_file_mapped(struct page *page, int val)
1227{ 1236{
1228 struct mem_cgroup *mem; 1237 struct mem_cgroup *mem;
1229 struct mem_cgroup_stat *stat; 1238 struct mem_cgroup_stat *stat;
@@ -1231,9 +1240,6 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
1231 int cpu; 1240 int cpu;
1232 struct page_cgroup *pc; 1241 struct page_cgroup *pc;
1233 1242
1234 if (!page_is_file_cache(page))
1235 return;
1236
1237 pc = lookup_page_cgroup(page); 1243 pc = lookup_page_cgroup(page);
1238 if (unlikely(!pc)) 1244 if (unlikely(!pc))
1239 return; 1245 return;
@@ -1253,12 +1259,139 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
1253 stat = &mem->stat; 1259 stat = &mem->stat;
1254 cpustat = &stat->cpustat[cpu]; 1260 cpustat = &stat->cpustat[cpu];
1255 1261
1256 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val); 1262 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
1257done: 1263done:
1258 unlock_page_cgroup(pc); 1264 unlock_page_cgroup(pc);
1259} 1265}
1260 1266
1261/* 1267/*
1268 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1269 * TODO: it may be necessary to use larger batches on big machines.
1270 */
1271#define CHARGE_SIZE (32 * PAGE_SIZE)
1272struct memcg_stock_pcp {
1273 struct mem_cgroup *cached; /* this is never the root cgroup */
1274 int charge;
1275 struct work_struct work;
1276};
1277static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1278static atomic_t memcg_drain_count;
1279
1280/*
1281 * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is consumed
1282 * from the local stock and true is returned. If the stock is empty, or holds
1283 * charges from a cgroup other than the current target, false is returned and
1284 * the stock will then be refilled by the slow path.
1285 */
1286static bool consume_stock(struct mem_cgroup *mem)
1287{
1288 struct memcg_stock_pcp *stock;
1289 bool ret = true;
1290
1291 stock = &get_cpu_var(memcg_stock);
1292 if (mem == stock->cached && stock->charge)
1293 stock->charge -= PAGE_SIZE;
1294 else /* need to call res_counter_charge */
1295 ret = false;
1296 put_cpu_var(memcg_stock);
1297 return ret;
1298}
1299
1300/*
1301 * Return the charges cached in this percpu stock to the res_counter and reset it.
1302 */
1303static void drain_stock(struct memcg_stock_pcp *stock)
1304{
1305 struct mem_cgroup *old = stock->cached;
1306
1307 if (stock->charge) {
1308 res_counter_uncharge(&old->res, stock->charge);
1309 if (do_swap_account)
1310 res_counter_uncharge(&old->memsw, stock->charge);
1311 }
1312 stock->cached = NULL;
1313 stock->charge = 0;
1314}
1315
1316/*
1317 * This must be called with preemption disabled, or from a thread
1318 * which is pinned to the local cpu.
1319 */
1320static void drain_local_stock(struct work_struct *dummy)
1321{
1322 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1323 drain_stock(stock);
1324}
1325
1326/*
1327 * Cache charges(val) obtained from the res_counter in the local per_cpu area.
1328 * They will be consumed by consume_stock() later.
1329 */
1330static void refill_stock(struct mem_cgroup *mem, int val)
1331{
1332 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1333
1334 if (stock->cached != mem) { /* reset if necessary */
1335 drain_stock(stock);
1336 stock->cached = mem;
1337 }
1338 stock->charge += val;
1339 put_cpu_var(memcg_stock);
1340}
1341
1342/*
1343 * Tries to drain stocked charges on other cpus. This function is asynchronous
1344 * and just queues a work item per cpu to drain the stock locally on each cpu.
1345 * Callers can expect some charges to be returned to the res_counter later,
1346 * but cannot wait for that.
1347 */
1348static void drain_all_stock_async(void)
1349{
1350 int cpu;
1351 /* This function schedules "drain" asynchronously. The result of the
1352 * drain is not handled directly by callers, so if a drain is already
1353 * in flight we don't have to schedule another one. In any case, the
1354 * WORK_STRUCT_PENDING check in queue_work_on() will catch a race;
1355 * we only do a loose check here.
1356 */
1357 if (atomic_read(&memcg_drain_count))
1358 return;
1359 /* Notify other cpus that system-wide "drain" is running */
1360 atomic_inc(&memcg_drain_count);
1361 get_online_cpus();
1362 for_each_online_cpu(cpu) {
1363 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1364 schedule_work_on(cpu, &stock->work);
1365 }
1366 put_online_cpus();
1367 atomic_dec(&memcg_drain_count);
1368 /* We don't wait for flush_work */
1369}
1370
1371/* This is a synchronous drain interface. */
1372static void drain_all_stock_sync(void)
1373{
1374 /* called when force_empty is called */
1375 atomic_inc(&memcg_drain_count);
1376 schedule_on_each_cpu(drain_local_stock);
1377 atomic_dec(&memcg_drain_count);
1378}
1379
1380static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1381 unsigned long action,
1382 void *hcpu)
1383{
1384 int cpu = (unsigned long)hcpu;
1385 struct memcg_stock_pcp *stock;
1386
1387 if (action != CPU_DEAD)
1388 return NOTIFY_OK;
1389 stock = &per_cpu(memcg_stock, cpu);
1390 drain_stock(stock);
1391 return NOTIFY_OK;
1392}
1393
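
The memcg_stock_pcp machinery is a per-cpu cache in front of the res_counter: a charge first tries consume_stock(), and only on a miss does the slow path touch the shared counter, where it reserves a whole CHARGE_SIZE batch and parks the surplus in the local stock through refill_stock(); drain_stock() hands unused charge back, and the async/sync drain helpers flush every cpu's stock. The idea can be modelled in plain C with a thread-local stock in front of one global atomic counter (the sizes and names are illustrative only, and there is no notion of separate cgroups here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE  1
#define BATCH 32                        /* like CHARGE_SIZE: 32 pages at a time */

static _Atomic long global_used;        /* the shared, contended counter */
static _Thread_local long stock;        /* per-thread cache of pre-charged pages */

/* Fast path: take one page from the local stock if it has any. */
static bool consume_stock(void)
{
        if (stock >= PAGE) {
                stock -= PAGE;
                return true;
        }
        return false;
}

/* Slow path: pre-charge a whole batch against the shared counter. */
static bool charge_one_page(long limit)
{
        if (consume_stock())
                return true;
        if (atomic_fetch_add(&global_used, BATCH) + BATCH > limit) {
                atomic_fetch_sub(&global_used, BATCH);
                return false;           /* the real code retries with one page */
        }
        stock = BATCH - PAGE;           /* keep the rest of the batch for later */
        return true;
}

/* Give whatever is left in the local stock back to the shared counter. */
static void drain_stock(void)
{
        atomic_fetch_sub(&global_used, stock);
        stock = 0;
}

int main(void)
{
        int i, ok = 0;

        for (i = 0; i < 100; i++)
                ok += charge_one_page(64);
        drain_stock();
        printf("charged %d of 100 pages, counter now %ld\n",
               ok, atomic_load(&global_used));
        return 0;
}

In the steady state most charges then cost a thread-local update rather than an atomic operation on shared state, which is the contention the per-cpu stock is meant to remove.
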
1394/*
1262 * Unlike exported interface, "oom" parameter is added. if oom==true, 1395 * Unlike exported interface, "oom" parameter is added. if oom==true,
1263 * oom-killer can be invoked. 1396 * oom-killer can be invoked.
1264 */ 1397 */
@@ -1269,6 +1402,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1269 struct mem_cgroup *mem, *mem_over_limit; 1402 struct mem_cgroup *mem, *mem_over_limit;
1270 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 1403 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1271 struct res_counter *fail_res; 1404 struct res_counter *fail_res;
1405 int csize = CHARGE_SIZE;
1272 1406
1273 if (unlikely(test_thread_flag(TIF_MEMDIE))) { 1407 if (unlikely(test_thread_flag(TIF_MEMDIE))) {
1274 /* Don't account this! */ 1408 /* Don't account this! */
@@ -1293,23 +1427,25 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1293 return 0; 1427 return 0;
1294 1428
1295 VM_BUG_ON(css_is_removed(&mem->css)); 1429 VM_BUG_ON(css_is_removed(&mem->css));
1430 if (mem_cgroup_is_root(mem))
1431 goto done;
1296 1432
1297 while (1) { 1433 while (1) {
1298 int ret = 0; 1434 int ret = 0;
1299 unsigned long flags = 0; 1435 unsigned long flags = 0;
1300 1436
1301 if (mem_cgroup_is_root(mem)) 1437 if (consume_stock(mem))
1302 goto done; 1438 goto charged;
1303 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res); 1439
1440 ret = res_counter_charge(&mem->res, csize, &fail_res);
1304 if (likely(!ret)) { 1441 if (likely(!ret)) {
1305 if (!do_swap_account) 1442 if (!do_swap_account)
1306 break; 1443 break;
1307 ret = res_counter_charge(&mem->memsw, PAGE_SIZE, 1444 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1308 &fail_res);
1309 if (likely(!ret)) 1445 if (likely(!ret))
1310 break; 1446 break;
1311 /* mem+swap counter fails */ 1447 /* mem+swap counter fails */
1312 res_counter_uncharge(&mem->res, PAGE_SIZE); 1448 res_counter_uncharge(&mem->res, csize);
1313 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 1449 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1314 mem_over_limit = mem_cgroup_from_res_counter(fail_res, 1450 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1315 memsw); 1451 memsw);
@@ -1318,6 +1454,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1318 mem_over_limit = mem_cgroup_from_res_counter(fail_res, 1454 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1319 res); 1455 res);
1320 1456
1457 /* reduce request size and retry */
1458 if (csize > PAGE_SIZE) {
1459 csize = PAGE_SIZE;
1460 continue;
1461 }
1321 if (!(gfp_mask & __GFP_WAIT)) 1462 if (!(gfp_mask & __GFP_WAIT))
1322 goto nomem; 1463 goto nomem;
1323 1464
@@ -1339,14 +1480,15 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1339 1480
1340 if (!nr_retries--) { 1481 if (!nr_retries--) {
1341 if (oom) { 1482 if (oom) {
1342 mutex_lock(&memcg_tasklist);
1343 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask); 1483 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
1344 mutex_unlock(&memcg_tasklist);
1345 record_last_oom(mem_over_limit); 1484 record_last_oom(mem_over_limit);
1346 } 1485 }
1347 goto nomem; 1486 goto nomem;
1348 } 1487 }
1349 } 1488 }
1489 if (csize > PAGE_SIZE)
1490 refill_stock(mem, csize - PAGE_SIZE);
1491charged:
1350 /* 1492 /*
1351 * Insert ancestor (and ancestor's ancestors) into the softlimit RB-tree 1493 * Insert ancestor (and ancestor's ancestors) into the softlimit RB-tree
1352 * if they exceed the softlimit. 1494 * if they exceed the softlimit.
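
__mem_cgroup_try_charge() now asks the res_counter for csize = CHARGE_SIZE up front; if that fails it shrinks the request to PAGE_SIZE and retries before ever going into reclaim, and a successful batch leaves csize - PAGE_SIZE to be stashed by refill_stock() at the "charged:" label. The "try a big reservation, fall back to a small one" loop on its own looks like the following sketch, where reserve() and the budget are stand-ins rather than kernel interfaces:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ  4096
#define BATCH_SZ (32 * PAGE_SZ)

static long budget = 40 * PAGE_SZ;      /* what the counter will still admit */

/* Pretend res_counter_charge(): an all-or-nothing reservation. */
static bool reserve(long bytes)
{
        if (budget < bytes)
                return false;
        budget -= bytes;
        return true;
}

/*
 * Charge one page: prefer a whole batch so later charges are free, but
 * fall back to a single page when the batch would exceed the limit.
 */
static long charge_page(void)
{
        long csize = BATCH_SZ;

        while (!reserve(csize)) {
                if (csize > PAGE_SZ) {
                        csize = PAGE_SZ;        /* reduce request size and retry */
                        continue;
                }
                return -1;                      /* even a single page failed */
        }
        return csize - PAGE_SZ;                 /* surplus to park in a stock */
}

int main(void)
{
        printf("surplus after first charge:  %ld bytes\n", charge_page());
        printf("surplus after second charge: %ld bytes\n", charge_page());
        return 0;
}
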
@@ -1361,6 +1503,21 @@ nomem:
1361} 1503}
1362 1504
1363/* 1505/*
1506 * Sometimes we have to undo a charge we got by try_charge().
1507 * This function does that uncharge and drops the css refcount
1508 * taken by try_charge().
1509 */
1510static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1511{
1512 if (!mem_cgroup_is_root(mem)) {
1513 res_counter_uncharge(&mem->res, PAGE_SIZE);
1514 if (do_swap_account)
1515 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1516 }
1517 css_put(&mem->css);
1518}
1519
1520/*
1364 * A helper function to get mem_cgroup from ID. must be called under 1521 * A helper function to get mem_cgroup from ID. must be called under
1365 * rcu_read_lock(). The caller must check css_is_removed() or the like if 1522 * rcu_read_lock(). The caller must check css_is_removed() or the like if
1366 * that is a concern. (dropping refcnt from swap can be called against removed 1523 * that is a concern. (dropping refcnt from swap can be called against removed
@@ -1426,12 +1583,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1426 lock_page_cgroup(pc); 1583 lock_page_cgroup(pc);
1427 if (unlikely(PageCgroupUsed(pc))) { 1584 if (unlikely(PageCgroupUsed(pc))) {
1428 unlock_page_cgroup(pc); 1585 unlock_page_cgroup(pc);
1429 if (!mem_cgroup_is_root(mem)) { 1586 mem_cgroup_cancel_charge(mem);
1430 res_counter_uncharge(&mem->res, PAGE_SIZE);
1431 if (do_swap_account)
1432 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1433 }
1434 css_put(&mem->css);
1435 return; 1587 return;
1436 } 1588 }
1437 1589
@@ -1464,27 +1616,22 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1464} 1616}
1465 1617
1466/** 1618/**
1467 * mem_cgroup_move_account - move account of the page 1619 * __mem_cgroup_move_account - move account of the page
1468 * @pc: page_cgroup of the page. 1620 * @pc: page_cgroup of the page.
1469 * @from: mem_cgroup which the page is moved from. 1621 * @from: mem_cgroup which the page is moved from.
1470 * @to: mem_cgroup which the page is moved to. @from != @to. 1622 * @to: mem_cgroup which the page is moved to. @from != @to.
1471 * 1623 *
1472 * The caller must confirm following. 1624 * The caller must confirm following.
1473 * - page is not on LRU (isolate_page() is useful.) 1625 * - page is not on LRU (isolate_page() is useful.)
1474 * 1626 * - the pc is locked, used, and ->mem_cgroup points to @from.
1475 * returns 0 at success,
1476 * returns -EBUSY when lock is busy or "pc" is unstable.
1477 * 1627 *
1478 * This function does "uncharge" from old cgroup but doesn't do "charge" to 1628 * This function does "uncharge" from old cgroup but doesn't do "charge" to
1479 * new cgroup. It should be done by a caller. 1629 * new cgroup. It should be done by a caller.
1480 */ 1630 */
1481 1631
1482static int mem_cgroup_move_account(struct page_cgroup *pc, 1632static void __mem_cgroup_move_account(struct page_cgroup *pc,
1483 struct mem_cgroup *from, struct mem_cgroup *to) 1633 struct mem_cgroup *from, struct mem_cgroup *to)
1484{ 1634{
1485 struct mem_cgroup_per_zone *from_mz, *to_mz;
1486 int nid, zid;
1487 int ret = -EBUSY;
1488 struct page *page; 1635 struct page *page;
1489 int cpu; 1636 int cpu;
1490 struct mem_cgroup_stat *stat; 1637 struct mem_cgroup_stat *stat;
@@ -1492,38 +1639,27 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
1492 1639
1493 VM_BUG_ON(from == to); 1640 VM_BUG_ON(from == to);
1494 VM_BUG_ON(PageLRU(pc->page)); 1641 VM_BUG_ON(PageLRU(pc->page));
1495 1642 VM_BUG_ON(!PageCgroupLocked(pc));
1496 nid = page_cgroup_nid(pc); 1643 VM_BUG_ON(!PageCgroupUsed(pc));
1497 zid = page_cgroup_zid(pc); 1644 VM_BUG_ON(pc->mem_cgroup != from);
1498 from_mz = mem_cgroup_zoneinfo(from, nid, zid);
1499 to_mz = mem_cgroup_zoneinfo(to, nid, zid);
1500
1501 if (!trylock_page_cgroup(pc))
1502 return ret;
1503
1504 if (!PageCgroupUsed(pc))
1505 goto out;
1506
1507 if (pc->mem_cgroup != from)
1508 goto out;
1509 1645
1510 if (!mem_cgroup_is_root(from)) 1646 if (!mem_cgroup_is_root(from))
1511 res_counter_uncharge(&from->res, PAGE_SIZE); 1647 res_counter_uncharge(&from->res, PAGE_SIZE);
1512 mem_cgroup_charge_statistics(from, pc, false); 1648 mem_cgroup_charge_statistics(from, pc, false);
1513 1649
1514 page = pc->page; 1650 page = pc->page;
1515 if (page_is_file_cache(page) && page_mapped(page)) { 1651 if (page_mapped(page) && !PageAnon(page)) {
1516 cpu = smp_processor_id(); 1652 cpu = smp_processor_id();
1517 /* Update mapped_file data for mem_cgroup "from" */ 1653 /* Update mapped_file data for mem_cgroup "from" */
1518 stat = &from->stat; 1654 stat = &from->stat;
1519 cpustat = &stat->cpustat[cpu]; 1655 cpustat = &stat->cpustat[cpu];
1520 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, 1656 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
1521 -1); 1657 -1);
1522 1658
1523 /* Update mapped_file data for mem_cgroup "to" */ 1659 /* Update mapped_file data for mem_cgroup "to" */
1524 stat = &to->stat; 1660 stat = &to->stat;
1525 cpustat = &stat->cpustat[cpu]; 1661 cpustat = &stat->cpustat[cpu];
1526 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, 1662 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
1527 1); 1663 1);
1528 } 1664 }
1529 1665
@@ -1534,15 +1670,28 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
1534 css_get(&to->css); 1670 css_get(&to->css);
1535 pc->mem_cgroup = to; 1671 pc->mem_cgroup = to;
1536 mem_cgroup_charge_statistics(to, pc, true); 1672 mem_cgroup_charge_statistics(to, pc, true);
1537 ret = 0;
1538out:
1539 unlock_page_cgroup(pc);
1540 /* 1673 /*
1541 * We charge against "to", which may not have any tasks. Then "to" 1674 * We charge against "to", which may not have any tasks. Then "to"
1542 * can be under rmdir(). But in the current implementation, the caller of 1675 * can be under rmdir(). But in the current implementation, the caller of
1543 * this function is just force_empty() and it is guaranteed that 1676 * this function is just force_empty() and it is guaranteed that
1544 * "to" is never removed. So, we don't check rmdir status here. 1677 * "to" is never removed. So, we don't check rmdir status here.
1545 */ 1678 */
1679}
1680
1681/*
1682 * check whether the @pc is valid for moving account and call
1683 * __mem_cgroup_move_account()
1684 */
1685static int mem_cgroup_move_account(struct page_cgroup *pc,
1686 struct mem_cgroup *from, struct mem_cgroup *to)
1687{
1688 int ret = -EINVAL;
1689 lock_page_cgroup(pc);
1690 if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
1691 __mem_cgroup_move_account(pc, from, to);
1692 ret = 0;
1693 }
1694 unlock_page_cgroup(pc);
1546 return ret; 1695 return ret;
1547} 1696}
1548 1697
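
mem_cgroup_move_account() is now split into an unlocked core, __mem_cgroup_move_account(), which only asserts its preconditions (page_cgroup locked, used and owned by "from"), and a thin wrapper that takes the lock, revalidates those conditions and calls the core. A generic stand-alone version of that "validate under the lock, do the work in a precondition-asserting helper" split might look like this (made-up record type, pthread mutex for the page_cgroup lock):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct record {
        pthread_mutex_t lock;
        int owner;
        bool used;
};

/* Core helper: caller must hold rec->lock and have validated the record. */
static void __move_record(struct record *rec, int from, int to)
{
        /* preconditions, in the spirit of the VM_BUG_ON()s above */
        assert(rec->used && rec->owner == from);
        rec->owner = to;
}

/* Wrapper: take the lock, check the preconditions, then call the core. */
static int move_record(struct record *rec, int from, int to)
{
        int ret = -1;

        pthread_mutex_lock(&rec->lock);
        if (rec->used && rec->owner == from) {
                __move_record(rec, from, to);
                ret = 0;
        }
        pthread_mutex_unlock(&rec->lock);
        return ret;
}

int main(void)
{
        struct record rec = { .lock = PTHREAD_MUTEX_INITIALIZER, .owner = 1, .used = true };

        printf("move 1->2: %d, owner now %d\n", move_record(&rec, 1, 2), rec.owner);
        printf("move 1->3: %d, owner now %d\n", move_record(&rec, 1, 3), rec.owner);
        return 0;
}
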
@@ -1564,45 +1713,27 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
1564 if (!pcg) 1713 if (!pcg)
1565 return -EINVAL; 1714 return -EINVAL;
1566 1715
1716 ret = -EBUSY;
1717 if (!get_page_unless_zero(page))
1718 goto out;
1719 if (isolate_lru_page(page))
1720 goto put;
1567 1721
1568 parent = mem_cgroup_from_cont(pcg); 1722 parent = mem_cgroup_from_cont(pcg);
1569
1570
1571 ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page); 1723 ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
1572 if (ret || !parent) 1724 if (ret || !parent)
1573 return ret; 1725 goto put_back;
1574
1575 if (!get_page_unless_zero(page)) {
1576 ret = -EBUSY;
1577 goto uncharge;
1578 }
1579
1580 ret = isolate_lru_page(page);
1581
1582 if (ret)
1583 goto cancel;
1584 1726
1585 ret = mem_cgroup_move_account(pc, child, parent); 1727 ret = mem_cgroup_move_account(pc, child, parent);
1586 1728 if (!ret)
1729 css_put(&parent->css); /* drop extra refcnt by try_charge() */
1730 else
1731 mem_cgroup_cancel_charge(parent); /* does css_put */
1732put_back:
1587 putback_lru_page(page); 1733 putback_lru_page(page);
1588 if (!ret) { 1734put:
1589 put_page(page);
1590 /* drop extra refcnt by try_charge() */
1591 css_put(&parent->css);
1592 return 0;
1593 }
1594
1595cancel:
1596 put_page(page); 1735 put_page(page);
1597uncharge: 1736out:
1598 /* drop extra refcnt by try_charge() */
1599 css_put(&parent->css);
1600 /* uncharge if move fails */
1601 if (!mem_cgroup_is_root(parent)) {
1602 res_counter_uncharge(&parent->res, PAGE_SIZE);
1603 if (do_swap_account)
1604 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1605 }
1606 return ret; 1737 return ret;
1607} 1738}
1608 1739
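
The reworked mem_cgroup_move_parent() acquires its resources in a fixed order (page reference, LRU isolation, parent charge) and unwinds them through a chain of goto labels (put_back, put, out), so every failure path releases exactly what was already taken, in reverse order. The same error-unwinding style in a self-contained toy, where the three "resources" are just two heap buffers and a FILE chosen purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Acquire three resources in order; on any failure, release in reverse. */
static int do_operation(void)
{
        char *buf_a, *buf_b;
        FILE *f;
        int ret = -1;

        buf_a = malloc(64);
        if (!buf_a)
                goto out;
        buf_b = malloc(64);
        if (!buf_b)
                goto free_a;
        f = fopen("/dev/null", "w");    /* stands in for the third resource */
        if (!f)
                goto free_b;

        strcpy(buf_a, "work");          /* the actual operation */
        fputs(buf_a, f);
        ret = 0;

        fclose(f);
free_b:
        free(buf_b);
free_a:
        free(buf_a);
out:
        return ret;
}

int main(void)
{
        printf("do_operation: %d\n", do_operation());
        return 0;
}
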
@@ -1737,11 +1868,12 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1737 goto charge_cur_mm; 1868 goto charge_cur_mm;
1738 /* 1869 /*
1739 * A racing thread's fault, or swapoff, may have already updated 1870 * A racing thread's fault, or swapoff, may have already updated
1740 * the pte, and even removed page from swap cache: return success 1871 * the pte, and even removed page from swap cache: in those cases
1741 * to go on to do_swap_page()'s pte_same() test, which should fail. 1872 * do_swap_page()'s pte_same() test will fail; but there's also a
1873 * KSM case which does need to charge the page.
1742 */ 1874 */
1743 if (!PageSwapCache(page)) 1875 if (!PageSwapCache(page))
1744 return 0; 1876 goto charge_cur_mm;
1745 mem = try_get_mem_cgroup_from_swapcache(page); 1877 mem = try_get_mem_cgroup_from_swapcache(page);
1746 if (!mem) 1878 if (!mem)
1747 goto charge_cur_mm; 1879 goto charge_cur_mm;
@@ -1818,14 +1950,53 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1818 return; 1950 return;
1819 if (!mem) 1951 if (!mem)
1820 return; 1952 return;
1821 if (!mem_cgroup_is_root(mem)) { 1953 mem_cgroup_cancel_charge(mem);
1822 res_counter_uncharge(&mem->res, PAGE_SIZE);
1823 if (do_swap_account)
1824 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1825 }
1826 css_put(&mem->css);
1827} 1954}
1828 1955
1956static void
1957__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
1958{
1959 struct memcg_batch_info *batch = NULL;
1960 bool uncharge_memsw = true;
1961 /* If swapout, usage of swap doesn't decrease */
1962 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1963 uncharge_memsw = false;
1964 /*
1965 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
1966 * In those cases, all pages freed in succession can be expected to be in
1967 * the same cgroup, and we have a chance to coalesce uncharges.
1968 * But we do uncharge one by one if the task was killed by OOM (TIF_MEMDIE),
1969 * because we want to do the uncharge as soon as possible.
1970 */
1971 if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
1972 goto direct_uncharge;
1973
1974 batch = &current->memcg_batch;
1975 /*
1976 * Usually we do css_get() when we remember a memcg pointer.
1977 * But in this case, we keep res->usage until the end of a series of
1978 * uncharges, so it's OK to ignore the memcg's refcnt.
1979 */
1980 if (!batch->memcg)
1981 batch->memcg = mem;
1982 /*
1983 * In the typical case, batch->memcg == mem. This means we can
1984 * merge a series of uncharges into one uncharge of the res_counter.
1985 * If not, we uncharge the res_counter one by one.
1986 */
1987 if (batch->memcg != mem)
1988 goto direct_uncharge;
1989 /* remember freed charge and uncharge it later */
1990 batch->bytes += PAGE_SIZE;
1991 if (uncharge_memsw)
1992 batch->memsw_bytes += PAGE_SIZE;
1993 return;
1994direct_uncharge:
1995 res_counter_uncharge(&mem->res, PAGE_SIZE);
1996 if (uncharge_memsw)
1997 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1998 return;
1999}
1829 2000
1830/* 2001/*
1831 * uncharge if !page_mapped(page) 2002 * uncharge if !page_mapped(page)
@@ -1874,12 +2045,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1874 break; 2045 break;
1875 } 2046 }
1876 2047
1877 if (!mem_cgroup_is_root(mem)) { 2048 if (!mem_cgroup_is_root(mem))
1878 res_counter_uncharge(&mem->res, PAGE_SIZE); 2049 __do_uncharge(mem, ctype);
1879 if (do_swap_account &&
1880 (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1881 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1882 }
1883 if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 2050 if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1884 mem_cgroup_swap_statistics(mem, true); 2051 mem_cgroup_swap_statistics(mem, true);
1885 mem_cgroup_charge_statistics(mem, pc, false); 2052 mem_cgroup_charge_statistics(mem, pc, false);
@@ -1925,6 +2092,50 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
1925 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); 2092 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1926} 2093}
1927 2094
2095/*
2096 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
2097 * In those cases, pages are freed continuously and we can expect them to
2098 * be in the same memcg. Each of those callers itself limits the number of
2099 * pages freed at once, so uncharge_start/end() is called properly.
2100 * This may be called more than once (typically twice) in a context.
2101 */
2102
2103void mem_cgroup_uncharge_start(void)
2104{
2105 current->memcg_batch.do_batch++;
2106 /* Nesting is allowed. */
2107 if (current->memcg_batch.do_batch == 1) {
2108 current->memcg_batch.memcg = NULL;
2109 current->memcg_batch.bytes = 0;
2110 current->memcg_batch.memsw_bytes = 0;
2111 }
2112}
2113
2114void mem_cgroup_uncharge_end(void)
2115{
2116 struct memcg_batch_info *batch = &current->memcg_batch;
2117
2118 if (!batch->do_batch)
2119 return;
2120
2121 batch->do_batch--;
2122 if (batch->do_batch) /* If stacked, do nothing. */
2123 return;
2124
2125 if (!batch->memcg)
2126 return;
2127 /*
2128 * This "batch->memcg" is valid without any css_get/put etc...
2129 * bacause we hide charges behind us.
2130 */
2131 if (batch->bytes)
2132 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2133 if (batch->memsw_bytes)
2134 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2135 /* forget this pointer (for sanity check) */
2136 batch->memcg = NULL;
2137}
2138
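
mem_cgroup_uncharge_start()/end() bracket a burst of page frees: while a bracket is open each individual uncharge only adds PAGE_SIZE to current->memcg_batch, and the res_counter is hit once when the outermost bracket closes; the do_batch counter makes the brackets nestable. The accumulate-and-flush pattern in isolation (one shared counter, a per-process batch, names invented):

#include <stdio.h>

static long counter = 1000;             /* the expensive-to-touch shared counter */

static struct {
        int depth;                      /* nesting of start/end brackets */
        long bytes;                     /* uncharges accumulated so far */
} batch;

static void uncharge_start(void)
{
        if (batch.depth++ == 0)
                batch.bytes = 0;        /* outermost bracket resets the batch */
}

/* Individual uncharge: cheap while a bracket is open, direct otherwise. */
static void uncharge(long bytes)
{
        if (batch.depth)
                batch.bytes += bytes;
        else
                counter -= bytes;       /* direct, unbatched path */
}

static void uncharge_end(void)
{
        if (--batch.depth)
                return;                 /* still nested: keep accumulating */
        counter -= batch.bytes;         /* one hit on the shared counter */
        batch.bytes = 0;
}

int main(void)
{
        int i;

        uncharge_start();
        for (i = 0; i < 10; i++)
                uncharge(4);            /* ten frees, zero counter updates */
        uncharge_end();                 /* a single counter update of 40 */
        printf("counter = %ld\n", counter);
        return 0;
}
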
1928#ifdef CONFIG_SWAP 2139#ifdef CONFIG_SWAP
1929/* 2140/*
1930 * called after __delete_from_swap_cache() and drop "page" account. 2141 * called after __delete_from_swap_cache() and drop "page" account.
@@ -2100,7 +2311,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2100 unsigned long long val) 2311 unsigned long long val)
2101{ 2312{
2102 int retry_count; 2313 int retry_count;
2103 int progress;
2104 u64 memswlimit; 2314 u64 memswlimit;
2105 int ret = 0; 2315 int ret = 0;
2106 int children = mem_cgroup_count_children(memcg); 2316 int children = mem_cgroup_count_children(memcg);
@@ -2144,8 +2354,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2144 if (!ret) 2354 if (!ret)
2145 break; 2355 break;
2146 2356
2147 progress = mem_cgroup_hierarchical_reclaim(memcg, NULL, 2357 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2148 GFP_KERNEL,
2149 MEM_CGROUP_RECLAIM_SHRINK); 2358 MEM_CGROUP_RECLAIM_SHRINK);
2150 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 2359 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2151 /* Usage is reduced ? */ 2360 /* Usage is reduced ? */
@@ -2384,6 +2593,7 @@ move_account:
2384 goto out; 2593 goto out;
2385 /* This is for making all *used* pages to be on LRU. */ 2594 /* This is for making all *used* pages to be on LRU. */
2386 lru_add_drain_all(); 2595 lru_add_drain_all();
2596 drain_all_stock_sync();
2387 ret = 0; 2597 ret = 0;
2388 for_each_node_state(node, N_HIGH_MEMORY) { 2598 for_each_node_state(node, N_HIGH_MEMORY) {
2389 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { 2599 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
@@ -2541,6 +2751,7 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
2541 val += idx_val; 2751 val += idx_val;
2542 mem_cgroup_get_recursive_idx_stat(mem, 2752 mem_cgroup_get_recursive_idx_stat(mem,
2543 MEM_CGROUP_STAT_SWAPOUT, &idx_val); 2753 MEM_CGROUP_STAT_SWAPOUT, &idx_val);
2754 val += idx_val;
2544 val <<= PAGE_SHIFT; 2755 val <<= PAGE_SHIFT;
2545 } else 2756 } else
2546 val = res_counter_read_u64(&mem->memsw, name); 2757 val = res_counter_read_u64(&mem->memsw, name);
@@ -2660,7 +2871,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
2660enum { 2871enum {
2661 MCS_CACHE, 2872 MCS_CACHE,
2662 MCS_RSS, 2873 MCS_RSS,
2663 MCS_MAPPED_FILE, 2874 MCS_FILE_MAPPED,
2664 MCS_PGPGIN, 2875 MCS_PGPGIN,
2665 MCS_PGPGOUT, 2876 MCS_PGPGOUT,
2666 MCS_SWAP, 2877 MCS_SWAP,
@@ -2704,8 +2915,8 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2704 s->stat[MCS_CACHE] += val * PAGE_SIZE; 2915 s->stat[MCS_CACHE] += val * PAGE_SIZE;
2705 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS); 2916 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2706 s->stat[MCS_RSS] += val * PAGE_SIZE; 2917 s->stat[MCS_RSS] += val * PAGE_SIZE;
2707 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE); 2918 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
2708 s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE; 2919 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
2709 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT); 2920 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2710 s->stat[MCS_PGPGIN] += val; 2921 s->stat[MCS_PGPGIN] += val;
2711 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT); 2922 val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
@@ -3097,11 +3308,18 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3097 3308
3098 /* root ? */ 3309 /* root ? */
3099 if (cont->parent == NULL) { 3310 if (cont->parent == NULL) {
3311 int cpu;
3100 enable_swap_cgroup(); 3312 enable_swap_cgroup();
3101 parent = NULL; 3313 parent = NULL;
3102 root_mem_cgroup = mem; 3314 root_mem_cgroup = mem;
3103 if (mem_cgroup_soft_limit_tree_init()) 3315 if (mem_cgroup_soft_limit_tree_init())
3104 goto free_out; 3316 goto free_out;
3317 for_each_possible_cpu(cpu) {
3318 struct memcg_stock_pcp *stock =
3319 &per_cpu(memcg_stock, cpu);
3320 INIT_WORK(&stock->work, drain_local_stock);
3321 }
3322 hotcpu_notifier(memcg_stock_cpu_callback, 0);
3105 3323
3106 } else { 3324 } else {
3107 parent = mem_cgroup_from_cont(cont->parent); 3325 parent = mem_cgroup_from_cont(cont->parent);
@@ -3170,12 +3388,10 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
3170 struct task_struct *p, 3388 struct task_struct *p,
3171 bool threadgroup) 3389 bool threadgroup)
3172{ 3390{
3173 mutex_lock(&memcg_tasklist);
3174 /* 3391 /*
3175 * FIXME: It's better to move charges of this process from old 3392 * FIXME: It's better to move charges of this process from old
3176 * memcg to new memcg. But it's just on TODO-List now. 3393 * memcg to new memcg. But it's just on TODO-List now.
3177 */ 3394 */
3178 mutex_unlock(&memcg_tasklist);
3179} 3395}
3180 3396
3181struct cgroup_subsys mem_cgroup_subsys = { 3397struct cgroup_subsys mem_cgroup_subsys = {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1ac49fef95ab..50d4f8d7024a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -582,10 +582,8 @@ static struct page_state {
582 { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty}, 582 { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
583 { unevict, unevict, "unevictable LRU", me_pagecache_clean}, 583 { unevict, unevict, "unevictable LRU", me_pagecache_clean},
584 584
585#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
586 { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty }, 585 { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty },
587 { mlock, mlock, "mlocked LRU", me_pagecache_clean }, 586 { mlock, mlock, "mlocked LRU", me_pagecache_clean },
588#endif
589 587
590 { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty }, 588 { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
591 { lru|dirty, lru, "clean LRU", me_pagecache_clean }, 589 { lru|dirty, lru, "clean LRU", me_pagecache_clean },
diff --git a/mm/memory.c b/mm/memory.c
index 6ab19dd4a199..aed45eaf8ac9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -572,7 +572,7 @@ out:
572 * covered by this vma. 572 * covered by this vma.
573 */ 573 */
574 574
575static inline void 575static inline unsigned long
576copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, 576copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
577 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, 577 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
578 unsigned long addr, int *rss) 578 unsigned long addr, int *rss)
@@ -586,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
586 if (!pte_file(pte)) { 586 if (!pte_file(pte)) {
587 swp_entry_t entry = pte_to_swp_entry(pte); 587 swp_entry_t entry = pte_to_swp_entry(pte);
588 588
589 swap_duplicate(entry); 589 if (swap_duplicate(entry) < 0)
590 return entry.val;
591
590 /* make sure dst_mm is on swapoff's mmlist. */ 592 /* make sure dst_mm is on swapoff's mmlist. */
591 if (unlikely(list_empty(&dst_mm->mmlist))) { 593 if (unlikely(list_empty(&dst_mm->mmlist))) {
592 spin_lock(&mmlist_lock); 594 spin_lock(&mmlist_lock);
@@ -635,6 +637,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
635 637
636out_set_pte: 638out_set_pte:
637 set_pte_at(dst_mm, addr, dst_pte, pte); 639 set_pte_at(dst_mm, addr, dst_pte, pte);
640 return 0;
638} 641}
639 642
640static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 643static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -646,6 +649,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
646 spinlock_t *src_ptl, *dst_ptl; 649 spinlock_t *src_ptl, *dst_ptl;
647 int progress = 0; 650 int progress = 0;
648 int rss[2]; 651 int rss[2];
652 swp_entry_t entry = (swp_entry_t){0};
649 653
650again: 654again:
651 rss[1] = rss[0] = 0; 655 rss[1] = rss[0] = 0;
@@ -674,7 +678,10 @@ again:
674 progress++; 678 progress++;
675 continue; 679 continue;
676 } 680 }
677 copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); 681 entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
682 vma, addr, rss);
683 if (entry.val)
684 break;
678 progress += 8; 685 progress += 8;
679 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); 686 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
680 687
@@ -684,6 +691,12 @@ again:
684 add_mm_rss(dst_mm, rss[0], rss[1]); 691 add_mm_rss(dst_mm, rss[0], rss[1]);
685 pte_unmap_unlock(orig_dst_pte, dst_ptl); 692 pte_unmap_unlock(orig_dst_pte, dst_ptl);
686 cond_resched(); 693 cond_resched();
694
695 if (entry.val) {
696 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
697 return -ENOMEM;
698 progress = 0;
699 }
687 if (addr != end) 700 if (addr != end)
688 goto again; 701 goto again;
689 return 0; 702 return 0;
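
The copy_pte_range()/copy_one_pte() change covers the case where swap_duplicate() reports that a swap count needs a continuation page: the inner loop cannot allocate with GFP_KERNEL while the page-table locks are held, so it records the offending entry, breaks out, drops the locks, calls add_swap_count_continuation() (which may sleep), and then jumps back to "again" to resume where it stopped. That control flow (leave the non-blocking section, do the slow work unlocked, retry) can be shown on its own; here a pthread mutex plays the page-table lock and usleep() stands in for a sleeping allocation:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns false when the item needs slow (possibly sleeping) preparation. */
static bool copy_fast(int item, bool prepared[])
{
        return prepared[item];
}

static void prepare_slow(int item, bool prepared[])
{
        usleep(1000);           /* stands in for a GFP_KERNEL allocation */
        prepared[item] = true;
}

static void copy_all(int n, bool prepared[])
{
        int i = 0, pending;

again:
        pending = -1;
        pthread_mutex_lock(&table_lock);
        for (; i < n; i++) {
                if (!copy_fast(i, prepared)) {
                        pending = i;    /* remember where we stopped */
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);

        if (pending >= 0) {
                prepare_slow(pending, prepared);        /* lock dropped: may sleep */
                goto again;                             /* resume from "pending" */
        }
}

int main(void)
{
        bool prepared[5] = { true, true, false, true, false };

        copy_all(5, prepared);
        printf("all items copied\n");
        return 0;
}
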
@@ -943,6 +956,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
943 details = NULL; 956 details = NULL;
944 957
945 BUG_ON(addr >= end); 958 BUG_ON(addr >= end);
959 mem_cgroup_uncharge_start();
946 tlb_start_vma(tlb, vma); 960 tlb_start_vma(tlb, vma);
947 pgd = pgd_offset(vma->vm_mm, addr); 961 pgd = pgd_offset(vma->vm_mm, addr);
948 do { 962 do {
@@ -955,6 +969,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
955 zap_work, details); 969 zap_work, details);
956 } while (pgd++, addr = next, (addr != end && *zap_work > 0)); 970 } while (pgd++, addr = next, (addr != end && *zap_work > 0));
957 tlb_end_vma(tlb, vma); 971 tlb_end_vma(tlb, vma);
972 mem_cgroup_uncharge_end();
958 973
959 return addr; 974 return addr;
960} 975}
@@ -2514,7 +2529,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2514 ret = VM_FAULT_HWPOISON; 2529 ret = VM_FAULT_HWPOISON;
2515 } else { 2530 } else {
2516 print_bad_pte(vma, address, orig_pte, NULL); 2531 print_bad_pte(vma, address, orig_pte, NULL);
2517 ret = VM_FAULT_OOM; 2532 ret = VM_FAULT_SIGBUS;
2518 } 2533 }
2519 goto out; 2534 goto out;
2520 } 2535 }
@@ -2548,6 +2563,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2548 lock_page(page); 2563 lock_page(page);
2549 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2564 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2550 2565
2566 page = ksm_might_need_to_copy(page, vma, address);
2567 if (!page) {
2568 ret = VM_FAULT_OOM;
2569 goto out;
2570 }
2571
2551 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { 2572 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
2552 ret = VM_FAULT_OOM; 2573 ret = VM_FAULT_OOM;
2553 goto out_page; 2574 goto out_page;
@@ -2910,7 +2931,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2910 * Page table corrupted: show pte and kill process. 2931 * Page table corrupted: show pte and kill process.
2911 */ 2932 */
2912 print_bad_pte(vma, address, orig_pte, NULL); 2933 print_bad_pte(vma, address, orig_pte, NULL);
2913 return VM_FAULT_OOM; 2934 return VM_FAULT_SIGBUS;
2914 } 2935 }
2915 2936
2916 pgoff = pte_to_pgoff(orig_pte); 2937 pgoff = pte_to_pgoff(orig_pte);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2047465cd27c..030ce8a5bb0e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -27,6 +27,7 @@
27#include <linux/page-isolation.h> 27#include <linux/page-isolation.h>
28#include <linux/pfn.h> 28#include <linux/pfn.h>
29#include <linux/suspend.h> 29#include <linux/suspend.h>
30#include <linux/mm_inline.h>
30 31
31#include <asm/tlbflush.h> 32#include <asm/tlbflush.h>
32 33
@@ -71,7 +72,9 @@ static void get_page_bootmem(unsigned long info, struct page *page, int type)
71 atomic_inc(&page->_count); 72 atomic_inc(&page->_count);
72} 73}
73 74
74void put_page_bootmem(struct page *page) 75/* reference to __meminit __free_pages_bootmem is valid
76 * so use __ref to tell modpost not to generate a warning */
77void __ref put_page_bootmem(struct page *page)
75{ 78{
76 int type; 79 int type;
77 80
@@ -672,6 +675,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
672 if (!ret) { /* Success */ 675 if (!ret) { /* Success */
673 list_add_tail(&page->lru, &source); 676 list_add_tail(&page->lru, &source);
674 move_pages--; 677 move_pages--;
678 inc_zone_page_state(page, NR_ISOLATED_ANON +
679 page_is_file_cache(page));
680
675 } else { 681 } else {
676 /* Because we don't have the big zone->lock, we should 682 /* Because we don't have the big zone->lock, we should
677 check this again here. */ 683 check this again here. */
@@ -694,7 +700,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
694 if (list_empty(&source)) 700 if (list_empty(&source))
695 goto out; 701 goto out;
696 /* this function returns # of failed pages */ 702 /* this function returns # of failed pages */
697 ret = migrate_pages(&source, hotremove_migrate_alloc, 0); 703 ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
698 704
699out: 705out:
700 return ret; 706 return ret;
@@ -747,7 +753,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
747 return offlined; 753 return offlined;
748} 754}
749 755
750int offline_pages(unsigned long start_pfn, 756static int offline_pages(unsigned long start_pfn,
751 unsigned long end_pfn, unsigned long timeout) 757 unsigned long end_pfn, unsigned long timeout)
752{ 758{
753 unsigned long pfn, nr_pages, expire; 759 unsigned long pfn, nr_pages, expire;
@@ -849,6 +855,10 @@ repeat:
849 855
850 setup_per_zone_wmarks(); 856 setup_per_zone_wmarks();
851 calculate_zone_inactive_ratio(zone); 857 calculate_zone_inactive_ratio(zone);
858 if (!node_present_pages(node)) {
859 node_clear_state(node, N_HIGH_MEMORY);
860 kswapd_stop(node);
861 }
852 862
853 vm_total_pages = nr_free_pagecache_pages(); 863 vm_total_pages = nr_free_pagecache_pages();
854 writeback_set_ratelimit(); 864 writeback_set_ratelimit();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4545d5944243..290fb5bf0440 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -85,10 +85,12 @@
85#include <linux/seq_file.h> 85#include <linux/seq_file.h>
86#include <linux/proc_fs.h> 86#include <linux/proc_fs.h>
87#include <linux/migrate.h> 87#include <linux/migrate.h>
88#include <linux/ksm.h>
88#include <linux/rmap.h> 89#include <linux/rmap.h>
89#include <linux/security.h> 90#include <linux/security.h>
90#include <linux/syscalls.h> 91#include <linux/syscalls.h>
91#include <linux/ctype.h> 92#include <linux/ctype.h>
93#include <linux/mm_inline.h>
92 94
93#include <asm/tlbflush.h> 95#include <asm/tlbflush.h>
94#include <asm/uaccess.h> 96#include <asm/uaccess.h>
@@ -412,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
412 if (!page) 414 if (!page)
413 continue; 415 continue;
414 /* 416 /*
415 * The check for PageReserved here is important to avoid 417 * vm_normal_page() filters out zero pages, but there might
416 * handling zero pages and other pages that may have been 418 * still be PageReserved pages to skip, perhaps in a VDSO.
417 * marked special by the system. 419 * And we cannot move PageKsm pages sensibly or safely yet.
418 *
419 * If the PageReserved would not be checked here then f.e.
420 * the location of the zero page could have an influence
421 * on MPOL_MF_STRICT, zero pages would be counted for
422 * the per node stats, and there would be useless attempts
423 * to put zero pages on the migration list.
424 */ 420 */
425 if (PageReserved(page)) 421 if (PageReserved(page) || PageKsm(page))
426 continue; 422 continue;
427 nid = page_to_nid(page); 423 nid = page_to_nid(page);
428 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 424 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -809,6 +805,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
809 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { 805 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
810 if (!isolate_lru_page(page)) { 806 if (!isolate_lru_page(page)) {
811 list_add_tail(&page->lru, pagelist); 807 list_add_tail(&page->lru, pagelist);
808 inc_zone_page_state(page, NR_ISOLATED_ANON +
809 page_is_file_cache(page));
812 } 810 }
813 } 811 }
814} 812}
@@ -836,7 +834,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
836 flags | MPOL_MF_DISCONTIG_OK, &pagelist); 834 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
837 835
838 if (!list_empty(&pagelist)) 836 if (!list_empty(&pagelist))
839 err = migrate_pages(&pagelist, new_node_page, dest); 837 err = migrate_pages(&pagelist, new_node_page, dest, 0);
840 838
841 return err; 839 return err;
842} 840}
@@ -1053,7 +1051,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1053 1051
1054 if (!list_empty(&pagelist)) 1052 if (!list_empty(&pagelist))
1055 nr_failed = migrate_pages(&pagelist, new_vma_page, 1053 nr_failed = migrate_pages(&pagelist, new_vma_page,
1056 (unsigned long)vma); 1054 (unsigned long)vma, 0);
1057 1055
1058 if (!err && nr_failed && (flags & MPOL_MF_STRICT)) 1056 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1059 err = -EIO; 1057 err = -EIO;
@@ -1565,6 +1563,53 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1565 } 1563 }
1566 return zl; 1564 return zl;
1567} 1565}
1566
1567/*
1568 * init_nodemask_of_mempolicy
1569 *
1570 * If the current task's mempolicy is "default" [NULL], return 'false'
1571 * to indicate default policy. Otherwise, extract the policy nodemask
1572 * for 'bind' or 'interleave' policy into the argument nodemask, or
1573 * initialize the argument nodemask to contain the single node for
1574 * 'preferred' or 'local' policy and return 'true' to indicate presence
1575 * of non-default mempolicy.
1576 *
1577 * We don't bother with reference counting the mempolicy [mpol_get/put]
1578 * because the current task is examining its own mempolicy and a task's
1579 * mempolicy is only ever changed by the task itself.
1580 *
1581 * N.B., it is the caller's responsibility to free a returned nodemask.
1582 */
1583bool init_nodemask_of_mempolicy(nodemask_t *mask)
1584{
1585 struct mempolicy *mempolicy;
1586 int nid;
1587
1588 if (!(mask && current->mempolicy))
1589 return false;
1590
1591 mempolicy = current->mempolicy;
1592 switch (mempolicy->mode) {
1593 case MPOL_PREFERRED:
1594 if (mempolicy->flags & MPOL_F_LOCAL)
1595 nid = numa_node_id();
1596 else
1597 nid = mempolicy->v.preferred_node;
1598 init_nodemask_of_node(mask, nid);
1599 break;
1600
1601 case MPOL_BIND:
1602 /* Fall through */
1603 case MPOL_INTERLEAVE:
1604 *mask = mempolicy->v.nodes;
1605 break;
1606
1607 default:
1608 BUG();
1609 }
1610
1611 return true;
1612}
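
init_nodemask_of_mempolicy() reduces the current task's policy to a plain nodemask: a single node for "preferred"/"local", the policy's own node set for "bind" and "interleave", and false for the default policy so the caller knows there is no mask at all. With node masks shrunk to a single unsigned long and a made-up policy struct, the mode-to-bitmask mapping is essentially:

#include <stdbool.h>
#include <stdio.h>

enum pol_mode { POL_DEFAULT, POL_PREFERRED, POL_BIND, POL_INTERLEAVE };

struct policy {
        enum pol_mode mode;
        int preferred_node;
        unsigned long nodes;    /* bitmap used by bind/interleave */
};

/* Fill *mask from the policy; false means "default policy, no mask". */
static bool nodemask_of_policy(const struct policy *pol, unsigned long *mask)
{
        if (!pol || pol->mode == POL_DEFAULT)
                return false;

        switch (pol->mode) {
        case POL_PREFERRED:
                *mask = 1UL << pol->preferred_node;     /* single node */
                break;
        case POL_BIND:          /* fall through */
        case POL_INTERLEAVE:
                *mask = pol->nodes;                     /* policy's own node set */
                break;
        default:
                return false;
        }
        return true;
}

int main(void)
{
        struct policy pol = { POL_INTERLEAVE, 0, 0x5 }; /* nodes 0 and 2 */
        unsigned long mask;

        if (nodemask_of_policy(&pol, &mask))
                printf("mask = 0x%lx\n", mask);
        return 0;
}
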
1568#endif 1613#endif
1569 1614
1570/* Allocate a page in interleaved policy. 1615/* Allocate a page in interleaved policy.
diff --git a/mm/migrate.c b/mm/migrate.c
index 0bc640fd68fa..efddbf0926b2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -21,6 +21,7 @@
21#include <linux/mm_inline.h> 21#include <linux/mm_inline.h>
22#include <linux/nsproxy.h> 22#include <linux/nsproxy.h>
23#include <linux/pagevec.h> 23#include <linux/pagevec.h>
24#include <linux/ksm.h>
24#include <linux/rmap.h> 25#include <linux/rmap.h>
25#include <linux/topology.h> 26#include <linux/topology.h>
26#include <linux/cpu.h> 27#include <linux/cpu.h>
@@ -78,8 +79,8 @@ int putback_lru_pages(struct list_head *l)
78/* 79/*
79 * Restore a potential migration pte to a working pte entry 80 * Restore a potential migration pte to a working pte entry
80 */ 81 */
81static void remove_migration_pte(struct vm_area_struct *vma, 82static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
82 struct page *old, struct page *new) 83 unsigned long addr, void *old)
83{ 84{
84 struct mm_struct *mm = vma->vm_mm; 85 struct mm_struct *mm = vma->vm_mm;
85 swp_entry_t entry; 86 swp_entry_t entry;
@@ -88,40 +89,37 @@ static void remove_migration_pte(struct vm_area_struct *vma,
88 pmd_t *pmd; 89 pmd_t *pmd;
89 pte_t *ptep, pte; 90 pte_t *ptep, pte;
90 spinlock_t *ptl; 91 spinlock_t *ptl;
91 unsigned long addr = page_address_in_vma(new, vma);
92
93 if (addr == -EFAULT)
94 return;
95 92
96 pgd = pgd_offset(mm, addr); 93 pgd = pgd_offset(mm, addr);
97 if (!pgd_present(*pgd)) 94 if (!pgd_present(*pgd))
98 return; 95 goto out;
99 96
100 pud = pud_offset(pgd, addr); 97 pud = pud_offset(pgd, addr);
101 if (!pud_present(*pud)) 98 if (!pud_present(*pud))
102 return; 99 goto out;
103 100
104 pmd = pmd_offset(pud, addr); 101 pmd = pmd_offset(pud, addr);
105 if (!pmd_present(*pmd)) 102 if (!pmd_present(*pmd))
106 return; 103 goto out;
107 104
108 ptep = pte_offset_map(pmd, addr); 105 ptep = pte_offset_map(pmd, addr);
109 106
110 if (!is_swap_pte(*ptep)) { 107 if (!is_swap_pte(*ptep)) {
111 pte_unmap(ptep); 108 pte_unmap(ptep);
112 return; 109 goto out;
113 } 110 }
114 111
115 ptl = pte_lockptr(mm, pmd); 112 ptl = pte_lockptr(mm, pmd);
116 spin_lock(ptl); 113 spin_lock(ptl);
117 pte = *ptep; 114 pte = *ptep;
118 if (!is_swap_pte(pte)) 115 if (!is_swap_pte(pte))
119 goto out; 116 goto unlock;
120 117
121 entry = pte_to_swp_entry(pte); 118 entry = pte_to_swp_entry(pte);
122 119
123 if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) 120 if (!is_migration_entry(entry) ||
124 goto out; 121 migration_entry_to_page(entry) != old)
122 goto unlock;
125 123
126 get_page(new); 124 get_page(new);
127 pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); 125 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
@@ -137,58 +135,10 @@ static void remove_migration_pte(struct vm_area_struct *vma,
137 135
138 /* No need to invalidate - it was non-present before */ 136 /* No need to invalidate - it was non-present before */
139 update_mmu_cache(vma, addr, pte); 137 update_mmu_cache(vma, addr, pte);
140 138unlock:
141out:
142 pte_unmap_unlock(ptep, ptl); 139 pte_unmap_unlock(ptep, ptl);
143} 140out:
144 141 return SWAP_AGAIN;
145/*
146 * Note that remove_file_migration_ptes will only work on regular mappings,
147 * Nonlinear mappings do not use migration entries.
148 */
149static void remove_file_migration_ptes(struct page *old, struct page *new)
150{
151 struct vm_area_struct *vma;
152 struct address_space *mapping = new->mapping;
153 struct prio_tree_iter iter;
154 pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
155
156 if (!mapping)
157 return;
158
159 spin_lock(&mapping->i_mmap_lock);
160
161 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
162 remove_migration_pte(vma, old, new);
163
164 spin_unlock(&mapping->i_mmap_lock);
165}
166
167/*
168 * Must hold mmap_sem lock on at least one of the vmas containing
169 * the page so that the anon_vma cannot vanish.
170 */
171static void remove_anon_migration_ptes(struct page *old, struct page *new)
172{
173 struct anon_vma *anon_vma;
174 struct vm_area_struct *vma;
175 unsigned long mapping;
176
177 mapping = (unsigned long)new->mapping;
178
179 if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
180 return;
181
182 /*
183 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
184 */
185 anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
186 spin_lock(&anon_vma->lock);
187
188 list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
189 remove_migration_pte(vma, old, new);
190
191 spin_unlock(&anon_vma->lock);
192} 142}
193 143
194/* 144/*
@@ -197,10 +147,7 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new)
197 */ 147 */
198static void remove_migration_ptes(struct page *old, struct page *new) 148static void remove_migration_ptes(struct page *old, struct page *new)
199{ 149{
200 if (PageAnon(new)) 150 rmap_walk(new, remove_migration_pte, old);
201 remove_anon_migration_ptes(old, new);
202 else
203 remove_file_migration_ptes(old, new);
204} 151}
205 152
206/* 153/*
@@ -341,8 +288,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
341 if (TestClearPageActive(page)) { 288 if (TestClearPageActive(page)) {
342 VM_BUG_ON(PageUnevictable(page)); 289 VM_BUG_ON(PageUnevictable(page));
343 SetPageActive(newpage); 290 SetPageActive(newpage);
344 } else 291 } else if (TestClearPageUnevictable(page))
345 unevictable_migrate_page(newpage, page); 292 SetPageUnevictable(newpage);
346 if (PageChecked(page)) 293 if (PageChecked(page))
347 SetPageChecked(newpage); 294 SetPageChecked(newpage);
348 if (PageMappedToDisk(page)) 295 if (PageMappedToDisk(page))
@@ -361,6 +308,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
361 } 308 }
362 309
363 mlock_migrate_page(newpage, page); 310 mlock_migrate_page(newpage, page);
311 ksm_migrate_page(newpage, page);
364 312
365 ClearPageSwapCache(page); 313 ClearPageSwapCache(page);
366 ClearPagePrivate(page); 314 ClearPagePrivate(page);
@@ -580,9 +528,9 @@ static int move_to_new_page(struct page *newpage, struct page *page)
580 else 528 else
581 rc = fallback_migrate_page(mapping, newpage, page); 529 rc = fallback_migrate_page(mapping, newpage, page);
582 530
583 if (!rc) { 531 if (!rc)
584 remove_migration_ptes(page, newpage); 532 remove_migration_ptes(page, newpage);
585 } else 533 else
586 newpage->mapping = NULL; 534 newpage->mapping = NULL;
587 535
588 unlock_page(newpage); 536 unlock_page(newpage);
@@ -595,7 +543,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
595 * to the newly allocated page in newpage. 543 * to the newly allocated page in newpage.
596 */ 544 */
597static int unmap_and_move(new_page_t get_new_page, unsigned long private, 545static int unmap_and_move(new_page_t get_new_page, unsigned long private,
598 struct page *page, int force) 546 struct page *page, int force, int offlining)
599{ 547{
600 int rc = 0; 548 int rc = 0;
601 int *result = NULL; 549 int *result = NULL;
@@ -621,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
621 lock_page(page); 569 lock_page(page);
622 } 570 }
623 571
572 /*
573 * Only memory hotplug's offline_pages() caller has locked out KSM,
574 * and can safely migrate a KSM page. The other cases have skipped
575 * PageKsm along with PageReserved - but it is only now when we have
576 * the page lock that we can be certain it will not go KSM beneath us
577 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
578 * its pagecount raised, but only here do we take the page lock which
579 * serializes that).
580 */
581 if (PageKsm(page) && !offlining) {
582 rc = -EBUSY;
583 goto unlock;
584 }
585
624 /* charge against new page */ 586 /* charge against new page */
625 charge = mem_cgroup_prepare_migration(page, &mem); 587 charge = mem_cgroup_prepare_migration(page, &mem);
626 if (charge == -ENOMEM) { 588 if (charge == -ENOMEM) {
@@ -737,7 +699,7 @@ move_newpage:
737 * Return: Number of pages not migrated or error code. 699 * Return: Number of pages not migrated or error code.
738 */ 700 */
739int migrate_pages(struct list_head *from, 701int migrate_pages(struct list_head *from,
740 new_page_t get_new_page, unsigned long private) 702 new_page_t get_new_page, unsigned long private, int offlining)
741{ 703{
742 int retry = 1; 704 int retry = 1;
743 int nr_failed = 0; 705 int nr_failed = 0;
@@ -746,13 +708,6 @@ int migrate_pages(struct list_head *from,
746 struct page *page2; 708 struct page *page2;
747 int swapwrite = current->flags & PF_SWAPWRITE; 709 int swapwrite = current->flags & PF_SWAPWRITE;
748 int rc; 710 int rc;
749 unsigned long flags;
750
751 local_irq_save(flags);
752 list_for_each_entry(page, from, lru)
753 __inc_zone_page_state(page, NR_ISOLATED_ANON +
754 page_is_file_cache(page));
755 local_irq_restore(flags);
756 711
757 if (!swapwrite) 712 if (!swapwrite)
758 current->flags |= PF_SWAPWRITE; 713 current->flags |= PF_SWAPWRITE;
@@ -764,7 +719,7 @@ int migrate_pages(struct list_head *from,
764 cond_resched(); 719 cond_resched();
765 720
766 rc = unmap_and_move(get_new_page, private, 721 rc = unmap_and_move(get_new_page, private,
767 page, pass > 2); 722 page, pass > 2, offlining);
768 723
769 switch(rc) { 724 switch(rc) {
770 case -ENOMEM: 725 case -ENOMEM:
@@ -860,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
860 if (!page) 815 if (!page)
861 goto set_status; 816 goto set_status;
862 817
863 if (PageReserved(page)) /* Check for zero page */ 818 /* Use PageReserved to check for zero page */
819 if (PageReserved(page) || PageKsm(page))
864 goto put_and_set; 820 goto put_and_set;
865 821
866 pp->page = page; 822 pp->page = page;
@@ -878,8 +834,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
878 goto put_and_set; 834 goto put_and_set;
879 835
880 err = isolate_lru_page(page); 836 err = isolate_lru_page(page);
881 if (!err) 837 if (!err) {
882 list_add_tail(&page->lru, &pagelist); 838 list_add_tail(&page->lru, &pagelist);
839 inc_zone_page_state(page, NR_ISOLATED_ANON +
840 page_is_file_cache(page));
841 }
883put_and_set: 842put_and_set:
884 /* 843 /*
885 * Either remove the duplicate refcount from 844 * Either remove the duplicate refcount from
@@ -894,7 +853,7 @@ set_status:
894 err = 0; 853 err = 0;
895 if (!list_empty(&pagelist)) 854 if (!list_empty(&pagelist))
896 err = migrate_pages(&pagelist, new_page_node, 855 err = migrate_pages(&pagelist, new_page_node,
897 (unsigned long)pm); 856 (unsigned long)pm, 0);
898 857
899 up_read(&mm->mmap_sem); 858 up_read(&mm->mmap_sem);
900 return err; 859 return err;
@@ -1015,7 +974,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1015 974
1016 err = -ENOENT; 975 err = -ENOENT;
1017 /* Use PageReserved to check for zero page */ 976 /* Use PageReserved to check for zero page */
1018 if (!page || PageReserved(page)) 977 if (!page || PageReserved(page) || PageKsm(page))
1019 goto set_status; 978 goto set_status;
1020 979
1021 err = page_to_nid(page); 980 err = page_to_nid(page);
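The mm/migrate.c hunks above fold the separate anon and file walkers into a single rmap_walk() call: remove_migration_pte() is now an rmap_walk() callback, invoked once per (vma, address) where the page may be mapped, and it returns SWAP_AGAIN so the walk visits every mapping. A minimal sketch of that callback shape, assuming the rmap_walk() interface added alongside this patch (illustrative, not part of the diff):

/* Sketch: the shape of an rmap_walk() callback as used by
 * remove_migration_pte() above.  Returning SWAP_AGAIN keeps the walk
 * going; any other value stops it early. */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long addr, void *arg)
{
	/* inspect or fix up the mapping of 'page' at 'addr' in 'vma' */
	return SWAP_AGAIN;
}

/* usage: rmap_walk(page, example_rmap_one, arg); */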
diff --git a/mm/mincore.c b/mm/mincore.c
index 8cb508f84ea4..7a3436ef39eb 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -14,6 +14,7 @@
14#include <linux/syscalls.h> 14#include <linux/syscalls.h>
15#include <linux/swap.h> 15#include <linux/swap.h>
16#include <linux/swapops.h> 16#include <linux/swapops.h>
17#include <linux/hugetlb.h>
17 18
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
@@ -72,6 +73,42 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
72 if (!vma || addr < vma->vm_start) 73 if (!vma || addr < vma->vm_start)
73 return -ENOMEM; 74 return -ENOMEM;
74 75
76#ifdef CONFIG_HUGETLB_PAGE
77 if (is_vm_hugetlb_page(vma)) {
78 struct hstate *h;
79 unsigned long nr_huge;
80 unsigned char present;
81
82 i = 0;
83 nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
84 h = hstate_vma(vma);
85 nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
86 - (addr >> huge_page_shift(h)) + 1;
87 nr_huge = min(nr_huge,
88 (vma->vm_end - addr) >> huge_page_shift(h));
89 while (1) {
90 /* hugepages are always in RAM for now,
91 * but in general this needs to be checked */
92 ptep = huge_pte_offset(current->mm,
93 addr & huge_page_mask(h));
94 present = !!(ptep &&
95 !huge_pte_none(huge_ptep_get(ptep)));
96 while (1) {
97 vec[i++] = present;
98 addr += PAGE_SIZE;
99 /* reach buffer limit */
100 if (i == nr)
101 return nr;
102 /* check hugepage border */
103 if (!((addr & ~huge_page_mask(h))
104 >> PAGE_SHIFT))
105 break;
106 }
107 }
108 return nr;
109 }
110#endif
111
75 /* 112 /*
76 * Calculate how many pages there are left in the last level of the 113 * Calculate how many pages there are left in the last level of the
77 * PTE array for our address. 114 * PTE array for our address.
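The hugetlb branch added to do_mincore() above performs one huge_pte_offset() lookup per hugepage and then emits one vector byte per base page until the buffer fills or a hugepage boundary is crossed. A self-contained userspace sketch of that arithmetic, assuming 4 KiB base pages and 2 MiB hugepages (purely illustrative, not part of the patch):

#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define HPAGE_SHIFT  21
#define HPAGE_MASK   (~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	unsigned long addr = 0x200000UL + 5 * PAGE_SIZE; /* starts mid-hugepage */
	unsigned long pages = 1024;                      /* vector entries requested */
	unsigned long i = 0, lookups = 0;

	while (i < pages) {
		lookups++;                       /* one huge_pte_offset() per hugepage */
		do {
			i++;                     /* vec[i] = present */
			addr += PAGE_SIZE;
		} while (i < pages && ((addr & ~HPAGE_MASK) >> PAGE_SHIFT));
	}
	printf("%lu vector entries filled with %lu huge PTE lookups\n", i, lookups);
	return 0;
}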
diff --git a/mm/mlock.c b/mm/mlock.c
index bd6f0e466f6c..2b8335a89400 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -88,25 +88,22 @@ void mlock_vma_page(struct page *page)
88 } 88 }
89} 89}
90 90
91/* 91/**
92 * called from munlock()/munmap() path with page supposedly on the LRU. 92 * munlock_vma_page - munlock a vma page
93 * @page - page to be unlocked
93 * 94 *
94 * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked 95 * called from munlock()/munmap() path with page supposedly on the LRU.
95 * [in try_to_munlock()] and then attempt to isolate the page. We must 96 * When we munlock a page, because the vma where we found the page is being
96 * isolate the page to keep others from messing with its unevictable 97 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
97 * and mlocked state while trying to munlock. However, we pre-clear the 98 * page locked so that we can leave it on the unevictable lru list and not
98 * mlocked state anyway as we might lose the isolation race and we might 99 * bother vmscan with it. However, to walk the page's rmap list in
99 * not get another chance to clear PageMlocked. If we successfully 100 * try_to_munlock() we must isolate the page from the LRU. If some other
100 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas 101 * task has removed the page from the LRU, we won't be able to do that.
101 * mapping the page, it will restore the PageMlocked state, unless the page 102 * So we clear the PageMlocked as we might not get another chance. If we
102 * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(), 103 * can't isolate the page, we leave it for putback_lru_page() and vmscan
103 * perhaps redundantly. 104 * [page_referenced()/try_to_unmap()] to deal with.
104 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
105 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
106 * either of which will restore the PageMlocked state by calling
107 * mlock_vma_page() above, if it can grab the vma's mmap sem.
108 */ 105 */
109static void munlock_vma_page(struct page *page) 106void munlock_vma_page(struct page *page)
110{ 107{
111 BUG_ON(!PageLocked(page)); 108 BUG_ON(!PageLocked(page));
112 109
@@ -117,18 +114,18 @@ static void munlock_vma_page(struct page *page)
117 /* 114 /*
118 * did try_to_unlock() succeed or punt? 115 * did try_to_unlock() succeed or punt?
119 */ 116 */
120 if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN) 117 if (ret != SWAP_MLOCK)
121 count_vm_event(UNEVICTABLE_PGMUNLOCKED); 118 count_vm_event(UNEVICTABLE_PGMUNLOCKED);
122 119
123 putback_lru_page(page); 120 putback_lru_page(page);
124 } else { 121 } else {
125 /* 122 /*
126 * We lost the race. let try_to_unmap() deal 123 * Some other task has removed the page from the LRU.
127 * with it. At least we get the page state and 124 * putback_lru_page() will take care of removing the
128 * mlock stats right. However, page is still on 125 * page from the unevictable list, if necessary.
129 * the noreclaim list. We'll fix that up when 126 * vmscan [page_referenced()] will move the page back
130 * the page is eventually freed or we scan the 127 * to the unevictable list if some other vma has it
131 * noreclaim list. 128 * mlocked.
132 */ 129 */
133 if (PageUnevictable(page)) 130 if (PageUnevictable(page))
134 count_vm_event(UNEVICTABLE_PGSTRANDED); 131 count_vm_event(UNEVICTABLE_PGSTRANDED);
diff --git a/mm/mmap.c b/mm/mmap.c
index ed70a68e882a..d9c77b2dbe9d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1198,8 +1198,20 @@ munmap_back:
1198 goto free_vma; 1198 goto free_vma;
1199 } 1199 }
1200 1200
1201 if (vma_wants_writenotify(vma)) 1201 if (vma_wants_writenotify(vma)) {
1202 pgprot_t pprot = vma->vm_page_prot;
1203
1204 /* Can vma->vm_page_prot have changed??
1205 *
1206 * Answer: Yes, drivers may have changed it in their
1207 * f_op->mmap method.
1208 *
1209 * This ensures that vmas marked as uncached stay that way.
1210 */
1202 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED); 1211 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
1212 if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1213 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1214 }
1203 1215
1204 vma_link(mm, vma, prev, rb_link, rb_parent); 1216 vma_link(mm, vma, prev, rb_link, rb_parent);
1205 file = vma->vm_file; 1217 file = vma->vm_file;
@@ -1811,10 +1823,10 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1811} 1823}
1812 1824
1813/* 1825/*
1814 * Split a vma into two pieces at address 'addr', a new vma is allocated 1826 * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
1815 * either for the first part or the tail. 1827 * munmap path where it doesn't make sense to fail.
1816 */ 1828 */
1817int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, 1829static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1818 unsigned long addr, int new_below) 1830 unsigned long addr, int new_below)
1819{ 1831{
1820 struct mempolicy *pol; 1832 struct mempolicy *pol;
@@ -1824,9 +1836,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1824 ~(huge_page_mask(hstate_vma(vma))))) 1836 ~(huge_page_mask(hstate_vma(vma)))))
1825 return -EINVAL; 1837 return -EINVAL;
1826 1838
1827 if (mm->map_count >= sysctl_max_map_count)
1828 return -ENOMEM;
1829
1830 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 1839 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1831 if (!new) 1840 if (!new)
1832 return -ENOMEM; 1841 return -ENOMEM;
@@ -1866,6 +1875,19 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1866 return 0; 1875 return 0;
1867} 1876}
1868 1877
1878/*
1879 * Split a vma into two pieces at address 'addr', a new vma is allocated
1880 * either for the first part or the tail.
1881 */
1882int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1883 unsigned long addr, int new_below)
1884{
1885 if (mm->map_count >= sysctl_max_map_count)
1886 return -ENOMEM;
1887
1888 return __split_vma(mm, vma, addr, new_below);
1889}
1890
1869/* Munmap is split into 2 main parts -- this part which finds 1891/* Munmap is split into 2 main parts -- this part which finds
1870 * what needs doing, and the areas themselves, which do the 1892 * what needs doing, and the areas themselves, which do the
1871 * work. This now handles partial unmappings. 1893 * work. This now handles partial unmappings.
@@ -1901,7 +1923,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1901 * places tmp vma above, and higher split_vma places tmp vma below. 1923 * places tmp vma above, and higher split_vma places tmp vma below.
1902 */ 1924 */
1903 if (start > vma->vm_start) { 1925 if (start > vma->vm_start) {
1904 int error = split_vma(mm, vma, start, 0); 1926 int error;
1927
1928 /*
1929 * Make sure that map_count on return from munmap() will
1930 * not exceed its limit; but let map_count go just above
1931 * its limit temporarily, to help free resources as expected.
1932 */
1933 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
1934 return -ENOMEM;
1935
1936 error = __split_vma(mm, vma, start, 0);
1905 if (error) 1937 if (error)
1906 return error; 1938 return error;
1907 prev = vma; 1939 prev = vma;
@@ -1910,7 +1942,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1910 /* Does it split the last one? */ 1942 /* Does it split the last one? */
1911 last = find_vma(mm, end); 1943 last = find_vma(mm, end);
1912 if (last && end > last->vm_start) { 1944 if (last && end > last->vm_start) {
1913 int error = split_vma(mm, last, end, 1); 1945 int error = __split_vma(mm, last, end, 1);
1914 if (error) 1946 if (error)
1915 return error; 1947 return error;
1916 } 1948 }
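The mm/mmap.c hunks above also change when the map_count limit is enforced: split_vma() keeps the check for ordinary callers, while do_munmap() calls __split_vma() directly and only refuses up front when the unmap would leave both ends of a VMA, the one case that permanently grows map_count. A tiny self-contained sketch of the transient overshoot described by the new comment (the numbers are illustrative, not taken from the patch):

#include <stdio.h>

int main(void)
{
	int limit = 65530;           /* sysctl_max_map_count (illustrative value) */
	int map_count = limit;       /* already at the limit before munmap()      */

	map_count++;                 /* __split_vma(start): VMA -> head | tail    */
	printf("peak %d (limit %d)\n", map_count, limit);

	map_count--;                 /* unmap_region() frees the tail VMA         */
	printf("final %d\n", map_count);
	return 0;
}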
diff --git a/mm/nommu.c b/mm/nommu.c
index 9876fa0c3ad3..8687973462bb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1143,9 +1143,6 @@ static int do_mmap_private(struct vm_area_struct *vma,
1143 if (ret < rlen) 1143 if (ret < rlen)
1144 memset(base + ret, 0, rlen - ret); 1144 memset(base + ret, 0, rlen - ret);
1145 1145
1146 } else {
1147 /* if it's an anonymous mapping, then just clear it */
1148 memset(base, 0, rlen);
1149 } 1146 }
1150 1147
1151 return 0; 1148 return 0;
@@ -1343,6 +1340,11 @@ unsigned long do_mmap_pgoff(struct file *file,
1343 goto error_just_free; 1340 goto error_just_free;
1344 add_nommu_region(region); 1341 add_nommu_region(region);
1345 1342
1343 /* clear anonymous mappings that don't ask for uninitialized data */
1344 if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1345 memset((void *)region->vm_start, 0,
1346 region->vm_end - region->vm_start);
1347
1346 /* okay... we have a mapping; now we have to register it */ 1348 /* okay... we have a mapping; now we have to register it */
1347 result = vma->vm_start; 1349 result = vma->vm_start;
1348 1350
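With the mm/nommu.c change above, anonymous mappings are cleared in do_mmap_pgoff() unless the caller passes MAP_UNINITIALIZED and the kernel was built with CONFIG_MMAP_ALLOW_UNINITIALIZED. A hedged userspace sketch of requesting such a mapping; on kernels or headers without the feature the flag is 0 or absent (hence the fallback define) and the memory is zeroed as usual:

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0x4000000
#endif

int main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped %zu bytes at %p\n", len, p);
	munmap(p, len);
	return 0;
}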
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ea2147dabba6..f52481b1c1e5 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -196,27 +196,46 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
196/* 196/*
197 * Determine the type of allocation constraint. 197 * Determine the type of allocation constraint.
198 */ 198 */
199static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
200 gfp_t gfp_mask)
201{
202#ifdef CONFIG_NUMA 199#ifdef CONFIG_NUMA
200static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
201 gfp_t gfp_mask, nodemask_t *nodemask)
202{
203 struct zone *zone; 203 struct zone *zone;
204 struct zoneref *z; 204 struct zoneref *z;
205 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 205 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
206 nodemask_t nodes = node_states[N_HIGH_MEMORY];
207 206
208 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 207 /*
209 if (cpuset_zone_allowed_softwall(zone, gfp_mask)) 208 * We reach here only when __GFP_NOFAIL is used, so we should avoid
210 node_clear(zone_to_nid(zone), nodes); 209 * killing current; we have to kill some random task in this case.
211 else 210 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to handle it now.
212 return CONSTRAINT_CPUSET; 211 */
212 if (gfp_mask & __GFP_THISNODE)
213 return CONSTRAINT_NONE;
213 214
214 if (!nodes_empty(nodes)) 215 /*
216 * The nodemask here is a nodemask passed to alloc_pages(). Now,
217 * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy
218 * feature. mempolicy is the only user of the nodemask here, so
219 * check whether mempolicy's nodemask contains all N_HIGH_MEMORY nodes.
220 */
221 if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
215 return CONSTRAINT_MEMORY_POLICY; 222 return CONSTRAINT_MEMORY_POLICY;
216#endif
217 223
224 /* Check this allocation failure is caused by cpuset's wall function */
225 for_each_zone_zonelist_nodemask(zone, z, zonelist,
226 high_zoneidx, nodemask)
227 if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
228 return CONSTRAINT_CPUSET;
229
230 return CONSTRAINT_NONE;
231}
232#else
233static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
234 gfp_t gfp_mask, nodemask_t *nodemask)
235{
218 return CONSTRAINT_NONE; 236 return CONSTRAINT_NONE;
219} 237}
238#endif
220 239
221/* 240/*
222 * Simple selection loop. We chose the process with the highest 241 * Simple selection loop. We chose the process with the highest
@@ -337,6 +356,24 @@ static void dump_tasks(const struct mem_cgroup *mem)
337 } while_each_thread(g, p); 356 } while_each_thread(g, p);
338} 357}
339 358
359static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
360 struct mem_cgroup *mem)
361{
362 pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
363 "oom_adj=%d\n",
364 current->comm, gfp_mask, order, current->signal->oom_adj);
365 task_lock(current);
366 cpuset_print_task_mems_allowed(current);
367 task_unlock(current);
368 dump_stack();
369 mem_cgroup_print_oom_info(mem, p);
370 show_mem();
371 if (sysctl_oom_dump_tasks)
372 dump_tasks(mem);
373}
374
375#define K(x) ((x) << (PAGE_SHIFT-10))
376
340/* 377/*
341 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO 378 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
342 * flag though it's unlikely that we select a process with CAP_SYS_RAW_IO 379 * flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
@@ -350,15 +387,23 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
350 return; 387 return;
351 } 388 }
352 389
390 task_lock(p);
353 if (!p->mm) { 391 if (!p->mm) {
354 WARN_ON(1); 392 WARN_ON(1);
355 printk(KERN_WARNING "tried to kill an mm-less task!\n"); 393 printk(KERN_WARNING "tried to kill an mm-less task %d (%s)!\n",
394 task_pid_nr(p), p->comm);
395 task_unlock(p);
356 return; 396 return;
357 } 397 }
358 398
359 if (verbose) 399 if (verbose)
360 printk(KERN_ERR "Killed process %d (%s)\n", 400 printk(KERN_ERR "Killed process %d (%s) "
361 task_pid_nr(p), p->comm); 401 "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
402 task_pid_nr(p), p->comm,
403 K(p->mm->total_vm),
404 K(get_mm_counter(p->mm, anon_rss)),
405 K(get_mm_counter(p->mm, file_rss)));
406 task_unlock(p);
362 407
363 /* 408 /*
364 * We give our sacrificial lamb high priority and access to 409 * We give our sacrificial lamb high priority and access to
@@ -395,20 +440,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
395{ 440{
396 struct task_struct *c; 441 struct task_struct *c;
397 442
398 if (printk_ratelimit()) { 443 if (printk_ratelimit())
399 printk(KERN_WARNING "%s invoked oom-killer: " 444 dump_header(p, gfp_mask, order, mem);
400 "gfp_mask=0x%x, order=%d, oom_adj=%d\n",
401 current->comm, gfp_mask, order,
402 current->signal->oom_adj);
403 task_lock(current);
404 cpuset_print_task_mems_allowed(current);
405 task_unlock(current);
406 dump_stack();
407 mem_cgroup_print_oom_info(mem, current);
408 show_mem();
409 if (sysctl_oom_dump_tasks)
410 dump_tasks(mem);
411 }
412 445
413 /* 446 /*
414 * If the task is already exiting, don't alarm the sysadmin or kill 447 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -544,6 +577,7 @@ retry:
544 /* Found nothing?!?! Either we hang forever, or we panic. */ 577 /* Found nothing?!?! Either we hang forever, or we panic. */
545 if (!p) { 578 if (!p) {
546 read_unlock(&tasklist_lock); 579 read_unlock(&tasklist_lock);
580 dump_header(NULL, gfp_mask, order, NULL);
547 panic("Out of memory and no killable processes...\n"); 581 panic("Out of memory and no killable processes...\n");
548 } 582 }
549 583
@@ -599,7 +633,8 @@ rest_and_return:
599 * OR try to be smart about which process to kill. Note that we 633 * OR try to be smart about which process to kill. Note that we
600 * don't have to be perfect here, we just have to be good. 634 * don't have to be perfect here, we just have to be good.
601 */ 635 */
602void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) 636void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
637 int order, nodemask_t *nodemask)
603{ 638{
604 unsigned long freed = 0; 639 unsigned long freed = 0;
605 enum oom_constraint constraint; 640 enum oom_constraint constraint;
@@ -609,14 +644,16 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
609 /* Got some memory back in the last second. */ 644 /* Got some memory back in the last second. */
610 return; 645 return;
611 646
612 if (sysctl_panic_on_oom == 2) 647 if (sysctl_panic_on_oom == 2) {
648 dump_header(NULL, gfp_mask, order, NULL);
613 panic("out of memory. Compulsory panic_on_oom is selected.\n"); 649 panic("out of memory. Compulsory panic_on_oom is selected.\n");
650 }
614 651
615 /* 652 /*
616 * Check if there were limitations on the allocation (only relevant for 653 * Check if there were limitations on the allocation (only relevant for
617 * NUMA) that may require different handling. 654 * NUMA) that may require different handling.
618 */ 655 */
619 constraint = constrained_alloc(zonelist, gfp_mask); 656 constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
620 read_lock(&tasklist_lock); 657 read_lock(&tasklist_lock);
621 658
622 switch (constraint) { 659 switch (constraint) {
@@ -626,8 +663,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
626 break; 663 break;
627 664
628 case CONSTRAINT_NONE: 665 case CONSTRAINT_NONE:
629 if (sysctl_panic_on_oom) 666 if (sysctl_panic_on_oom) {
667 dump_header(NULL, gfp_mask, order, NULL);
630 panic("out of memory. panic_on_oom is selected\n"); 668 panic("out of memory. panic_on_oom is selected\n");
669 }
631 /* Fall-through */ 670 /* Fall-through */
632 case CONSTRAINT_CPUSET: 671 case CONSTRAINT_CPUSET:
633 __out_of_memory(gfp_mask, order); 672 __out_of_memory(gfp_mask, order);
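For readability, the decision order of the rewritten CONFIG_NUMA constrained_alloc() above, restated in one place (a condensed copy of the hunk, not new logic):

/* Condensed restatement of the NUMA constrained_alloc() above. */
if (gfp_mask & __GFP_THISNODE)
	return CONSTRAINT_NONE;            /* caller handles this failure itself   */
if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
	return CONSTRAINT_MEMORY_POLICY;   /* mempolicy narrowed the allowed nodes */
for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask)
	if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
		return CONSTRAINT_CPUSET;  /* a cpuset wall blocked this zone      */
return CONSTRAINT_NONE;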
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bc2ac63f41e..850c4a7e2fe5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page,
486 zone->free_area[order].nr_free++; 486 zone->free_area[order].nr_free++;
487} 487}
488 488
489#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
490/* 489/*
491 * free_page_mlock() -- clean up attempts to free and mlocked() page. 490 * free_page_mlock() -- clean up attempts to free and mlocked() page.
492 * Page should not be on lru, so no need to fix that up. 491 * Page should not be on lru, so no need to fix that up.
@@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page)
497 __dec_zone_page_state(page, NR_MLOCK); 496 __dec_zone_page_state(page, NR_MLOCK);
498 __count_vm_event(UNEVICTABLE_MLOCKFREED); 497 __count_vm_event(UNEVICTABLE_MLOCKFREED);
499} 498}
500#else
501static void free_page_mlock(struct page *page) { }
502#endif
503 499
504static inline int free_pages_check(struct page *page) 500static inline int free_pages_check(struct page *page)
505{ 501{
@@ -1658,12 +1654,22 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1658 if (page) 1654 if (page)
1659 goto out; 1655 goto out;
1660 1656
1661 /* The OOM killer will not help higher order allocs */ 1657 if (!(gfp_mask & __GFP_NOFAIL)) {
1662 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL)) 1658 /* The OOM killer will not help higher order allocs */
1663 goto out; 1659 if (order > PAGE_ALLOC_COSTLY_ORDER)
1664 1660 goto out;
1661 /*
1662 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1663 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1664 * The caller should handle page allocation failure by itself if
1665 * it specifies __GFP_THISNODE.
1666 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1667 */
1668 if (gfp_mask & __GFP_THISNODE)
1669 goto out;
1670 }
1665 /* Exhausted what can be done so it's blamo time */ 1671 /* Exhausted what can be done so it's blamo time */
1666 out_of_memory(zonelist, gfp_mask, order); 1672 out_of_memory(zonelist, gfp_mask, order, nodemask);
1667 1673
1668out: 1674out:
1669 clear_zonelist_oom(zonelist, gfp_mask); 1675 clear_zonelist_oom(zonelist, gfp_mask);
@@ -3127,7 +3133,7 @@ static int __cpuinit process_zones(int cpu)
3127 3133
3128 if (percpu_pagelist_fraction) 3134 if (percpu_pagelist_fraction)
3129 setup_pagelist_highmark(zone_pcp(zone, cpu), 3135 setup_pagelist_highmark(zone_pcp(zone, cpu),
3130 (zone->present_pages / percpu_pagelist_fraction)); 3136 (zone->present_pages / percpu_pagelist_fraction));
3131 } 3137 }
3132 3138
3133 return 0; 3139 return 0;
diff --git a/mm/page_io.c b/mm/page_io.c
index c6f3e5071de3..a19af956ee1b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,20 +19,15 @@
19#include <linux/writeback.h> 19#include <linux/writeback.h>
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21 21
22static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, 22static struct bio *get_swap_bio(gfp_t gfp_flags,
23 struct page *page, bio_end_io_t end_io) 23 struct page *page, bio_end_io_t end_io)
24{ 24{
25 struct bio *bio; 25 struct bio *bio;
26 26
27 bio = bio_alloc(gfp_flags, 1); 27 bio = bio_alloc(gfp_flags, 1);
28 if (bio) { 28 if (bio) {
29 struct swap_info_struct *sis; 29 bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
30 swp_entry_t entry = { .val = index, }; 30 bio->bi_sector <<= PAGE_SHIFT - 9;
31
32 sis = get_swap_info_struct(swp_type(entry));
33 bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
34 (PAGE_SIZE >> 9);
35 bio->bi_bdev = sis->bdev;
36 bio->bi_io_vec[0].bv_page = page; 31 bio->bi_io_vec[0].bv_page = page;
37 bio->bi_io_vec[0].bv_len = PAGE_SIZE; 32 bio->bi_io_vec[0].bv_len = PAGE_SIZE;
38 bio->bi_io_vec[0].bv_offset = 0; 33 bio->bi_io_vec[0].bv_offset = 0;
@@ -102,8 +97,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
102 unlock_page(page); 97 unlock_page(page);
103 goto out; 98 goto out;
104 } 99 }
105 bio = get_swap_bio(GFP_NOIO, page_private(page), page, 100 bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
106 end_swap_bio_write);
107 if (bio == NULL) { 101 if (bio == NULL) {
108 set_page_dirty(page); 102 set_page_dirty(page);
109 unlock_page(page); 103 unlock_page(page);
@@ -127,8 +121,7 @@ int swap_readpage(struct page *page)
127 121
128 VM_BUG_ON(!PageLocked(page)); 122 VM_BUG_ON(!PageLocked(page));
129 VM_BUG_ON(PageUptodate(page)); 123 VM_BUG_ON(PageUptodate(page));
130 bio = get_swap_bio(GFP_KERNEL, page_private(page), page, 124 bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
131 end_swap_bio_read);
132 if (bio == NULL) { 125 if (bio == NULL) {
133 unlock_page(page); 126 unlock_page(page);
134 ret = -ENOMEM; 127 ret = -ENOMEM;
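The mm/page_io.c change above relies on map_swap_page() now taking the page itself, returning the swap slot offset in page-size units and filling in the backing block device through its second argument, so the swp_entry_t plumbing and the get_swap_info_struct() lookup disappear. The new call pattern, extracted from the hunk:

bio->bi_sector = map_swap_page(page, &bio->bi_bdev); /* offset in PAGE_SIZE units  */
bio->bi_sector <<= PAGE_SHIFT - 9;                   /* convert to 512-byte sectors */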
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index d5878bed7841..7b47a57b6646 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1,6 +1,7 @@
1#include <linux/mm.h> 1#include <linux/mm.h>
2#include <linux/highmem.h> 2#include <linux/highmem.h>
3#include <linux/sched.h> 3#include <linux/sched.h>
4#include <linux/hugetlb.h>
4 5
5static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 6static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
6 struct mm_walk *walk) 7 struct mm_walk *walk)
@@ -107,6 +108,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
107 pgd_t *pgd; 108 pgd_t *pgd;
108 unsigned long next; 109 unsigned long next;
109 int err = 0; 110 int err = 0;
111 struct vm_area_struct *vma;
110 112
111 if (addr >= end) 113 if (addr >= end)
112 return err; 114 return err;
@@ -117,11 +119,38 @@ int walk_page_range(unsigned long addr, unsigned long end,
117 pgd = pgd_offset(walk->mm, addr); 119 pgd = pgd_offset(walk->mm, addr);
118 do { 120 do {
119 next = pgd_addr_end(addr, end); 121 next = pgd_addr_end(addr, end);
122
123 /*
124 * handle hugetlb vmas individually because the page table walk for
125 * hugetlb pages depends on the architecture and
126 * we can't handle them in the same manner as non-huge pages.
127 */
128 vma = find_vma(walk->mm, addr);
129#ifdef CONFIG_HUGETLB_PAGE
130 if (vma && is_vm_hugetlb_page(vma)) {
131 pte_t *pte;
132 struct hstate *hs;
133
134 if (vma->vm_end < next)
135 next = vma->vm_end;
136 hs = hstate_vma(vma);
137 pte = huge_pte_offset(walk->mm,
138 addr & huge_page_mask(hs));
139 if (pte && !huge_pte_none(huge_ptep_get(pte))
140 && walk->hugetlb_entry)
141 err = walk->hugetlb_entry(pte, addr,
142 next, walk);
143 if (err)
144 break;
145 continue;
146 }
147#endif
120 if (pgd_none_or_clear_bad(pgd)) { 148 if (pgd_none_or_clear_bad(pgd)) {
121 if (walk->pte_hole) 149 if (walk->pte_hole)
122 err = walk->pte_hole(addr, next, walk); 150 err = walk->pte_hole(addr, next, walk);
123 if (err) 151 if (err)
124 break; 152 break;
153 pgd++;
125 continue; 154 continue;
126 } 155 }
127 if (walk->pgd_entry) 156 if (walk->pgd_entry)
@@ -131,7 +160,8 @@ int walk_page_range(unsigned long addr, unsigned long end,
131 err = walk_pud_range(pgd, addr, next, walk); 160 err = walk_pud_range(pgd, addr, next, walk);
132 if (err) 161 if (err)
133 break; 162 break;
134 } while (pgd++, addr = next, addr != end); 163 pgd++;
164 } while (addr = next, addr != end);
135 165
136 return err; 166 return err;
137} 167}
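walk_page_range() above now dispatches hugetlb VMAs to a new walk->hugetlb_entry callback and only advances the pgd on the normal path. A minimal sketch of a walker using that callback, assuming struct mm_walk gained a hugetlb_entry member with the signature used in the hunk (the rest is illustrative):

/* Sketch: a walker that only looks at huge PTEs.  hugetlb_entry is called
 * once per hugetlb step with the range [addr, next) it covers; returning
 * non-zero aborts walk_page_range(). */
static int only_huge_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	return 0;
}

/* usage:
 *	struct mm_walk walk = {
 *		.hugetlb_entry	= only_huge_entry,
 *		.mm		= mm,
 *	};
 *	walk_page_range(start, end, &walk);
 */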
diff --git a/mm/rmap.c b/mm/rmap.c
index dd43373a483f..278cd277bdec 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
49#include <linux/swapops.h> 49#include <linux/swapops.h>
50#include <linux/slab.h> 50#include <linux/slab.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/ksm.h>
52#include <linux/rmap.h> 53#include <linux/rmap.h>
53#include <linux/rcupdate.h> 54#include <linux/rcupdate.h>
54#include <linux/module.h> 55#include <linux/module.h>
@@ -67,7 +68,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
67 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 68 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
68} 69}
69 70
70static inline void anon_vma_free(struct anon_vma *anon_vma) 71void anon_vma_free(struct anon_vma *anon_vma)
71{ 72{
72 kmem_cache_free(anon_vma_cachep, anon_vma); 73 kmem_cache_free(anon_vma_cachep, anon_vma);
73} 74}
@@ -171,7 +172,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
171 list_del(&vma->anon_vma_node); 172 list_del(&vma->anon_vma_node);
172 173
173 /* We must garbage collect the anon_vma if it's empty */ 174 /* We must garbage collect the anon_vma if it's empty */
174 empty = list_empty(&anon_vma->head); 175 empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
175 spin_unlock(&anon_vma->lock); 176 spin_unlock(&anon_vma->lock);
176 177
177 if (empty) 178 if (empty)
@@ -183,6 +184,7 @@ static void anon_vma_ctor(void *data)
183 struct anon_vma *anon_vma = data; 184 struct anon_vma *anon_vma = data;
184 185
185 spin_lock_init(&anon_vma->lock); 186 spin_lock_init(&anon_vma->lock);
187 ksm_refcount_init(anon_vma);
186 INIT_LIST_HEAD(&anon_vma->head); 188 INIT_LIST_HEAD(&anon_vma->head);
187} 189}
188 190
@@ -202,8 +204,8 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
202 unsigned long anon_mapping; 204 unsigned long anon_mapping;
203 205
204 rcu_read_lock(); 206 rcu_read_lock();
205 anon_mapping = (unsigned long) page->mapping; 207 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
206 if (!(anon_mapping & PAGE_MAPPING_ANON)) 208 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
207 goto out; 209 goto out;
208 if (!page_mapped(page)) 210 if (!page_mapped(page))
209 goto out; 211 goto out;
@@ -248,8 +250,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
248unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 250unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
249{ 251{
250 if (PageAnon(page)) { 252 if (PageAnon(page)) {
251 if ((void *)vma->anon_vma != 253 if (vma->anon_vma != page_anon_vma(page))
252 (void *)page->mapping - PAGE_MAPPING_ANON)
253 return -EFAULT; 254 return -EFAULT;
254 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 255 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
255 if (!vma->vm_file || 256 if (!vma->vm_file ||
@@ -337,21 +338,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
337 * Subfunctions of page_referenced: page_referenced_one called 338 * Subfunctions of page_referenced: page_referenced_one called
338 * repeatedly from either page_referenced_anon or page_referenced_file. 339 * repeatedly from either page_referenced_anon or page_referenced_file.
339 */ 340 */
340static int page_referenced_one(struct page *page, 341int page_referenced_one(struct page *page, struct vm_area_struct *vma,
341 struct vm_area_struct *vma, 342 unsigned long address, unsigned int *mapcount,
342 unsigned int *mapcount, 343 unsigned long *vm_flags)
343 unsigned long *vm_flags)
344{ 344{
345 struct mm_struct *mm = vma->vm_mm; 345 struct mm_struct *mm = vma->vm_mm;
346 unsigned long address;
347 pte_t *pte; 346 pte_t *pte;
348 spinlock_t *ptl; 347 spinlock_t *ptl;
349 int referenced = 0; 348 int referenced = 0;
350 349
351 address = vma_address(page, vma);
352 if (address == -EFAULT)
353 goto out;
354
355 pte = page_check_address(page, mm, address, &ptl, 0); 350 pte = page_check_address(page, mm, address, &ptl, 0);
356 if (!pte) 351 if (!pte)
357 goto out; 352 goto out;
@@ -388,9 +383,10 @@ static int page_referenced_one(struct page *page,
388out_unmap: 383out_unmap:
389 (*mapcount)--; 384 (*mapcount)--;
390 pte_unmap_unlock(pte, ptl); 385 pte_unmap_unlock(pte, ptl);
391out: 386
392 if (referenced) 387 if (referenced)
393 *vm_flags |= vma->vm_flags; 388 *vm_flags |= vma->vm_flags;
389out:
394 return referenced; 390 return referenced;
395} 391}
396 392
@@ -409,6 +405,9 @@ static int page_referenced_anon(struct page *page,
409 405
410 mapcount = page_mapcount(page); 406 mapcount = page_mapcount(page);
411 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 407 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
408 unsigned long address = vma_address(page, vma);
409 if (address == -EFAULT)
410 continue;
412 /* 411 /*
413 * If we are reclaiming on behalf of a cgroup, skip 412 * If we are reclaiming on behalf of a cgroup, skip
414 * counting on behalf of references from different 413 * counting on behalf of references from different
@@ -416,7 +415,7 @@ static int page_referenced_anon(struct page *page,
416 */ 415 */
417 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 416 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
418 continue; 417 continue;
419 referenced += page_referenced_one(page, vma, 418 referenced += page_referenced_one(page, vma, address,
420 &mapcount, vm_flags); 419 &mapcount, vm_flags);
421 if (!mapcount) 420 if (!mapcount)
422 break; 421 break;
@@ -474,6 +473,9 @@ static int page_referenced_file(struct page *page,
474 mapcount = page_mapcount(page); 473 mapcount = page_mapcount(page);
475 474
476 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 475 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
476 unsigned long address = vma_address(page, vma);
477 if (address == -EFAULT)
478 continue;
477 /* 479 /*
478 * If we are reclaiming on behalf of a cgroup, skip 480 * If we are reclaiming on behalf of a cgroup, skip
479 * counting on behalf of references from different 481 * counting on behalf of references from different
@@ -481,7 +483,7 @@ static int page_referenced_file(struct page *page,
481 */ 483 */
482 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 484 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
483 continue; 485 continue;
484 referenced += page_referenced_one(page, vma, 486 referenced += page_referenced_one(page, vma, address,
485 &mapcount, vm_flags); 487 &mapcount, vm_flags);
486 if (!mapcount) 488 if (!mapcount)
487 break; 489 break;
@@ -507,46 +509,47 @@ int page_referenced(struct page *page,
507 unsigned long *vm_flags) 509 unsigned long *vm_flags)
508{ 510{
509 int referenced = 0; 511 int referenced = 0;
512 int we_locked = 0;
510 513
511 if (TestClearPageReferenced(page)) 514 if (TestClearPageReferenced(page))
512 referenced++; 515 referenced++;
513 516
514 *vm_flags = 0; 517 *vm_flags = 0;
515 if (page_mapped(page) && page->mapping) { 518 if (page_mapped(page) && page_rmapping(page)) {
516 if (PageAnon(page)) 519 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
520 we_locked = trylock_page(page);
521 if (!we_locked) {
522 referenced++;
523 goto out;
524 }
525 }
526 if (unlikely(PageKsm(page)))
527 referenced += page_referenced_ksm(page, mem_cont,
528 vm_flags);
529 else if (PageAnon(page))
517 referenced += page_referenced_anon(page, mem_cont, 530 referenced += page_referenced_anon(page, mem_cont,
518 vm_flags); 531 vm_flags);
519 else if (is_locked) 532 else if (page->mapping)
520 referenced += page_referenced_file(page, mem_cont, 533 referenced += page_referenced_file(page, mem_cont,
521 vm_flags); 534 vm_flags);
522 else if (!trylock_page(page)) 535 if (we_locked)
523 referenced++;
524 else {
525 if (page->mapping)
526 referenced += page_referenced_file(page,
527 mem_cont, vm_flags);
528 unlock_page(page); 536 unlock_page(page);
529 }
530 } 537 }
531 538out:
532 if (page_test_and_clear_young(page)) 539 if (page_test_and_clear_young(page))
533 referenced++; 540 referenced++;
534 541
535 return referenced; 542 return referenced;
536} 543}
537 544
538static int page_mkclean_one(struct page *page, struct vm_area_struct *vma) 545static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
546 unsigned long address)
539{ 547{
540 struct mm_struct *mm = vma->vm_mm; 548 struct mm_struct *mm = vma->vm_mm;
541 unsigned long address;
542 pte_t *pte; 549 pte_t *pte;
543 spinlock_t *ptl; 550 spinlock_t *ptl;
544 int ret = 0; 551 int ret = 0;
545 552
546 address = vma_address(page, vma);
547 if (address == -EFAULT)
548 goto out;
549
550 pte = page_check_address(page, mm, address, &ptl, 1); 553 pte = page_check_address(page, mm, address, &ptl, 1);
551 if (!pte) 554 if (!pte)
552 goto out; 555 goto out;
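The page_referenced() rework in the hunk above is easier to follow flattened out: unlocked callers now trylock the page themselves for the file and KSM cases, a failed trylock is simply counted as a reference, and KSM pages get their own walker. Condensed restatement of the new dispatch (no new logic):

if (page_mapped(page) && page_rmapping(page)) {
	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked) {
			referenced++;	/* lock contention counts as a reference */
			goto out;
		}
	}
	if (unlikely(PageKsm(page)))
		referenced += page_referenced_ksm(page, mem_cont, vm_flags);
	else if (PageAnon(page))
		referenced += page_referenced_anon(page, mem_cont, vm_flags);
	else if (page->mapping)
		referenced += page_referenced_file(page, mem_cont, vm_flags);
	if (we_locked)
		unlock_page(page);
}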
@@ -578,8 +581,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
578 581
579 spin_lock(&mapping->i_mmap_lock); 582 spin_lock(&mapping->i_mmap_lock);
580 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 583 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
581 if (vma->vm_flags & VM_SHARED) 584 if (vma->vm_flags & VM_SHARED) {
582 ret += page_mkclean_one(page, vma); 585 unsigned long address = vma_address(page, vma);
586 if (address == -EFAULT)
587 continue;
588 ret += page_mkclean_one(page, vma, address);
589 }
583 } 590 }
584 spin_unlock(&mapping->i_mmap_lock); 591 spin_unlock(&mapping->i_mmap_lock);
585 return ret; 592 return ret;
@@ -620,14 +627,7 @@ static void __page_set_anon_rmap(struct page *page,
620 BUG_ON(!anon_vma); 627 BUG_ON(!anon_vma);
621 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 628 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
622 page->mapping = (struct address_space *) anon_vma; 629 page->mapping = (struct address_space *) anon_vma;
623
624 page->index = linear_page_index(vma, address); 630 page->index = linear_page_index(vma, address);
625
626 /*
627 * nr_mapped state can be updated without turning off
628 * interrupts because it is not modified via interrupt.
629 */
630 __inc_zone_page_state(page, NR_ANON_PAGES);
631} 631}
632 632
633/** 633/**
@@ -665,14 +665,23 @@ static void __page_check_anon_rmap(struct page *page,
665 * @vma: the vm area in which the mapping is added 665 * @vma: the vm area in which the mapping is added
666 * @address: the user virtual address mapped 666 * @address: the user virtual address mapped
667 * 667 *
668 * The caller needs to hold the pte lock and the page must be locked. 668 * The caller needs to hold the pte lock, and the page must be locked in
669 * the anon_vma case: to serialize mapping and index checking after setting,
670 * and to ensure that PageAnon is not being upgraded racily to PageKsm
671 * (but PageKsm is never downgraded to PageAnon).
669 */ 672 */
670void page_add_anon_rmap(struct page *page, 673void page_add_anon_rmap(struct page *page,
671 struct vm_area_struct *vma, unsigned long address) 674 struct vm_area_struct *vma, unsigned long address)
672{ 675{
676 int first = atomic_inc_and_test(&page->_mapcount);
677 if (first)
678 __inc_zone_page_state(page, NR_ANON_PAGES);
679 if (unlikely(PageKsm(page)))
680 return;
681
673 VM_BUG_ON(!PageLocked(page)); 682 VM_BUG_ON(!PageLocked(page));
674 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 683 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
675 if (atomic_inc_and_test(&page->_mapcount)) 684 if (first)
676 __page_set_anon_rmap(page, vma, address); 685 __page_set_anon_rmap(page, vma, address);
677 else 686 else
678 __page_check_anon_rmap(page, vma, address); 687 __page_check_anon_rmap(page, vma, address);
@@ -694,6 +703,7 @@ void page_add_new_anon_rmap(struct page *page,
694 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 703 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
695 SetPageSwapBacked(page); 704 SetPageSwapBacked(page);
696 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ 705 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
706 __inc_zone_page_state(page, NR_ANON_PAGES);
697 __page_set_anon_rmap(page, vma, address); 707 __page_set_anon_rmap(page, vma, address);
698 if (page_evictable(page, vma)) 708 if (page_evictable(page, vma))
699 lru_cache_add_lru(page, LRU_ACTIVE_ANON); 709 lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -711,7 +721,7 @@ void page_add_file_rmap(struct page *page)
711{ 721{
712 if (atomic_inc_and_test(&page->_mapcount)) { 722 if (atomic_inc_and_test(&page->_mapcount)) {
713 __inc_zone_page_state(page, NR_FILE_MAPPED); 723 __inc_zone_page_state(page, NR_FILE_MAPPED);
714 mem_cgroup_update_mapped_file_stat(page, 1); 724 mem_cgroup_update_file_mapped(page, 1);
715 } 725 }
716} 726}
717 727
@@ -743,8 +753,8 @@ void page_remove_rmap(struct page *page)
743 __dec_zone_page_state(page, NR_ANON_PAGES); 753 __dec_zone_page_state(page, NR_ANON_PAGES);
744 } else { 754 } else {
745 __dec_zone_page_state(page, NR_FILE_MAPPED); 755 __dec_zone_page_state(page, NR_FILE_MAPPED);
756 mem_cgroup_update_file_mapped(page, -1);
746 } 757 }
747 mem_cgroup_update_mapped_file_stat(page, -1);
748 /* 758 /*
749 * It would be tidy to reset the PageAnon mapping here, 759 * It would be tidy to reset the PageAnon mapping here,
750 * but that might overwrite a racing page_add_anon_rmap 760 * but that might overwrite a racing page_add_anon_rmap
@@ -760,20 +770,15 @@ void page_remove_rmap(struct page *page)
760 * Subfunctions of try_to_unmap: try_to_unmap_one called 770 * Subfunctions of try_to_unmap: try_to_unmap_one called
761 * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 771 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
762 */ 772 */
763static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 773int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
764 enum ttu_flags flags) 774 unsigned long address, enum ttu_flags flags)
765{ 775{
766 struct mm_struct *mm = vma->vm_mm; 776 struct mm_struct *mm = vma->vm_mm;
767 unsigned long address;
768 pte_t *pte; 777 pte_t *pte;
769 pte_t pteval; 778 pte_t pteval;
770 spinlock_t *ptl; 779 spinlock_t *ptl;
771 int ret = SWAP_AGAIN; 780 int ret = SWAP_AGAIN;
772 781
773 address = vma_address(page, vma);
774 if (address == -EFAULT)
775 goto out;
776
777 pte = page_check_address(page, mm, address, &ptl, 0); 782 pte = page_check_address(page, mm, address, &ptl, 0);
778 if (!pte) 783 if (!pte)
779 goto out; 784 goto out;
@@ -784,10 +789,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
784 * skipped over this mm) then we should reactivate it. 789 * skipped over this mm) then we should reactivate it.
785 */ 790 */
786 if (!(flags & TTU_IGNORE_MLOCK)) { 791 if (!(flags & TTU_IGNORE_MLOCK)) {
787 if (vma->vm_flags & VM_LOCKED) { 792 if (vma->vm_flags & VM_LOCKED)
788 ret = SWAP_MLOCK; 793 goto out_mlock;
794
795 if (TTU_ACTION(flags) == TTU_MUNLOCK)
789 goto out_unmap; 796 goto out_unmap;
790 }
791 } 797 }
792 if (!(flags & TTU_IGNORE_ACCESS)) { 798 if (!(flags & TTU_IGNORE_ACCESS)) {
793 if (ptep_clear_flush_young_notify(vma, address, pte)) { 799 if (ptep_clear_flush_young_notify(vma, address, pte)) {
@@ -822,7 +828,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
822 * Store the swap location in the pte. 828 * Store the swap location in the pte.
823 * See handle_pte_fault() ... 829 * See handle_pte_fault() ...
824 */ 830 */
825 swap_duplicate(entry); 831 if (swap_duplicate(entry) < 0) {
832 set_pte_at(mm, address, pte, pteval);
833 ret = SWAP_FAIL;
834 goto out_unmap;
835 }
826 if (list_empty(&mm->mmlist)) { 836 if (list_empty(&mm->mmlist)) {
827 spin_lock(&mmlist_lock); 837 spin_lock(&mmlist_lock);
828 if (list_empty(&mm->mmlist)) 838 if (list_empty(&mm->mmlist))
@@ -849,7 +859,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
849 } else 859 } else
850 dec_mm_counter(mm, file_rss); 860 dec_mm_counter(mm, file_rss);
851 861
852
853 page_remove_rmap(page); 862 page_remove_rmap(page);
854 page_cache_release(page); 863 page_cache_release(page);
855 864
@@ -857,6 +866,27 @@ out_unmap:
857 pte_unmap_unlock(pte, ptl); 866 pte_unmap_unlock(pte, ptl);
858out: 867out:
859 return ret; 868 return ret;
869
870out_mlock:
871 pte_unmap_unlock(pte, ptl);
872
873
874 /*
875 * We need mmap_sem locking; otherwise the VM_LOCKED check is racy
876 * and gives an unstable result. We also can't wait here, because
877 * we now hold anon_vma->lock or mapping->i_mmap_lock.
878 * If the trylock fails, the page remains on the evictable lru and
879 * vmscan can later retry moving it to the unevictable lru if the
880 * page is actually mlocked.
881 */
882 if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
883 if (vma->vm_flags & VM_LOCKED) {
884 mlock_vma_page(page);
885 ret = SWAP_MLOCK;
886 }
887 up_read(&vma->vm_mm->mmap_sem);
888 }
889 return ret;
860} 890}
861 891
862/* 892/*
@@ -922,11 +952,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
922 return ret; 952 return ret;
923 953
924 /* 954 /*
925 * MLOCK_PAGES => feature is configured. 955 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
926 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
927 * keep the sem while scanning the cluster for mlocking pages. 956 * keep the sem while scanning the cluster for mlocking pages.
928 */ 957 */
929 if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) { 958 if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
930 locked_vma = (vma->vm_flags & VM_LOCKED); 959 locked_vma = (vma->vm_flags & VM_LOCKED);
931 if (!locked_vma) 960 if (!locked_vma)
932 up_read(&vma->vm_mm->mmap_sem); /* don't need it */ 961 up_read(&vma->vm_mm->mmap_sem); /* don't need it */
@@ -976,29 +1005,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
976 return ret; 1005 return ret;
977} 1006}
978 1007
979/*
980 * common handling for pages mapped in VM_LOCKED vmas
981 */
982static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
983{
984 int mlocked = 0;
985
986 if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
987 if (vma->vm_flags & VM_LOCKED) {
988 mlock_vma_page(page);
989 mlocked++; /* really mlocked the page */
990 }
991 up_read(&vma->vm_mm->mmap_sem);
992 }
993 return mlocked;
994}
995
996/** 1008/**
997 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based 1009 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
998 * rmap method 1010 * rmap method
999 * @page: the page to unmap/unlock 1011 * @page: the page to unmap/unlock
1000 * @unlock: request for unlock rather than unmap [unlikely] 1012 * @flags: action and flags
1001 * @migration: unmapping for migration - ignored if @unlock
1002 * 1013 *
1003 * Find all the mappings of a page using the mapping pointer and the vma chains 1014 * Find all the mappings of a page using the mapping pointer and the vma chains
1004 * contained in the anon_vma struct it points to. 1015 * contained in the anon_vma struct it points to.
@@ -1014,42 +1025,22 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
1014{ 1025{
1015 struct anon_vma *anon_vma; 1026 struct anon_vma *anon_vma;
1016 struct vm_area_struct *vma; 1027 struct vm_area_struct *vma;
1017 unsigned int mlocked = 0;
1018 int ret = SWAP_AGAIN; 1028 int ret = SWAP_AGAIN;
1019 int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
1020
1021 if (MLOCK_PAGES && unlikely(unlock))
1022 ret = SWAP_SUCCESS; /* default for try_to_munlock() */
1023 1029
1024 anon_vma = page_lock_anon_vma(page); 1030 anon_vma = page_lock_anon_vma(page);
1025 if (!anon_vma) 1031 if (!anon_vma)
1026 return ret; 1032 return ret;
1027 1033
1028 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { 1034 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
1029 if (MLOCK_PAGES && unlikely(unlock)) { 1035 unsigned long address = vma_address(page, vma);
1030 if (!((vma->vm_flags & VM_LOCKED) && 1036 if (address == -EFAULT)
1031 page_mapped_in_vma(page, vma))) 1037 continue;
1032 continue; /* must visit all unlocked vmas */ 1038 ret = try_to_unmap_one(page, vma, address, flags);
1033 ret = SWAP_MLOCK; /* saw at least one mlocked vma */ 1039 if (ret != SWAP_AGAIN || !page_mapped(page))
1034 } else { 1040 break;
1035 ret = try_to_unmap_one(page, vma, flags);
1036 if (ret == SWAP_FAIL || !page_mapped(page))
1037 break;
1038 }
1039 if (ret == SWAP_MLOCK) {
1040 mlocked = try_to_mlock_page(page, vma);
1041 if (mlocked)
1042 break; /* stop if actually mlocked page */
1043 }
1044 } 1041 }
1045 1042
1046 page_unlock_anon_vma(anon_vma); 1043 page_unlock_anon_vma(anon_vma);
1047
1048 if (mlocked)
1049 ret = SWAP_MLOCK; /* actually mlocked the page */
1050 else if (ret == SWAP_MLOCK)
1051 ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
1052
1053 return ret; 1044 return ret;
1054} 1045}
1055 1046
@@ -1079,48 +1070,30 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1079 unsigned long max_nl_cursor = 0; 1070 unsigned long max_nl_cursor = 0;
1080 unsigned long max_nl_size = 0; 1071 unsigned long max_nl_size = 0;
1081 unsigned int mapcount; 1072 unsigned int mapcount;
1082 unsigned int mlocked = 0;
1083 int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
1084
1085 if (MLOCK_PAGES && unlikely(unlock))
1086 ret = SWAP_SUCCESS; /* default for try_to_munlock() */
1087 1073
1088 spin_lock(&mapping->i_mmap_lock); 1074 spin_lock(&mapping->i_mmap_lock);
1089 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1075 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1090 if (MLOCK_PAGES && unlikely(unlock)) { 1076 unsigned long address = vma_address(page, vma);
1091 if (!((vma->vm_flags & VM_LOCKED) && 1077 if (address == -EFAULT)
1092 page_mapped_in_vma(page, vma))) 1078 continue;
1093 continue; /* must visit all vmas */ 1079 ret = try_to_unmap_one(page, vma, address, flags);
1094 ret = SWAP_MLOCK; 1080 if (ret != SWAP_AGAIN || !page_mapped(page))
1095 } else { 1081 goto out;
1096 ret = try_to_unmap_one(page, vma, flags);
1097 if (ret == SWAP_FAIL || !page_mapped(page))
1098 goto out;
1099 }
1100 if (ret == SWAP_MLOCK) {
1101 mlocked = try_to_mlock_page(page, vma);
1102 if (mlocked)
1103 break; /* stop if actually mlocked page */
1104 }
1105 } 1082 }
1106 1083
1107 if (mlocked) 1084 if (list_empty(&mapping->i_mmap_nonlinear))
1108 goto out; 1085 goto out;
1109 1086
1110 if (list_empty(&mapping->i_mmap_nonlinear)) 1087 /*
1088 * We don't bother to try to find the munlocked page in nonlinears.
1089 * It's costly. Instead, later, page reclaim logic may call
1090 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
1091 */
1092 if (TTU_ACTION(flags) == TTU_MUNLOCK)
1111 goto out; 1093 goto out;
1112 1094
1113 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1095 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1114 shared.vm_set.list) { 1096 shared.vm_set.list) {
1115 if (MLOCK_PAGES && unlikely(unlock)) {
1116 if (!(vma->vm_flags & VM_LOCKED))
1117 continue; /* must visit all vmas */
1118 ret = SWAP_MLOCK; /* leave mlocked == 0 */
1119 goto out; /* no need to look further */
1120 }
1121 if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
1122 (vma->vm_flags & VM_LOCKED))
1123 continue;
1124 cursor = (unsigned long) vma->vm_private_data; 1097 cursor = (unsigned long) vma->vm_private_data;
1125 if (cursor > max_nl_cursor) 1098 if (cursor > max_nl_cursor)
1126 max_nl_cursor = cursor; 1099 max_nl_cursor = cursor;
@@ -1153,16 +1126,12 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1153 do { 1126 do {
1154 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1127 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1155 shared.vm_set.list) { 1128 shared.vm_set.list) {
1156 if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
1157 (vma->vm_flags & VM_LOCKED))
1158 continue;
1159 cursor = (unsigned long) vma->vm_private_data; 1129 cursor = (unsigned long) vma->vm_private_data;
1160 while ( cursor < max_nl_cursor && 1130 while ( cursor < max_nl_cursor &&
1161 cursor < vma->vm_end - vma->vm_start) { 1131 cursor < vma->vm_end - vma->vm_start) {
1162 ret = try_to_unmap_cluster(cursor, &mapcount, 1132 if (try_to_unmap_cluster(cursor, &mapcount,
1163 vma, page); 1133 vma, page) == SWAP_MLOCK)
1164 if (ret == SWAP_MLOCK) 1134 ret = SWAP_MLOCK;
1165 mlocked = 2; /* to return below */
1166 cursor += CLUSTER_SIZE; 1135 cursor += CLUSTER_SIZE;
1167 vma->vm_private_data = (void *) cursor; 1136 vma->vm_private_data = (void *) cursor;
1168 if ((int)mapcount <= 0) 1137 if ((int)mapcount <= 0)
@@ -1183,10 +1152,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1183 vma->vm_private_data = NULL; 1152 vma->vm_private_data = NULL;
1184out: 1153out:
1185 spin_unlock(&mapping->i_mmap_lock); 1154 spin_unlock(&mapping->i_mmap_lock);
1186 if (mlocked)
1187 ret = SWAP_MLOCK; /* actually mlocked the page */
1188 else if (ret == SWAP_MLOCK)
1189 ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
1190 return ret; 1155 return ret;
1191} 1156}
1192 1157
@@ -1210,7 +1175,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
1210 1175
1211 BUG_ON(!PageLocked(page)); 1176 BUG_ON(!PageLocked(page));
1212 1177
1213 if (PageAnon(page)) 1178 if (unlikely(PageKsm(page)))
1179 ret = try_to_unmap_ksm(page, flags);
1180 else if (PageAnon(page))
1214 ret = try_to_unmap_anon(page, flags); 1181 ret = try_to_unmap_anon(page, flags);
1215 else 1182 else
1216 ret = try_to_unmap_file(page, flags); 1183 ret = try_to_unmap_file(page, flags);
@@ -1229,17 +1196,98 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
1229 * 1196 *
1230 * Return values are: 1197 * Return values are:
1231 * 1198 *
1232 * SWAP_SUCCESS - no vma's holding page mlocked. 1199 * SWAP_AGAIN - no vma is holding page mlocked, or,
1233 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem 1200 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
1201 * SWAP_FAIL - page cannot be located at present
1234 * SWAP_MLOCK - page is now mlocked. 1202 * SWAP_MLOCK - page is now mlocked.
1235 */ 1203 */
1236int try_to_munlock(struct page *page) 1204int try_to_munlock(struct page *page)
1237{ 1205{
1238 VM_BUG_ON(!PageLocked(page) || PageLRU(page)); 1206 VM_BUG_ON(!PageLocked(page) || PageLRU(page));
1239 1207
1240 if (PageAnon(page)) 1208 if (unlikely(PageKsm(page)))
1209 return try_to_unmap_ksm(page, TTU_MUNLOCK);
1210 else if (PageAnon(page))
1241 return try_to_unmap_anon(page, TTU_MUNLOCK); 1211 return try_to_unmap_anon(page, TTU_MUNLOCK);
1242 else 1212 else
1243 return try_to_unmap_file(page, TTU_MUNLOCK); 1213 return try_to_unmap_file(page, TTU_MUNLOCK);
1244} 1214}
1245 1215
1216#ifdef CONFIG_MIGRATION
1217/*
1218 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
1219 * Called by migrate.c to remove migration ptes, but might be used more later.
1220 */
1221static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
1222 struct vm_area_struct *, unsigned long, void *), void *arg)
1223{
1224 struct anon_vma *anon_vma;
1225 struct vm_area_struct *vma;
1226 int ret = SWAP_AGAIN;
1227
1228 /*
1229 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
1230 * because that depends on page_mapped(); but not all its usages
1231 * are holding mmap_sem, which also gave the necessary guarantee
1232 * (that this anon_vma's slab has not already been destroyed).
1233 * This needs to be reviewed later: avoiding page_lock_anon_vma()
1234 * is risky, and currently limits the usefulness of rmap_walk().
1235 */
1236 anon_vma = page_anon_vma(page);
1237 if (!anon_vma)
1238 return ret;
1239 spin_lock(&anon_vma->lock);
1240 list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
1241 unsigned long address = vma_address(page, vma);
1242 if (address == -EFAULT)
1243 continue;
1244 ret = rmap_one(page, vma, address, arg);
1245 if (ret != SWAP_AGAIN)
1246 break;
1247 }
1248 spin_unlock(&anon_vma->lock);
1249 return ret;
1250}
1251
1252static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1253 struct vm_area_struct *, unsigned long, void *), void *arg)
1254{
1255 struct address_space *mapping = page->mapping;
1256 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1257 struct vm_area_struct *vma;
1258 struct prio_tree_iter iter;
1259 int ret = SWAP_AGAIN;
1260
1261 if (!mapping)
1262 return ret;
1263 spin_lock(&mapping->i_mmap_lock);
1264 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1265 unsigned long address = vma_address(page, vma);
1266 if (address == -EFAULT)
1267 continue;
1268 ret = rmap_one(page, vma, address, arg);
1269 if (ret != SWAP_AGAIN)
1270 break;
1271 }
1272 /*
1273 * No nonlinear handling: being always shared, nonlinear vmas
1274 * never contain migration ptes. Decide what to do about this
1275 * limitation to linear when we need rmap_walk() on nonlinear.
1276 */
1277 spin_unlock(&mapping->i_mmap_lock);
1278 return ret;
1279}
1280
1281int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
1282 struct vm_area_struct *, unsigned long, void *), void *arg)
1283{
1284 VM_BUG_ON(!PageLocked(page));
1285
1286 if (unlikely(PageKsm(page)))
1287 return rmap_walk_ksm(page, rmap_one, arg);
1288 else if (PageAnon(page))
1289 return rmap_walk_anon(page, rmap_one, arg);
1290 else
1291 return rmap_walk_file(page, rmap_one, arg);
1292}
1293#endif /* CONFIG_MIGRATION */
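The rmap_walk() helpers added above expose a generic per-vma walk; remove_migration_ptes() in migrate.c is so far the only caller. A minimal sketch of the callback contract follows — the callback name and its counting purpose are hypothetical, only the rmap_walk()/SWAP_AGAIN interface comes from the patch: the walker invokes rmap_one() once per mapping vma with the page's address in that vma, and stops as soon as the callback returns anything other than SWAP_AGAIN.

	/*
	 * Sketch only: a hypothetical rmap_walk() user that counts the
	 * vmas still mapping a locked page.
	 */
	static int count_one_mapping(struct page *page, struct vm_area_struct *vma,
				     unsigned long address, void *arg)
	{
		int *nr = arg;

		(*nr)++;		/* do per-vma work here */
		return SWAP_AGAIN;	/* keep walking the remaining vmas */
	}

	static int count_mappings(struct page *page)
	{
		int nr = 0;

		/* rmap_walk() asserts PageLocked(page) with VM_BUG_ON */
		rmap_walk(page, count_one_mapping, &nr);
		return nr;
	}

The same pattern is what lets migration reuse one walker for anon, file and (via rmap_walk_ksm) KSM pages without duplicating the locking in each caller.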
diff --git a/mm/shmem.c b/mm/shmem.c
index 356dd99566ec..4fb41c83daca 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
1017 goto out; 1017 goto out;
1018 } 1018 }
1019 mutex_unlock(&shmem_swaplist_mutex); 1019 mutex_unlock(&shmem_swaplist_mutex);
1020out: return found; /* 0 or 1 or -ENOMEM */ 1020 /*
1021 * Can some race bring us here? We've been holding page lock,
1022 * so I think not; but would rather try again later than BUG()
1023 */
1024 unlock_page(page);
1025 page_cache_release(page);
1026out:
1027 return (found < 0) ? found : 0;
1021} 1028}
1022 1029
1023/* 1030/*
@@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1080 else 1087 else
1081 inode = NULL; 1088 inode = NULL;
1082 spin_unlock(&info->lock); 1089 spin_unlock(&info->lock);
1083 swap_duplicate(swap); 1090 swap_shmem_alloc(swap);
1084 BUG_ON(page_mapped(page)); 1091 BUG_ON(page_mapped(page));
1085 page_cache_release(page); /* pagecache ref */ 1092 page_cache_release(page); /* pagecache ref */
1086 swap_writepage(page, wbc); 1093 swap_writepage(page, wbc);
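With the first hunk shmem_unuse() takes over unlocking and releasing the page and collapses its result to 0 or a negative error, so callers no longer need the old 0/1/-ENOMEM tristate. A caller-side sketch under that assumption (the wrapper name is hypothetical; try_to_unuse() in mm/swapfile.c below is the real caller):

	/* Sketch of the new calling convention, not part of the patch. */
	static int drop_shmem_swap(swp_entry_t entry, struct page *page)
	{
		int err = shmem_unuse(entry, page);

		/*
		 * On return the page has already been unlocked and released
		 * by shmem_unuse(); only a negative value means failure.
		 */
		return err < 0 ? err : 0;
	}

The second hunk's swap_shmem_alloc() call pairs with this: the entry is stamped once as SWAP_MAP_SHMEM (see its definition further down in mm/swapfile.c) rather than reference-counted per mapper.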
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9c590eef7912..6c0585b16418 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -22,6 +22,7 @@
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/ksm.h>
25#include <linux/rmap.h> 26#include <linux/rmap.h>
26#include <linux/security.h> 27#include <linux/security.h>
27#include <linux/backing-dev.h> 28#include <linux/backing-dev.h>
@@ -35,11 +36,15 @@
35#include <linux/swapops.h> 36#include <linux/swapops.h>
36#include <linux/page_cgroup.h> 37#include <linux/page_cgroup.h>
37 38
39static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
40 unsigned char);
41static void free_swap_count_continuations(struct swap_info_struct *);
42static sector_t map_swap_entry(swp_entry_t, struct block_device**);
43
38static DEFINE_SPINLOCK(swap_lock); 44static DEFINE_SPINLOCK(swap_lock);
39static unsigned int nr_swapfiles; 45static unsigned int nr_swapfiles;
40long nr_swap_pages; 46long nr_swap_pages;
41long total_swap_pages; 47long total_swap_pages;
42static int swap_overflow;
43static int least_priority; 48static int least_priority;
44 49
45static const char Bad_file[] = "Bad swap file entry "; 50static const char Bad_file[] = "Bad swap file entry ";
@@ -49,42 +54,20 @@ static const char Unused_offset[] = "Unused swap offset entry ";
49 54
50static struct swap_list_t swap_list = {-1, -1}; 55static struct swap_list_t swap_list = {-1, -1};
51 56
52static struct swap_info_struct swap_info[MAX_SWAPFILES]; 57static struct swap_info_struct *swap_info[MAX_SWAPFILES];
53 58
54static DEFINE_MUTEX(swapon_mutex); 59static DEFINE_MUTEX(swapon_mutex);
55 60
56/* For reference count accounting in swap_map */ 61static inline unsigned char swap_count(unsigned char ent)
57/* enum for swap_map[] handling. internal use only */
58enum {
59 SWAP_MAP = 0, /* ops for reference from swap users */
60 SWAP_CACHE, /* ops for reference from swap cache */
61};
62
63static inline int swap_count(unsigned short ent)
64{
65 return ent & SWAP_COUNT_MASK;
66}
67
68static inline bool swap_has_cache(unsigned short ent)
69{ 62{
70 return !!(ent & SWAP_HAS_CACHE); 63 return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
71} 64}
72 65
73static inline unsigned short encode_swapmap(int count, bool has_cache) 66/* returns 1 if swap entry is freed */
74{
75 unsigned short ret = count;
76
77 if (has_cache)
78 return SWAP_HAS_CACHE | ret;
79 return ret;
80}
81
82/* returnes 1 if swap entry is freed */
83static int 67static int
84__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) 68__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
85{ 69{
86 int type = si - swap_info; 70 swp_entry_t entry = swp_entry(si->type, offset);
87 swp_entry_t entry = swp_entry(type, offset);
88 struct page *page; 71 struct page *page;
89 int ret = 0; 72 int ret = 0;
90 73
@@ -120,7 +103,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
120 down_read(&swap_unplug_sem); 103 down_read(&swap_unplug_sem);
121 entry.val = page_private(page); 104 entry.val = page_private(page);
122 if (PageSwapCache(page)) { 105 if (PageSwapCache(page)) {
123 struct block_device *bdev = swap_info[swp_type(entry)].bdev; 106 struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
124 struct backing_dev_info *bdi; 107 struct backing_dev_info *bdi;
125 108
126 /* 109 /*
@@ -146,23 +129,28 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
146static int discard_swap(struct swap_info_struct *si) 129static int discard_swap(struct swap_info_struct *si)
147{ 130{
148 struct swap_extent *se; 131 struct swap_extent *se;
132 sector_t start_block;
133 sector_t nr_blocks;
149 int err = 0; 134 int err = 0;
150 135
151 list_for_each_entry(se, &si->extent_list, list) { 136 /* Do not discard the swap header page! */
152 sector_t start_block = se->start_block << (PAGE_SHIFT - 9); 137 se = &si->first_swap_extent;
153 sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); 138 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
139 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
140 if (nr_blocks) {
141 err = blkdev_issue_discard(si->bdev, start_block,
142 nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
143 if (err)
144 return err;
145 cond_resched();
146 }
154 147
155 if (se->start_page == 0) { 148 list_for_each_entry(se, &si->first_swap_extent.list, list) {
156 /* Do not discard the swap header page! */ 149 start_block = se->start_block << (PAGE_SHIFT - 9);
157 start_block += 1 << (PAGE_SHIFT - 9); 150 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
158 nr_blocks -= 1 << (PAGE_SHIFT - 9);
159 if (!nr_blocks)
160 continue;
161 }
162 151
163 err = blkdev_issue_discard(si->bdev, start_block, 152 err = blkdev_issue_discard(si->bdev, start_block,
164 nr_blocks, GFP_KERNEL, 153 nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
165 DISCARD_FL_BARRIER);
166 if (err) 154 if (err)
167 break; 155 break;
168 156
@@ -201,14 +189,11 @@ static void discard_swap_cluster(struct swap_info_struct *si,
201 start_block <<= PAGE_SHIFT - 9; 189 start_block <<= PAGE_SHIFT - 9;
202 nr_blocks <<= PAGE_SHIFT - 9; 190 nr_blocks <<= PAGE_SHIFT - 9;
203 if (blkdev_issue_discard(si->bdev, start_block, 191 if (blkdev_issue_discard(si->bdev, start_block,
204 nr_blocks, GFP_NOIO, 192 nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER))
205 DISCARD_FL_BARRIER))
206 break; 193 break;
207 } 194 }
208 195
209 lh = se->list.next; 196 lh = se->list.next;
210 if (lh == &si->extent_list)
211 lh = lh->next;
212 se = list_entry(lh, struct swap_extent, list); 197 se = list_entry(lh, struct swap_extent, list);
213 } 198 }
214} 199}
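The rewritten discard_swap() above peels the swap header page off the first extent before issuing discards. A small sketch of the unit arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) — the helper name is hypothetical, the shifts are the ones used in the patch:

	/* Sketch: page-sized extent -> 512-byte-sector discard range. */
	static void first_extent_discard_range(sector_t start_block,
					       sector_t nr_pages,
					       sector_t *start, sector_t *len)
	{
		/* skip page 0 of the first extent: it holds the swap header */
		*start = (start_block + 1) << (PAGE_SHIFT - 9);
		*len   = (nr_pages - 1) << (PAGE_SHIFT - 9);
		/*
		 * with 4 KiB pages each page is 8 sectors, so the header
		 * costs 8 sectors off the front and 8 off the length
		 */
	}

Keeping first_swap_extent embedded in swap_info_struct is what removes the old per-extent header check: extent 0 is always the one holding the header.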
@@ -223,7 +208,7 @@ static int wait_for_discard(void *word)
223#define LATENCY_LIMIT 256 208#define LATENCY_LIMIT 256
224 209
225static inline unsigned long scan_swap_map(struct swap_info_struct *si, 210static inline unsigned long scan_swap_map(struct swap_info_struct *si,
226 int cache) 211 unsigned char usage)
227{ 212{
228 unsigned long offset; 213 unsigned long offset;
229 unsigned long scan_base; 214 unsigned long scan_base;
@@ -354,10 +339,7 @@ checks:
354 si->lowest_bit = si->max; 339 si->lowest_bit = si->max;
355 si->highest_bit = 0; 340 si->highest_bit = 0;
356 } 341 }
357 if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */ 342 si->swap_map[offset] = usage;
358 si->swap_map[offset] = encode_swapmap(0, true);
359 else /* at suspend */
360 si->swap_map[offset] = encode_swapmap(1, false);
361 si->cluster_next = offset + 1; 343 si->cluster_next = offset + 1;
362 si->flags -= SWP_SCANNING; 344 si->flags -= SWP_SCANNING;
363 345
@@ -467,10 +449,10 @@ swp_entry_t get_swap_page(void)
467 nr_swap_pages--; 449 nr_swap_pages--;
468 450
469 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { 451 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
470 si = swap_info + type; 452 si = swap_info[type];
471 next = si->next; 453 next = si->next;
472 if (next < 0 || 454 if (next < 0 ||
473 (!wrapped && si->prio != swap_info[next].prio)) { 455 (!wrapped && si->prio != swap_info[next]->prio)) {
474 next = swap_list.head; 456 next = swap_list.head;
475 wrapped++; 457 wrapped++;
476 } 458 }
@@ -482,7 +464,7 @@ swp_entry_t get_swap_page(void)
482 464
483 swap_list.next = next; 465 swap_list.next = next;
484 /* This is called for allocating swap entry for cache */ 466 /* This is called for allocating swap entry for cache */
485 offset = scan_swap_map(si, SWAP_CACHE); 467 offset = scan_swap_map(si, SWAP_HAS_CACHE);
486 if (offset) { 468 if (offset) {
487 spin_unlock(&swap_lock); 469 spin_unlock(&swap_lock);
488 return swp_entry(type, offset); 470 return swp_entry(type, offset);
@@ -503,11 +485,11 @@ swp_entry_t get_swap_page_of_type(int type)
503 pgoff_t offset; 485 pgoff_t offset;
504 486
505 spin_lock(&swap_lock); 487 spin_lock(&swap_lock);
506 si = swap_info + type; 488 si = swap_info[type];
507 if (si->flags & SWP_WRITEOK) { 489 if (si && (si->flags & SWP_WRITEOK)) {
508 nr_swap_pages--; 490 nr_swap_pages--;
509 /* This is called for allocating swap entry, not cache */ 491 /* This is called for allocating swap entry, not cache */
510 offset = scan_swap_map(si, SWAP_MAP); 492 offset = scan_swap_map(si, 1);
511 if (offset) { 493 if (offset) {
512 spin_unlock(&swap_lock); 494 spin_unlock(&swap_lock);
513 return swp_entry(type, offset); 495 return swp_entry(type, offset);
@@ -518,9 +500,9 @@ swp_entry_t get_swap_page_of_type(int type)
518 return (swp_entry_t) {0}; 500 return (swp_entry_t) {0};
519} 501}
520 502
521static struct swap_info_struct * swap_info_get(swp_entry_t entry) 503static struct swap_info_struct *swap_info_get(swp_entry_t entry)
522{ 504{
523 struct swap_info_struct * p; 505 struct swap_info_struct *p;
524 unsigned long offset, type; 506 unsigned long offset, type;
525 507
526 if (!entry.val) 508 if (!entry.val)
@@ -528,7 +510,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry)
528 type = swp_type(entry); 510 type = swp_type(entry);
529 if (type >= nr_swapfiles) 511 if (type >= nr_swapfiles)
530 goto bad_nofile; 512 goto bad_nofile;
531 p = & swap_info[type]; 513 p = swap_info[type];
532 if (!(p->flags & SWP_USED)) 514 if (!(p->flags & SWP_USED))
533 goto bad_device; 515 goto bad_device;
534 offset = swp_offset(entry); 516 offset = swp_offset(entry);
@@ -554,41 +536,56 @@ out:
554 return NULL; 536 return NULL;
555} 537}
556 538
557static int swap_entry_free(struct swap_info_struct *p, 539static unsigned char swap_entry_free(struct swap_info_struct *p,
558 swp_entry_t ent, int cache) 540 swp_entry_t entry, unsigned char usage)
559{ 541{
560 unsigned long offset = swp_offset(ent); 542 unsigned long offset = swp_offset(entry);
561 int count = swap_count(p->swap_map[offset]); 543 unsigned char count;
562 bool has_cache; 544 unsigned char has_cache;
563 545
564 has_cache = swap_has_cache(p->swap_map[offset]); 546 count = p->swap_map[offset];
547 has_cache = count & SWAP_HAS_CACHE;
548 count &= ~SWAP_HAS_CACHE;
565 549
566 if (cache == SWAP_MAP) { /* dropping usage count of swap */ 550 if (usage == SWAP_HAS_CACHE) {
567 if (count < SWAP_MAP_MAX) {
568 count--;
569 p->swap_map[offset] = encode_swapmap(count, has_cache);
570 }
571 } else { /* dropping swap cache flag */
572 VM_BUG_ON(!has_cache); 551 VM_BUG_ON(!has_cache);
573 p->swap_map[offset] = encode_swapmap(count, false); 552 has_cache = 0;
574 553 } else if (count == SWAP_MAP_SHMEM) {
554 /*
555 * Or we could insist on shmem.c using a special
556 * swap_shmem_free() and free_shmem_swap_and_cache()...
557 */
558 count = 0;
559 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
560 if (count == COUNT_CONTINUED) {
561 if (swap_count_continued(p, offset, count))
562 count = SWAP_MAP_MAX | COUNT_CONTINUED;
563 else
564 count = SWAP_MAP_MAX;
565 } else
566 count--;
575 } 567 }
576 /* return code. */ 568
577 count = p->swap_map[offset]; 569 if (!count)
570 mem_cgroup_uncharge_swap(entry);
571
572 usage = count | has_cache;
573 p->swap_map[offset] = usage;
574
578 /* free if no reference */ 575 /* free if no reference */
579 if (!count) { 576 if (!usage) {
580 if (offset < p->lowest_bit) 577 if (offset < p->lowest_bit)
581 p->lowest_bit = offset; 578 p->lowest_bit = offset;
582 if (offset > p->highest_bit) 579 if (offset > p->highest_bit)
583 p->highest_bit = offset; 580 p->highest_bit = offset;
584 if (p->prio > swap_info[swap_list.next].prio) 581 if (swap_list.next >= 0 &&
585 swap_list.next = p - swap_info; 582 p->prio > swap_info[swap_list.next]->prio)
583 swap_list.next = p->type;
586 nr_swap_pages++; 584 nr_swap_pages++;
587 p->inuse_pages--; 585 p->inuse_pages--;
588 } 586 }
589 if (!swap_count(count)) 587
590 mem_cgroup_uncharge_swap(ent); 588 return usage;
591 return count;
592} 589}
593 590
594/* 591/*
@@ -597,11 +594,11 @@ static int swap_entry_free(struct swap_info_struct *p,
597 */ 594 */
598void swap_free(swp_entry_t entry) 595void swap_free(swp_entry_t entry)
599{ 596{
600 struct swap_info_struct * p; 597 struct swap_info_struct *p;
601 598
602 p = swap_info_get(entry); 599 p = swap_info_get(entry);
603 if (p) { 600 if (p) {
604 swap_entry_free(p, entry, SWAP_MAP); 601 swap_entry_free(p, entry, 1);
605 spin_unlock(&swap_lock); 602 spin_unlock(&swap_lock);
606 } 603 }
607} 604}
@@ -612,26 +609,21 @@ void swap_free(swp_entry_t entry)
612void swapcache_free(swp_entry_t entry, struct page *page) 609void swapcache_free(swp_entry_t entry, struct page *page)
613{ 610{
614 struct swap_info_struct *p; 611 struct swap_info_struct *p;
615 int ret; 612 unsigned char count;
616 613
617 p = swap_info_get(entry); 614 p = swap_info_get(entry);
618 if (p) { 615 if (p) {
619 ret = swap_entry_free(p, entry, SWAP_CACHE); 616 count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
620 if (page) { 617 if (page)
621 bool swapout; 618 mem_cgroup_uncharge_swapcache(page, entry, count != 0);
622 if (ret)
623 swapout = true; /* the end of swap out */
624 else
625 swapout = false; /* no more swap users! */
626 mem_cgroup_uncharge_swapcache(page, entry, swapout);
627 }
628 spin_unlock(&swap_lock); 619 spin_unlock(&swap_lock);
629 } 620 }
630 return;
631} 621}
632 622
633/* 623/*
634 * How many references to page are currently swapped out? 624 * How many references to page are currently swapped out?
625 * This does not give an exact answer when swap count is continued,
626 * but does include the high COUNT_CONTINUED flag to allow for that.
635 */ 627 */
636static inline int page_swapcount(struct page *page) 628static inline int page_swapcount(struct page *page)
637{ 629{
@@ -659,6 +651,8 @@ int reuse_swap_page(struct page *page)
659 int count; 651 int count;
660 652
661 VM_BUG_ON(!PageLocked(page)); 653 VM_BUG_ON(!PageLocked(page));
654 if (unlikely(PageKsm(page)))
655 return 0;
662 count = page_mapcount(page); 656 count = page_mapcount(page);
663 if (count <= 1 && PageSwapCache(page)) { 657 if (count <= 1 && PageSwapCache(page)) {
664 count += page_swapcount(page); 658 count += page_swapcount(page);
@@ -667,7 +661,7 @@ int reuse_swap_page(struct page *page)
667 SetPageDirty(page); 661 SetPageDirty(page);
668 } 662 }
669 } 663 }
670 return count == 1; 664 return count <= 1;
671} 665}
672 666
673/* 667/*
@@ -704,7 +698,7 @@ int free_swap_and_cache(swp_entry_t entry)
704 698
705 p = swap_info_get(entry); 699 p = swap_info_get(entry);
706 if (p) { 700 if (p) {
707 if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) { 701 if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
708 page = find_get_page(&swapper_space, entry.val); 702 page = find_get_page(&swapper_space, entry.val);
709 if (page && !trylock_page(page)) { 703 if (page && !trylock_page(page)) {
710 page_cache_release(page); 704 page_cache_release(page);
@@ -741,14 +735,14 @@ int free_swap_and_cache(swp_entry_t entry)
741int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) 735int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
742{ 736{
743 struct block_device *bdev = NULL; 737 struct block_device *bdev = NULL;
744 int i; 738 int type;
745 739
746 if (device) 740 if (device)
747 bdev = bdget(device); 741 bdev = bdget(device);
748 742
749 spin_lock(&swap_lock); 743 spin_lock(&swap_lock);
750 for (i = 0; i < nr_swapfiles; i++) { 744 for (type = 0; type < nr_swapfiles; type++) {
751 struct swap_info_struct *sis = swap_info + i; 745 struct swap_info_struct *sis = swap_info[type];
752 746
753 if (!(sis->flags & SWP_WRITEOK)) 747 if (!(sis->flags & SWP_WRITEOK))
754 continue; 748 continue;
@@ -758,20 +752,18 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
758 *bdev_p = bdgrab(sis->bdev); 752 *bdev_p = bdgrab(sis->bdev);
759 753
760 spin_unlock(&swap_lock); 754 spin_unlock(&swap_lock);
761 return i; 755 return type;
762 } 756 }
763 if (bdev == sis->bdev) { 757 if (bdev == sis->bdev) {
764 struct swap_extent *se; 758 struct swap_extent *se = &sis->first_swap_extent;
765 759
766 se = list_entry(sis->extent_list.next,
767 struct swap_extent, list);
768 if (se->start_block == offset) { 760 if (se->start_block == offset) {
769 if (bdev_p) 761 if (bdev_p)
770 *bdev_p = bdgrab(sis->bdev); 762 *bdev_p = bdgrab(sis->bdev);
771 763
772 spin_unlock(&swap_lock); 764 spin_unlock(&swap_lock);
773 bdput(bdev); 765 bdput(bdev);
774 return i; 766 return type;
775 } 767 }
776 } 768 }
777 } 769 }
@@ -783,6 +775,21 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
783} 775}
784 776
785/* 777/*
778 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
779 * corresponding to given index in swap_info (swap type).
780 */
781sector_t swapdev_block(int type, pgoff_t offset)
782{
783 struct block_device *bdev;
784
785 if ((unsigned int)type >= nr_swapfiles)
786 return 0;
787 if (!(swap_info[type]->flags & SWP_WRITEOK))
788 return 0;
789 return map_swap_entry(swp_entry(type, offset), &bdev);
790}
791
792/*
786 * Return either the total number of swap pages of given type, or the number 793 * Return either the total number of swap pages of given type, or the number
787 * of free pages of that type (depending on @free) 794 * of free pages of that type (depending on @free)
788 * 795 *
@@ -792,18 +799,20 @@ unsigned int count_swap_pages(int type, int free)
792{ 799{
793 unsigned int n = 0; 800 unsigned int n = 0;
794 801
795 if (type < nr_swapfiles) { 802 spin_lock(&swap_lock);
796 spin_lock(&swap_lock); 803 if ((unsigned int)type < nr_swapfiles) {
797 if (swap_info[type].flags & SWP_WRITEOK) { 804 struct swap_info_struct *sis = swap_info[type];
798 n = swap_info[type].pages; 805
806 if (sis->flags & SWP_WRITEOK) {
807 n = sis->pages;
799 if (free) 808 if (free)
800 n -= swap_info[type].inuse_pages; 809 n -= sis->inuse_pages;
801 } 810 }
802 spin_unlock(&swap_lock);
803 } 811 }
812 spin_unlock(&swap_lock);
804 return n; 813 return n;
805} 814}
806#endif 815#endif /* CONFIG_HIBERNATION */
807 816
808/* 817/*
809 * No need to decide whether this PTE shares the swap entry with others, 818 * No need to decide whether this PTE shares the swap entry with others,
@@ -932,7 +941,7 @@ static int unuse_vma(struct vm_area_struct *vma,
932 unsigned long addr, end, next; 941 unsigned long addr, end, next;
933 int ret; 942 int ret;
934 943
935 if (page->mapping) { 944 if (page_anon_vma(page)) {
936 addr = page_address_in_vma(page, vma); 945 addr = page_address_in_vma(page, vma);
937 if (addr == -EFAULT) 946 if (addr == -EFAULT)
938 return 0; 947 return 0;
@@ -988,7 +997,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
988{ 997{
989 unsigned int max = si->max; 998 unsigned int max = si->max;
990 unsigned int i = prev; 999 unsigned int i = prev;
991 int count; 1000 unsigned char count;
992 1001
993 /* 1002 /*
994 * No need for swap_lock here: we're just looking 1003 * No need for swap_lock here: we're just looking
@@ -1024,16 +1033,14 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1024 */ 1033 */
1025static int try_to_unuse(unsigned int type) 1034static int try_to_unuse(unsigned int type)
1026{ 1035{
1027 struct swap_info_struct * si = &swap_info[type]; 1036 struct swap_info_struct *si = swap_info[type];
1028 struct mm_struct *start_mm; 1037 struct mm_struct *start_mm;
1029 unsigned short *swap_map; 1038 unsigned char *swap_map;
1030 unsigned short swcount; 1039 unsigned char swcount;
1031 struct page *page; 1040 struct page *page;
1032 swp_entry_t entry; 1041 swp_entry_t entry;
1033 unsigned int i = 0; 1042 unsigned int i = 0;
1034 int retval = 0; 1043 int retval = 0;
1035 int reset_overflow = 0;
1036 int shmem;
1037 1044
1038 /* 1045 /*
1039 * When searching mms for an entry, a good strategy is to 1046 * When searching mms for an entry, a good strategy is to
@@ -1047,8 +1054,7 @@ static int try_to_unuse(unsigned int type)
1047 * together, child after parent. If we race with dup_mmap(), we 1054 * together, child after parent. If we race with dup_mmap(), we
1048 * prefer to resolve parent before child, lest we miss entries 1055 * prefer to resolve parent before child, lest we miss entries
1049 * duplicated after we scanned child: using last mm would invert 1056 * duplicated after we scanned child: using last mm would invert
1050 * that. Though it's only a serious concern when an overflowed 1057 * that.
1051 * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
1052 */ 1058 */
1053 start_mm = &init_mm; 1059 start_mm = &init_mm;
1054 atomic_inc(&init_mm.mm_users); 1060 atomic_inc(&init_mm.mm_users);
@@ -1110,17 +1116,18 @@ static int try_to_unuse(unsigned int type)
1110 1116
1111 /* 1117 /*
1112 * Remove all references to entry. 1118 * Remove all references to entry.
1113 * Whenever we reach init_mm, there's no address space
1114 * to search, but use it as a reminder to search shmem.
1115 */ 1119 */
1116 shmem = 0;
1117 swcount = *swap_map; 1120 swcount = *swap_map;
1118 if (swap_count(swcount)) { 1121 if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1119 if (start_mm == &init_mm) 1122 retval = shmem_unuse(entry, page);
1120 shmem = shmem_unuse(entry, page); 1123 /* page has already been unlocked and released */
1121 else 1124 if (retval < 0)
1122 retval = unuse_mm(start_mm, entry, page); 1125 break;
1126 continue;
1123 } 1127 }
1128 if (swap_count(swcount) && start_mm != &init_mm)
1129 retval = unuse_mm(start_mm, entry, page);
1130
1124 if (swap_count(*swap_map)) { 1131 if (swap_count(*swap_map)) {
1125 int set_start_mm = (*swap_map >= swcount); 1132 int set_start_mm = (*swap_map >= swcount);
1126 struct list_head *p = &start_mm->mmlist; 1133 struct list_head *p = &start_mm->mmlist;
@@ -1131,7 +1138,7 @@ static int try_to_unuse(unsigned int type)
1131 atomic_inc(&new_start_mm->mm_users); 1138 atomic_inc(&new_start_mm->mm_users);
1132 atomic_inc(&prev_mm->mm_users); 1139 atomic_inc(&prev_mm->mm_users);
1133 spin_lock(&mmlist_lock); 1140 spin_lock(&mmlist_lock);
1134 while (swap_count(*swap_map) && !retval && !shmem && 1141 while (swap_count(*swap_map) && !retval &&
1135 (p = p->next) != &start_mm->mmlist) { 1142 (p = p->next) != &start_mm->mmlist) {
1136 mm = list_entry(p, struct mm_struct, mmlist); 1143 mm = list_entry(p, struct mm_struct, mmlist);
1137 if (!atomic_inc_not_zero(&mm->mm_users)) 1144 if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1145,10 +1152,9 @@ static int try_to_unuse(unsigned int type)
1145 swcount = *swap_map; 1152 swcount = *swap_map;
1146 if (!swap_count(swcount)) /* any usage ? */ 1153 if (!swap_count(swcount)) /* any usage ? */
1147 ; 1154 ;
1148 else if (mm == &init_mm) { 1155 else if (mm == &init_mm)
1149 set_start_mm = 1; 1156 set_start_mm = 1;
1150 shmem = shmem_unuse(entry, page); 1157 else
1151 } else
1152 retval = unuse_mm(mm, entry, page); 1158 retval = unuse_mm(mm, entry, page);
1153 1159
1154 if (set_start_mm && *swap_map < swcount) { 1160 if (set_start_mm && *swap_map < swcount) {
@@ -1164,13 +1170,6 @@ static int try_to_unuse(unsigned int type)
1164 mmput(start_mm); 1170 mmput(start_mm);
1165 start_mm = new_start_mm; 1171 start_mm = new_start_mm;
1166 } 1172 }
1167 if (shmem) {
1168 /* page has already been unlocked and released */
1169 if (shmem > 0)
1170 continue;
1171 retval = shmem;
1172 break;
1173 }
1174 if (retval) { 1173 if (retval) {
1175 unlock_page(page); 1174 unlock_page(page);
1176 page_cache_release(page); 1175 page_cache_release(page);
@@ -1178,30 +1177,6 @@ static int try_to_unuse(unsigned int type)
1178 } 1177 }
1179 1178
1180 /* 1179 /*
1181 * How could swap count reach 0x7ffe ?
1182 * There's no way to repeat a swap page within an mm
1183 * (except in shmem, where it's the shared object which takes
1184 * the reference count)?
1185 * We believe SWAP_MAP_MAX cannot occur.(if occur, unsigned
1186 * short is too small....)
1187 * If that's wrong, then we should worry more about
1188 * exit_mmap() and do_munmap() cases described above:
1189 * we might be resetting SWAP_MAP_MAX too early here.
1190 * We know "Undead"s can happen, they're okay, so don't
1191 * report them; but do report if we reset SWAP_MAP_MAX.
1192 */
1193 /* We might release the lock_page() in unuse_mm(). */
1194 if (!PageSwapCache(page) || page_private(page) != entry.val)
1195 goto retry;
1196
1197 if (swap_count(*swap_map) == SWAP_MAP_MAX) {
1198 spin_lock(&swap_lock);
1199 *swap_map = encode_swapmap(0, true);
1200 spin_unlock(&swap_lock);
1201 reset_overflow = 1;
1202 }
1203
1204 /*
1205 * If a reference remains (rare), we would like to leave 1180 * If a reference remains (rare), we would like to leave
1206 * the page in the swap cache; but try_to_unmap could 1181 * the page in the swap cache; but try_to_unmap could
1207 * then re-duplicate the entry once we drop page lock, 1182 * then re-duplicate the entry once we drop page lock,
@@ -1213,6 +1188,12 @@ static int try_to_unuse(unsigned int type)
1213 * read from disk into another page. Splitting into two 1188 * read from disk into another page. Splitting into two
1214 * pages would be incorrect if swap supported "shared 1189 * pages would be incorrect if swap supported "shared
1215 * private" pages, but they are handled by tmpfs files. 1190 * private" pages, but they are handled by tmpfs files.
1191 *
1192 * Given how unuse_vma() targets one particular offset
1193 * in an anon_vma, once the anon_vma has been determined,
1194 * this splitting happens to be just what is needed to
1195 * handle where KSM pages have been swapped out: re-reading
1196 * is unnecessarily slow, but we can fix that later on.
1216 */ 1197 */
1217 if (swap_count(*swap_map) && 1198 if (swap_count(*swap_map) &&
1218 PageDirty(page) && PageSwapCache(page)) { 1199 PageDirty(page) && PageSwapCache(page)) {
@@ -1242,7 +1223,6 @@ static int try_to_unuse(unsigned int type)
1242 * mark page dirty so shrink_page_list will preserve it. 1223 * mark page dirty so shrink_page_list will preserve it.
1243 */ 1224 */
1244 SetPageDirty(page); 1225 SetPageDirty(page);
1245retry:
1246 unlock_page(page); 1226 unlock_page(page);
1247 page_cache_release(page); 1227 page_cache_release(page);
1248 1228
@@ -1254,10 +1234,6 @@ retry:
1254 } 1234 }
1255 1235
1256 mmput(start_mm); 1236 mmput(start_mm);
1257 if (reset_overflow) {
1258 printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
1259 swap_overflow = 0;
1260 }
1261 return retval; 1237 return retval;
1262} 1238}
1263 1239
@@ -1270,10 +1246,10 @@ retry:
1270static void drain_mmlist(void) 1246static void drain_mmlist(void)
1271{ 1247{
1272 struct list_head *p, *next; 1248 struct list_head *p, *next;
1273 unsigned int i; 1249 unsigned int type;
1274 1250
1275 for (i = 0; i < nr_swapfiles; i++) 1251 for (type = 0; type < nr_swapfiles; type++)
1276 if (swap_info[i].inuse_pages) 1252 if (swap_info[type]->inuse_pages)
1277 return; 1253 return;
1278 spin_lock(&mmlist_lock); 1254 spin_lock(&mmlist_lock);
1279 list_for_each_safe(p, next, &init_mm.mmlist) 1255 list_for_each_safe(p, next, &init_mm.mmlist)
@@ -1283,12 +1259,23 @@ static void drain_mmlist(void)
1283 1259
1284/* 1260/*
1285 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which 1261 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1286 * corresponds to page offset `offset'. 1262 * corresponds to page offset for the specified swap entry.
1263 * Note that the type of this function is sector_t, but it returns page offset
1264 * into the bdev, not sector offset.
1287 */ 1265 */
1288sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) 1266static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1289{ 1267{
1290 struct swap_extent *se = sis->curr_swap_extent; 1268 struct swap_info_struct *sis;
1291 struct swap_extent *start_se = se; 1269 struct swap_extent *start_se;
1270 struct swap_extent *se;
1271 pgoff_t offset;
1272
1273 sis = swap_info[swp_type(entry)];
1274 *bdev = sis->bdev;
1275
1276 offset = swp_offset(entry);
1277 start_se = sis->curr_swap_extent;
1278 se = start_se;
1292 1279
1293 for ( ; ; ) { 1280 for ( ; ; ) {
1294 struct list_head *lh; 1281 struct list_head *lh;
@@ -1298,40 +1285,31 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
1298 return se->start_block + (offset - se->start_page); 1285 return se->start_block + (offset - se->start_page);
1299 } 1286 }
1300 lh = se->list.next; 1287 lh = se->list.next;
1301 if (lh == &sis->extent_list)
1302 lh = lh->next;
1303 se = list_entry(lh, struct swap_extent, list); 1288 se = list_entry(lh, struct swap_extent, list);
1304 sis->curr_swap_extent = se; 1289 sis->curr_swap_extent = se;
1305 BUG_ON(se == start_se); /* It *must* be present */ 1290 BUG_ON(se == start_se); /* It *must* be present */
1306 } 1291 }
1307} 1292}
1308 1293
1309#ifdef CONFIG_HIBERNATION
1310/* 1294/*
1311 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev 1295 * Returns the page offset into bdev for the specified page's swap entry.
1312 * corresponding to given index in swap_info (swap type).
1313 */ 1296 */
1314sector_t swapdev_block(int swap_type, pgoff_t offset) 1297sector_t map_swap_page(struct page *page, struct block_device **bdev)
1315{ 1298{
1316 struct swap_info_struct *sis; 1299 swp_entry_t entry;
1317 1300 entry.val = page_private(page);
1318 if (swap_type >= nr_swapfiles) 1301 return map_swap_entry(entry, bdev);
1319 return 0;
1320
1321 sis = swap_info + swap_type;
1322 return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
1323} 1302}
1324#endif /* CONFIG_HIBERNATION */
1325 1303
1326/* 1304/*
1327 * Free all of a swapdev's extent information 1305 * Free all of a swapdev's extent information
1328 */ 1306 */
1329static void destroy_swap_extents(struct swap_info_struct *sis) 1307static void destroy_swap_extents(struct swap_info_struct *sis)
1330{ 1308{
1331 while (!list_empty(&sis->extent_list)) { 1309 while (!list_empty(&sis->first_swap_extent.list)) {
1332 struct swap_extent *se; 1310 struct swap_extent *se;
1333 1311
1334 se = list_entry(sis->extent_list.next, 1312 se = list_entry(sis->first_swap_extent.list.next,
1335 struct swap_extent, list); 1313 struct swap_extent, list);
1336 list_del(&se->list); 1314 list_del(&se->list);
1337 kfree(se); 1315 kfree(se);
@@ -1352,8 +1330,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1352 struct swap_extent *new_se; 1330 struct swap_extent *new_se;
1353 struct list_head *lh; 1331 struct list_head *lh;
1354 1332
1355 lh = sis->extent_list.prev; /* The highest page extent */ 1333 if (start_page == 0) {
1356 if (lh != &sis->extent_list) { 1334 se = &sis->first_swap_extent;
1335 sis->curr_swap_extent = se;
1336 se->start_page = 0;
1337 se->nr_pages = nr_pages;
1338 se->start_block = start_block;
1339 return 1;
1340 } else {
1341 lh = sis->first_swap_extent.list.prev; /* Highest extent */
1357 se = list_entry(lh, struct swap_extent, list); 1342 se = list_entry(lh, struct swap_extent, list);
1358 BUG_ON(se->start_page + se->nr_pages != start_page); 1343 BUG_ON(se->start_page + se->nr_pages != start_page);
1359 if (se->start_block + se->nr_pages == start_block) { 1344 if (se->start_block + se->nr_pages == start_block) {
@@ -1373,7 +1358,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1373 new_se->nr_pages = nr_pages; 1358 new_se->nr_pages = nr_pages;
1374 new_se->start_block = start_block; 1359 new_se->start_block = start_block;
1375 1360
1376 list_add_tail(&new_se->list, &sis->extent_list); 1361 list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1377 return 1; 1362 return 1;
1378} 1363}
1379 1364
@@ -1425,7 +1410,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1425 if (S_ISBLK(inode->i_mode)) { 1410 if (S_ISBLK(inode->i_mode)) {
1426 ret = add_swap_extent(sis, 0, sis->max, 0); 1411 ret = add_swap_extent(sis, 0, sis->max, 0);
1427 *span = sis->pages; 1412 *span = sis->pages;
1428 goto done; 1413 goto out;
1429 } 1414 }
1430 1415
1431 blkbits = inode->i_blkbits; 1416 blkbits = inode->i_blkbits;
@@ -1496,25 +1481,22 @@ reprobe:
1496 sis->max = page_no; 1481 sis->max = page_no;
1497 sis->pages = page_no - 1; 1482 sis->pages = page_no - 1;
1498 sis->highest_bit = page_no - 1; 1483 sis->highest_bit = page_no - 1;
1499done: 1484out:
1500 sis->curr_swap_extent = list_entry(sis->extent_list.prev, 1485 return ret;
1501 struct swap_extent, list);
1502 goto out;
1503bad_bmap: 1486bad_bmap:
1504 printk(KERN_ERR "swapon: swapfile has holes\n"); 1487 printk(KERN_ERR "swapon: swapfile has holes\n");
1505 ret = -EINVAL; 1488 ret = -EINVAL;
1506out: 1489 goto out;
1507 return ret;
1508} 1490}
1509 1491
1510SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 1492SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1511{ 1493{
1512 struct swap_info_struct * p = NULL; 1494 struct swap_info_struct *p = NULL;
1513 unsigned short *swap_map; 1495 unsigned char *swap_map;
1514 struct file *swap_file, *victim; 1496 struct file *swap_file, *victim;
1515 struct address_space *mapping; 1497 struct address_space *mapping;
1516 struct inode *inode; 1498 struct inode *inode;
1517 char * pathname; 1499 char *pathname;
1518 int i, type, prev; 1500 int i, type, prev;
1519 int err; 1501 int err;
1520 1502
@@ -1535,8 +1517,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1535 mapping = victim->f_mapping; 1517 mapping = victim->f_mapping;
1536 prev = -1; 1518 prev = -1;
1537 spin_lock(&swap_lock); 1519 spin_lock(&swap_lock);
1538 for (type = swap_list.head; type >= 0; type = swap_info[type].next) { 1520 for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1539 p = swap_info + type; 1521 p = swap_info[type];
1540 if (p->flags & SWP_WRITEOK) { 1522 if (p->flags & SWP_WRITEOK) {
1541 if (p->swap_file->f_mapping == mapping) 1523 if (p->swap_file->f_mapping == mapping)
1542 break; 1524 break;
@@ -1555,18 +1537,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1555 spin_unlock(&swap_lock); 1537 spin_unlock(&swap_lock);
1556 goto out_dput; 1538 goto out_dput;
1557 } 1539 }
1558 if (prev < 0) { 1540 if (prev < 0)
1559 swap_list.head = p->next; 1541 swap_list.head = p->next;
1560 } else { 1542 else
1561 swap_info[prev].next = p->next; 1543 swap_info[prev]->next = p->next;
1562 }
1563 if (type == swap_list.next) { 1544 if (type == swap_list.next) {
1564 /* just pick something that's safe... */ 1545 /* just pick something that's safe... */
1565 swap_list.next = swap_list.head; 1546 swap_list.next = swap_list.head;
1566 } 1547 }
1567 if (p->prio < 0) { 1548 if (p->prio < 0) {
1568 for (i = p->next; i >= 0; i = swap_info[i].next) 1549 for (i = p->next; i >= 0; i = swap_info[i]->next)
1569 swap_info[i].prio = p->prio--; 1550 swap_info[i]->prio = p->prio--;
1570 least_priority++; 1551 least_priority++;
1571 } 1552 }
1572 nr_swap_pages -= p->pages; 1553 nr_swap_pages -= p->pages;
@@ -1584,16 +1565,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1584 if (p->prio < 0) 1565 if (p->prio < 0)
1585 p->prio = --least_priority; 1566 p->prio = --least_priority;
1586 prev = -1; 1567 prev = -1;
1587 for (i = swap_list.head; i >= 0; i = swap_info[i].next) { 1568 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1588 if (p->prio >= swap_info[i].prio) 1569 if (p->prio >= swap_info[i]->prio)
1589 break; 1570 break;
1590 prev = i; 1571 prev = i;
1591 } 1572 }
1592 p->next = i; 1573 p->next = i;
1593 if (prev < 0) 1574 if (prev < 0)
1594 swap_list.head = swap_list.next = p - swap_info; 1575 swap_list.head = swap_list.next = type;
1595 else 1576 else
1596 swap_info[prev].next = p - swap_info; 1577 swap_info[prev]->next = type;
1597 nr_swap_pages += p->pages; 1578 nr_swap_pages += p->pages;
1598 total_swap_pages += p->pages; 1579 total_swap_pages += p->pages;
1599 p->flags |= SWP_WRITEOK; 1580 p->flags |= SWP_WRITEOK;
@@ -1606,6 +1587,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1606 up_write(&swap_unplug_sem); 1587 up_write(&swap_unplug_sem);
1607 1588
1608 destroy_swap_extents(p); 1589 destroy_swap_extents(p);
1590 if (p->flags & SWP_CONTINUED)
1591 free_swap_count_continuations(p);
1592
1609 mutex_lock(&swapon_mutex); 1593 mutex_lock(&swapon_mutex);
1610 spin_lock(&swap_lock); 1594 spin_lock(&swap_lock);
1611 drain_mmlist(); 1595 drain_mmlist();
@@ -1653,8 +1637,8 @@ out:
1653/* iterator */ 1637/* iterator */
1654static void *swap_start(struct seq_file *swap, loff_t *pos) 1638static void *swap_start(struct seq_file *swap, loff_t *pos)
1655{ 1639{
1656 struct swap_info_struct *ptr = swap_info; 1640 struct swap_info_struct *si;
1657 int i; 1641 int type;
1658 loff_t l = *pos; 1642 loff_t l = *pos;
1659 1643
1660 mutex_lock(&swapon_mutex); 1644 mutex_lock(&swapon_mutex);
@@ -1662,11 +1646,13 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
1662 if (!l) 1646 if (!l)
1663 return SEQ_START_TOKEN; 1647 return SEQ_START_TOKEN;
1664 1648
1665 for (i = 0; i < nr_swapfiles; i++, ptr++) { 1649 for (type = 0; type < nr_swapfiles; type++) {
1666 if (!(ptr->flags & SWP_USED) || !ptr->swap_map) 1650 smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1651 si = swap_info[type];
1652 if (!(si->flags & SWP_USED) || !si->swap_map)
1667 continue; 1653 continue;
1668 if (!--l) 1654 if (!--l)
1669 return ptr; 1655 return si;
1670 } 1656 }
1671 1657
1672 return NULL; 1658 return NULL;
@@ -1674,21 +1660,21 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
1674 1660
1675static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 1661static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1676{ 1662{
1677 struct swap_info_struct *ptr; 1663 struct swap_info_struct *si = v;
1678 struct swap_info_struct *endptr = swap_info + nr_swapfiles; 1664 int type;
1679 1665
1680 if (v == SEQ_START_TOKEN) 1666 if (v == SEQ_START_TOKEN)
1681 ptr = swap_info; 1667 type = 0;
1682 else { 1668 else
1683 ptr = v; 1669 type = si->type + 1;
1684 ptr++;
1685 }
1686 1670
1687 for (; ptr < endptr; ptr++) { 1671 for (; type < nr_swapfiles; type++) {
1688 if (!(ptr->flags & SWP_USED) || !ptr->swap_map) 1672 smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1673 si = swap_info[type];
1674 if (!(si->flags & SWP_USED) || !si->swap_map)
1689 continue; 1675 continue;
1690 ++*pos; 1676 ++*pos;
1691 return ptr; 1677 return si;
1692 } 1678 }
1693 1679
1694 return NULL; 1680 return NULL;
@@ -1701,24 +1687,24 @@ static void swap_stop(struct seq_file *swap, void *v)
1701 1687
1702static int swap_show(struct seq_file *swap, void *v) 1688static int swap_show(struct seq_file *swap, void *v)
1703{ 1689{
1704 struct swap_info_struct *ptr = v; 1690 struct swap_info_struct *si = v;
1705 struct file *file; 1691 struct file *file;
1706 int len; 1692 int len;
1707 1693
1708 if (ptr == SEQ_START_TOKEN) { 1694 if (si == SEQ_START_TOKEN) {
1709 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); 1695 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1710 return 0; 1696 return 0;
1711 } 1697 }
1712 1698
1713 file = ptr->swap_file; 1699 file = si->swap_file;
1714 len = seq_path(swap, &file->f_path, " \t\n\\"); 1700 len = seq_path(swap, &file->f_path, " \t\n\\");
1715 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", 1701 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1716 len < 40 ? 40 - len : 1, " ", 1702 len < 40 ? 40 - len : 1, " ",
1717 S_ISBLK(file->f_path.dentry->d_inode->i_mode) ? 1703 S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
1718 "partition" : "file\t", 1704 "partition" : "file\t",
1719 ptr->pages << (PAGE_SHIFT - 10), 1705 si->pages << (PAGE_SHIFT - 10),
1720 ptr->inuse_pages << (PAGE_SHIFT - 10), 1706 si->inuse_pages << (PAGE_SHIFT - 10),
1721 ptr->prio); 1707 si->prio);
1722 return 0; 1708 return 0;
1723} 1709}
1724 1710
@@ -1765,7 +1751,7 @@ late_initcall(max_swapfiles_check);
1765 */ 1751 */
1766SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 1752SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1767{ 1753{
1768 struct swap_info_struct * p; 1754 struct swap_info_struct *p;
1769 char *name = NULL; 1755 char *name = NULL;
1770 struct block_device *bdev = NULL; 1756 struct block_device *bdev = NULL;
1771 struct file *swap_file = NULL; 1757 struct file *swap_file = NULL;
@@ -1779,30 +1765,52 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1779 sector_t span; 1765 sector_t span;
1780 unsigned long maxpages = 1; 1766 unsigned long maxpages = 1;
1781 unsigned long swapfilepages; 1767 unsigned long swapfilepages;
1782 unsigned short *swap_map = NULL; 1768 unsigned char *swap_map = NULL;
1783 struct page *page = NULL; 1769 struct page *page = NULL;
1784 struct inode *inode = NULL; 1770 struct inode *inode = NULL;
1785 int did_down = 0; 1771 int did_down = 0;
1786 1772
1787 if (!capable(CAP_SYS_ADMIN)) 1773 if (!capable(CAP_SYS_ADMIN))
1788 return -EPERM; 1774 return -EPERM;
1775
1776 p = kzalloc(sizeof(*p), GFP_KERNEL);
1777 if (!p)
1778 return -ENOMEM;
1779
1789 spin_lock(&swap_lock); 1780 spin_lock(&swap_lock);
1790 p = swap_info; 1781 for (type = 0; type < nr_swapfiles; type++) {
1791 for (type = 0 ; type < nr_swapfiles ; type++,p++) 1782 if (!(swap_info[type]->flags & SWP_USED))
1792 if (!(p->flags & SWP_USED))
1793 break; 1783 break;
1784 }
1794 error = -EPERM; 1785 error = -EPERM;
1795 if (type >= MAX_SWAPFILES) { 1786 if (type >= MAX_SWAPFILES) {
1796 spin_unlock(&swap_lock); 1787 spin_unlock(&swap_lock);
1788 kfree(p);
1797 goto out; 1789 goto out;
1798 } 1790 }
1799 if (type >= nr_swapfiles) 1791 if (type >= nr_swapfiles) {
1800 nr_swapfiles = type+1; 1792 p->type = type;
1801 memset(p, 0, sizeof(*p)); 1793 swap_info[type] = p;
1802 INIT_LIST_HEAD(&p->extent_list); 1794 /*
1795 * Write swap_info[type] before nr_swapfiles, in case a
1796 * racing procfs swap_start() or swap_next() is reading them.
1797 * (We never shrink nr_swapfiles, we never free this entry.)
1798 */
1799 smp_wmb();
1800 nr_swapfiles++;
1801 } else {
1802 kfree(p);
1803 p = swap_info[type];
1804 /*
1805 * Do not memset this entry: a racing procfs swap_next()
1806 * would be relying on p->type to remain valid.
1807 */
1808 }
1809 INIT_LIST_HEAD(&p->first_swap_extent.list);
1803 p->flags = SWP_USED; 1810 p->flags = SWP_USED;
1804 p->next = -1; 1811 p->next = -1;
1805 spin_unlock(&swap_lock); 1812 spin_unlock(&swap_lock);
1813
1806 name = getname(specialfile); 1814 name = getname(specialfile);
1807 error = PTR_ERR(name); 1815 error = PTR_ERR(name);
1808 if (IS_ERR(name)) { 1816 if (IS_ERR(name)) {
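The allocation path above publishes a new swap_info_struct with an explicit ordering: the array slot is written, smp_wmb() is issued, and only then is nr_swapfiles raised; swap_start()/swap_next() in the /proc interface (earlier in this file's diff) pair that with an smp_rmb() after reading nr_swapfiles. A stripped-down sketch of the same publish/consume pattern — the helper names are hypothetical, the barriers and array are the patch's:

	static struct swap_info_struct *slot[MAX_SWAPFILES];
	static unsigned int nr_slots;

	static void publish_slot(unsigned int i, struct swap_info_struct *p)
	{
		slot[i] = p;		/* 1: fill the slot ...             */
		smp_wmb();		/* 2: ... before ...                */
		nr_slots = i + 1;	/* 3: ... exposing it via the count */
	}

	static struct swap_info_struct *read_slot(unsigned int i)
	{
		if (i >= nr_slots)
			return NULL;
		smp_rmb();		/* pairs with smp_wmb() above */
		return slot[i];		/* non-NULL for every i < nr_slots */
	}

Because entries are never freed and nr_swapfiles never shrinks, a reader that observes the new count can safely dereference any slot below it.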
@@ -1822,7 +1830,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1822 1830
1823 error = -EBUSY; 1831 error = -EBUSY;
1824 for (i = 0; i < nr_swapfiles; i++) { 1832 for (i = 0; i < nr_swapfiles; i++) {
1825 struct swap_info_struct *q = &swap_info[i]; 1833 struct swap_info_struct *q = swap_info[i];
1826 1834
1827 if (i == type || !q->swap_file) 1835 if (i == type || !q->swap_file)
1828 continue; 1836 continue;
@@ -1897,6 +1905,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1897 1905
1898 p->lowest_bit = 1; 1906 p->lowest_bit = 1;
1899 p->cluster_next = 1; 1907 p->cluster_next = 1;
1908 p->cluster_nr = 0;
1900 1909
1901 /* 1910 /*
1902 * Find out how many pages are allowed for a single swap 1911 * Find out how many pages are allowed for a single swap
@@ -1932,13 +1941,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1932 goto bad_swap; 1941 goto bad_swap;
1933 1942
1934 /* OK, set up the swap map and apply the bad block list */ 1943 /* OK, set up the swap map and apply the bad block list */
1935 swap_map = vmalloc(maxpages * sizeof(short)); 1944 swap_map = vmalloc(maxpages);
1936 if (!swap_map) { 1945 if (!swap_map) {
1937 error = -ENOMEM; 1946 error = -ENOMEM;
1938 goto bad_swap; 1947 goto bad_swap;
1939 } 1948 }
1940 1949
1941 memset(swap_map, 0, maxpages * sizeof(short)); 1950 memset(swap_map, 0, maxpages);
1942 for (i = 0; i < swap_header->info.nr_badpages; i++) { 1951 for (i = 0; i < swap_header->info.nr_badpages; i++) {
1943 int page_nr = swap_header->info.badpages[i]; 1952 int page_nr = swap_header->info.badpages[i];
1944 if (page_nr <= 0 || page_nr >= swap_header->info.last_page) { 1953 if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
@@ -2003,18 +2012,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2003 2012
2004 /* insert swap space into swap_list: */ 2013 /* insert swap space into swap_list: */
2005 prev = -1; 2014 prev = -1;
2006 for (i = swap_list.head; i >= 0; i = swap_info[i].next) { 2015 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
2007 if (p->prio >= swap_info[i].prio) { 2016 if (p->prio >= swap_info[i]->prio)
2008 break; 2017 break;
2009 }
2010 prev = i; 2018 prev = i;
2011 } 2019 }
2012 p->next = i; 2020 p->next = i;
2013 if (prev < 0) { 2021 if (prev < 0)
2014 swap_list.head = swap_list.next = p - swap_info; 2022 swap_list.head = swap_list.next = type;
2015 } else { 2023 else
2016 swap_info[prev].next = p - swap_info; 2024 swap_info[prev]->next = type;
2017 }
2018 spin_unlock(&swap_lock); 2025 spin_unlock(&swap_lock);
2019 mutex_unlock(&swapon_mutex); 2026 mutex_unlock(&swapon_mutex);
2020 error = 0; 2027 error = 0;
@@ -2051,15 +2058,15 @@ out:
2051 2058
2052void si_swapinfo(struct sysinfo *val) 2059void si_swapinfo(struct sysinfo *val)
2053{ 2060{
2054 unsigned int i; 2061 unsigned int type;
2055 unsigned long nr_to_be_unused = 0; 2062 unsigned long nr_to_be_unused = 0;
2056 2063
2057 spin_lock(&swap_lock); 2064 spin_lock(&swap_lock);
2058 for (i = 0; i < nr_swapfiles; i++) { 2065 for (type = 0; type < nr_swapfiles; type++) {
2059 if (!(swap_info[i].flags & SWP_USED) || 2066 struct swap_info_struct *si = swap_info[type];
2060 (swap_info[i].flags & SWP_WRITEOK)) 2067
2061 continue; 2068 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2062 nr_to_be_unused += swap_info[i].inuse_pages; 2069 nr_to_be_unused += si->inuse_pages;
2063 } 2070 }
2064 val->freeswap = nr_swap_pages + nr_to_be_unused; 2071 val->freeswap = nr_swap_pages + nr_to_be_unused;
2065 val->totalswap = total_swap_pages + nr_to_be_unused; 2072 val->totalswap = total_swap_pages + nr_to_be_unused;
@@ -2069,101 +2076,107 @@ void si_swapinfo(struct sysinfo *val)
2069/* 2076/*
2070 * Verify that a swap entry is valid and increment its swap map count. 2077 * Verify that a swap entry is valid and increment its swap map count.
2071 * 2078 *
2072 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
2073 * "permanent", but will be reclaimed by the next swapoff.
2074 * Returns error code in following case. 2079 * Returns error code in following case.
2075 * - success -> 0 2080 * - success -> 0
2076 * - swp_entry is invalid -> EINVAL 2081 * - swp_entry is invalid -> EINVAL
2077 * - swp_entry is migration entry -> EINVAL 2082 * - swp_entry is migration entry -> EINVAL
2078 * - swap-cache reference is requested but there is already one. -> EEXIST 2083 * - swap-cache reference is requested but there is already one. -> EEXIST
2079 * - swap-cache reference is requested but the entry is not used. -> ENOENT 2084 * - swap-cache reference is requested but the entry is not used. -> ENOENT
2085 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2080 */ 2086 */
2081static int __swap_duplicate(swp_entry_t entry, bool cache) 2087static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2082{ 2088{
2083 struct swap_info_struct * p; 2089 struct swap_info_struct *p;
2084 unsigned long offset, type; 2090 unsigned long offset, type;
2085 int result = -EINVAL; 2091 unsigned char count;
2086 int count; 2092 unsigned char has_cache;
2087 bool has_cache; 2093 int err = -EINVAL;
2088 2094
2089 if (non_swap_entry(entry)) 2095 if (non_swap_entry(entry))
2090 return -EINVAL; 2096 goto out;
2091 2097
2092 type = swp_type(entry); 2098 type = swp_type(entry);
2093 if (type >= nr_swapfiles) 2099 if (type >= nr_swapfiles)
2094 goto bad_file; 2100 goto bad_file;
2095 p = type + swap_info; 2101 p = swap_info[type];
2096 offset = swp_offset(entry); 2102 offset = swp_offset(entry);
2097 2103
2098 spin_lock(&swap_lock); 2104 spin_lock(&swap_lock);
2099
2100 if (unlikely(offset >= p->max)) 2105 if (unlikely(offset >= p->max))
2101 goto unlock_out; 2106 goto unlock_out;
2102 2107
2103 count = swap_count(p->swap_map[offset]); 2108 count = p->swap_map[offset];
2104 has_cache = swap_has_cache(p->swap_map[offset]); 2109 has_cache = count & SWAP_HAS_CACHE;
2110 count &= ~SWAP_HAS_CACHE;
2111 err = 0;
2105 2112
2106 if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */ 2113 if (usage == SWAP_HAS_CACHE) {
2107 2114
2108 /* set SWAP_HAS_CACHE if there is no cache and entry is used */ 2115 /* set SWAP_HAS_CACHE if there is no cache and entry is used */
2109 if (!has_cache && count) { 2116 if (!has_cache && count)
2110 p->swap_map[offset] = encode_swapmap(count, true); 2117 has_cache = SWAP_HAS_CACHE;
2111 result = 0; 2118 else if (has_cache) /* someone else added cache */
2112 } else if (has_cache) /* someone added cache */ 2119 err = -EEXIST;
2113 result = -EEXIST; 2120 else /* no users remaining */
2114 else if (!count) /* no users */ 2121 err = -ENOENT;
2115 result = -ENOENT;
2116 2122
2117 } else if (count || has_cache) { 2123 } else if (count || has_cache) {
2118 if (count < SWAP_MAP_MAX - 1) { 2124
2119 p->swap_map[offset] = encode_swapmap(count + 1, 2125 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2120 has_cache); 2126 count += usage;
2121 result = 0; 2127 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2122 } else if (count <= SWAP_MAP_MAX) { 2128 err = -EINVAL;
2123 if (swap_overflow++ < 5) 2129 else if (swap_count_continued(p, offset, count))
2124 printk(KERN_WARNING 2130 count = COUNT_CONTINUED;
2125 "swap_dup: swap entry overflow\n"); 2131 else
2126 p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX, 2132 err = -ENOMEM;
2127 has_cache);
2128 result = 0;
2129 }
2130 } else 2133 } else
2131 result = -ENOENT; /* unused swap entry */ 2134 err = -ENOENT; /* unused swap entry */
2135
2136 p->swap_map[offset] = count | has_cache;
2137
2132unlock_out: 2138unlock_out:
2133 spin_unlock(&swap_lock); 2139 spin_unlock(&swap_lock);
2134out: 2140out:
2135 return result; 2141 return err;
2136 2142
2137bad_file: 2143bad_file:
2138 printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); 2144 printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2139 goto out; 2145 goto out;
2140} 2146}
2147
2148/*
2149 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2150 * (in which case its reference count is never incremented).
2151 */
2152void swap_shmem_alloc(swp_entry_t entry)
2153{
2154 __swap_duplicate(entry, SWAP_MAP_SHMEM);
2155}
2156
2141/* 2157/*
2142 * increase reference count of swap entry by 1. 2158 * increase reference count of swap entry by 1.
2143 */ 2159 */
2144void swap_duplicate(swp_entry_t entry) 2160int swap_duplicate(swp_entry_t entry)
2145{ 2161{
2146 __swap_duplicate(entry, SWAP_MAP); 2162 int err = 0;
2163
2164 while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2165 err = add_swap_count_continuation(entry, GFP_ATOMIC);
2166 return err;
2147} 2167}
2148 2168
2149/* 2169/*
2150 * @entry: swap entry for which we allocate swap cache. 2170 * @entry: swap entry for which we allocate swap cache.
2151 * 2171 *
2152 * Called when allocating swap cache for exising swap entry, 2172 * Called when allocating swap cache for existing swap entry,
2153 * This can return error codes. Returns 0 at success. 2173 * This can return error codes. Returns 0 at success.
2154 * -EBUSY means there is a swap cache. 2174 * -EBUSY means there is a swap cache.
2155 * Note: return code is different from swap_duplicate(). 2175 * Note: return code is different from swap_duplicate().
2156 */ 2176 */
2157int swapcache_prepare(swp_entry_t entry) 2177int swapcache_prepare(swp_entry_t entry)
2158{ 2178{
2159 return __swap_duplicate(entry, SWAP_CACHE); 2179 return __swap_duplicate(entry, SWAP_HAS_CACHE);
2160}
2161
2162
2163struct swap_info_struct *
2164get_swap_info_struct(unsigned type)
2165{
2166 return &swap_info[type];
2167} 2180}
2168 2181
2169/* 2182/*
@@ -2181,7 +2194,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2181 if (!our_page_cluster) /* no readahead */ 2194 if (!our_page_cluster) /* no readahead */
2182 return 0; 2195 return 0;
2183 2196
2184 si = &swap_info[swp_type(entry)]; 2197 si = swap_info[swp_type(entry)];
2185 target = swp_offset(entry); 2198 target = swp_offset(entry);
2186 base = (target >> our_page_cluster) << our_page_cluster; 2199 base = (target >> our_page_cluster) << our_page_cluster;
2187 end = base + (1 << our_page_cluster); 2200 end = base + (1 << our_page_cluster);
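The __swap_duplicate() rewrite two hunks above drops the encode_swapmap()/swap_has_cache() helpers and works directly on the swap_map byte: the low bits hold the reference count (capped at SWAP_MAP_MAX, with COUNT_CONTINUED marking overflow into a continuation page) and SWAP_HAS_CACHE is a separate flag bit that is masked off, updated, and OR'ed back in. The standalone sketch below only models that byte layout; the flag values are illustrative stand-ins rather than copies from swap.h, and the real function additionally handles SWAP_MAP_SHMEM and the continuation path.

/* Toy model of the swap_map byte layout used by __swap_duplicate().
 * The constants are illustrative; the real values live in swap.h. */
#include <stdio.h>

#define SWAP_HAS_CACHE  0x40   /* assumed flag bit: page is in swap cache   */
#define COUNT_CONTINUED 0x80   /* assumed flag bit: count continues elsewhere */
#define SWAP_MAP_MAX    0x3e   /* assumed maximum in-place map count        */

static unsigned char dup(unsigned char entry, int *err)
{
        unsigned char count = entry & ~SWAP_HAS_CACHE;
        unsigned char has_cache = entry & SWAP_HAS_CACHE;

        *err = 0;
        if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
                count += 1;             /* ordinary pte reference            */
        else
                *err = -1;              /* would need a continuation page    */
        return count | has_cache;
}

int main(void)
{
        int err;
        unsigned char e = 1 | SWAP_HAS_CACHE;   /* one mapping, cached */

        e = dup(e, &err);
        printf("count=%d cached=%d err=%d\n",
               e & ~(SWAP_HAS_CACHE | COUNT_CONTINUED),
               !!(e & SWAP_HAS_CACHE), err);
        return 0;
}

Built as ordinary user-space C this prints count=2 cached=1 err=0 for an entry that starts with one mapping plus a swap-cache reference.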
@@ -2217,3 +2230,219 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2217 *offset = ++toff; 2230 *offset = ++toff;
2218 return nr_pages? ++nr_pages: 0; 2231 return nr_pages? ++nr_pages: 0;
2219} 2232}
2233
2234/*
2235 * add_swap_count_continuation - called when a swap count is duplicated
2236 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2237 * page of the original vmalloc'ed swap_map, to hold the continuation count
2238 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
2239 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2240 *
2241 * These continuation pages are seldom referenced: the common paths all work
2242 * on the original swap_map, only referring to a continuation page when the
2243 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2244 *
2245 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2246 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2247 * can be called after dropping locks.
2248 */
2249int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2250{
2251 struct swap_info_struct *si;
2252 struct page *head;
2253 struct page *page;
2254 struct page *list_page;
2255 pgoff_t offset;
2256 unsigned char count;
2257
2258 /*
2259 * When debugging, it's easier to use __GFP_ZERO here; but it's better
2260 * for latency not to zero a page while GFP_ATOMIC and holding locks.
2261 */
2262 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2263
2264 si = swap_info_get(entry);
2265 if (!si) {
2266 /*
2267 * An acceptable race has occurred since the failing
2268 * __swap_duplicate(): the swap entry has been freed,
2269 * perhaps even the whole swap_map cleared for swapoff.
2270 */
2271 goto outer;
2272 }
2273
2274 offset = swp_offset(entry);
2275 count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2276
2277 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2278 /*
2279 * The higher the swap count, the more likely it is that tasks
2280 * will race to add swap count continuation: we need to avoid
2281 * over-provisioning.
2282 */
2283 goto out;
2284 }
2285
2286 if (!page) {
2287 spin_unlock(&swap_lock);
2288 return -ENOMEM;
2289 }
2290
2291 /*
2292 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2293 * no architecture is using highmem pages for kernel pagetables: so it
2294 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2295 */
2296 head = vmalloc_to_page(si->swap_map + offset);
2297 offset &= ~PAGE_MASK;
2298
2299 /*
2300 * Page allocation does not initialize the page's lru field,
2301 * but it does always reset its private field.
2302 */
2303 if (!page_private(head)) {
2304 BUG_ON(count & COUNT_CONTINUED);
2305 INIT_LIST_HEAD(&head->lru);
2306 set_page_private(head, SWP_CONTINUED);
2307 si->flags |= SWP_CONTINUED;
2308 }
2309
2310 list_for_each_entry(list_page, &head->lru, lru) {
2311 unsigned char *map;
2312
2313 /*
2314 * If the previous map said no continuation, but we've found
2315 * a continuation page, free our allocation and use this one.
2316 */
2317 if (!(count & COUNT_CONTINUED))
2318 goto out;
2319
2320 map = kmap_atomic(list_page, KM_USER0) + offset;
2321 count = *map;
2322 kunmap_atomic(map, KM_USER0);
2323
2324 /*
2325 * If this continuation count now has some space in it,
2326 * free our allocation and use this one.
2327 */
2328 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2329 goto out;
2330 }
2331
2332 list_add_tail(&page->lru, &head->lru);
2333 page = NULL; /* now it's attached, don't free it */
2334out:
2335 spin_unlock(&swap_lock);
2336outer:
2337 if (page)
2338 __free_page(page);
2339 return 0;
2340}
2341
2342/*
2343 * swap_count_continued - when the original swap_map count is incremented
2344 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2345 * into, carry if so, or else fail until a new continuation page is allocated;
2346 * when the original swap_map count is decremented from 0 with continuation,
2347 * borrow from the continuation and report whether it still holds more.
2348 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2349 */
2350static bool swap_count_continued(struct swap_info_struct *si,
2351 pgoff_t offset, unsigned char count)
2352{
2353 struct page *head;
2354 struct page *page;
2355 unsigned char *map;
2356
2357 head = vmalloc_to_page(si->swap_map + offset);
2358 if (page_private(head) != SWP_CONTINUED) {
2359 BUG_ON(count & COUNT_CONTINUED);
2360 return false; /* need to add count continuation */
2361 }
2362
2363 offset &= ~PAGE_MASK;
2364 page = list_entry(head->lru.next, struct page, lru);
2365 map = kmap_atomic(page, KM_USER0) + offset;
2366
2367 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
2368 goto init_map; /* jump over SWAP_CONT_MAX checks */
2369
2370 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2371 /*
2372 * Think of how you add 1 to 999
2373 */
2374 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2375 kunmap_atomic(map, KM_USER0);
2376 page = list_entry(page->lru.next, struct page, lru);
2377 BUG_ON(page == head);
2378 map = kmap_atomic(page, KM_USER0) + offset;
2379 }
2380 if (*map == SWAP_CONT_MAX) {
2381 kunmap_atomic(map, KM_USER0);
2382 page = list_entry(page->lru.next, struct page, lru);
2383 if (page == head)
2384 return false; /* add count continuation */
2385 map = kmap_atomic(page, KM_USER0) + offset;
2386init_map: *map = 0; /* we didn't zero the page */
2387 }
2388 *map += 1;
2389 kunmap_atomic(map, KM_USER0);
2390 page = list_entry(page->lru.prev, struct page, lru);
2391 while (page != head) {
2392 map = kmap_atomic(page, KM_USER0) + offset;
2393 *map = COUNT_CONTINUED;
2394 kunmap_atomic(map, KM_USER0);
2395 page = list_entry(page->lru.prev, struct page, lru);
2396 }
2397 return true; /* incremented */
2398
2399 } else { /* decrementing */
2400 /*
2401 * Think of how you subtract 1 from 1000
2402 */
2403 BUG_ON(count != COUNT_CONTINUED);
2404 while (*map == COUNT_CONTINUED) {
2405 kunmap_atomic(map, KM_USER0);
2406 page = list_entry(page->lru.next, struct page, lru);
2407 BUG_ON(page == head);
2408 map = kmap_atomic(page, KM_USER0) + offset;
2409 }
2410 BUG_ON(*map == 0);
2411 *map -= 1;
2412 if (*map == 0)
2413 count = 0;
2414 kunmap_atomic(map, KM_USER0);
2415 page = list_entry(page->lru.prev, struct page, lru);
2416 while (page != head) {
2417 map = kmap_atomic(page, KM_USER0) + offset;
2418 *map = SWAP_CONT_MAX | count;
2419 count = COUNT_CONTINUED;
2420 kunmap_atomic(map, KM_USER0);
2421 page = list_entry(page->lru.prev, struct page, lru);
2422 }
2423 return count == COUNT_CONTINUED;
2424 }
2425}
2426
2427/*
2428 * free_swap_count_continuations - swapoff free all the continuation pages
2429 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2430 */
2431static void free_swap_count_continuations(struct swap_info_struct *si)
2432{
2433 pgoff_t offset;
2434
2435 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2436 struct page *head;
2437 head = vmalloc_to_page(si->swap_map + offset);
2438 if (page_private(head)) {
2439 struct list_head *this, *next;
2440 list_for_each_safe(this, next, &head->lru) {
2441 struct page *page;
2442 page = list_entry(this, struct page, lru);
2443 list_del(this);
2444 __free_page(page);
2445 }
2446 }
2447 }
2448}
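The "add 1 to 999" comment in swap_count_continued() is easiest to check outside the kernel. The count behaves like a little-endian multi-digit number: the swap_map byte is the lowest digit and saturates at SWAP_MAP_MAX, each continuation page contributes one further digit that saturates at SWAP_CONT_MAX, and COUNT_CONTINUED on a digit means "look at the next page". The toy program below replays the same carry/borrow walk on a plain array, with deliberately tiny digit limits (not the kernel's values) so the carries show up after a handful of operations.

/* Toy carry model for swap count continuations.  digit[0] stands for the
 * swap_map byte, digit[1..] for successive continuation pages. */
#include <stdio.h>

#define MAP_MAX   3    /* stand-in for SWAP_MAP_MAX  */
#define CONT_MAX  4    /* stand-in for SWAP_CONT_MAX */
#define NDIGITS   4

static int limit(int i) { return i == 0 ? MAP_MAX : CONT_MAX; }

static void inc(int d[NDIGITS])
{
        int i = 0;
        while (d[i] == limit(i)) {      /* full digit: carry into the next */
                d[i] = 0;
                i++;
        }
        d[i]++;
}

static void dec(int d[NDIGITS])
{
        int i = 0;
        while (d[i] == 0) {             /* empty digit: borrow from the next */
                d[i] = limit(i);
                i++;
        }
        d[i]--;
}

int main(void)
{
        int d[NDIGITS] = {0};
        int n;

        for (n = 0; n < 20; n++)
                inc(d);
        printf("after 20 increments: %d %d %d %d\n", d[0], d[1], d[2], d[3]);
        for (n = 0; n < 20; n++)
                dec(d);
        printf("after 20 decrements: %d %d %d %d\n", d[0], d[1], d[2], d[3]);
        return 0;
}

Twenty increments followed by twenty decrements print 0 0 1 0 and then 0 0 0 0, mirroring how a burst of swap_duplicate() calls spills into continuation pages and how later frees drain them back out.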
diff --git a/mm/truncate.c b/mm/truncate.c
index 2c147a7e5f2c..342deee22684 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -272,6 +272,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
272 pagevec_release(&pvec); 272 pagevec_release(&pvec);
273 break; 273 break;
274 } 274 }
275 mem_cgroup_uncharge_start();
275 for (i = 0; i < pagevec_count(&pvec); i++) { 276 for (i = 0; i < pagevec_count(&pvec); i++) {
276 struct page *page = pvec.pages[i]; 277 struct page *page = pvec.pages[i];
277 278
@@ -286,6 +287,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
286 unlock_page(page); 287 unlock_page(page);
287 } 288 }
288 pagevec_release(&pvec); 289 pagevec_release(&pvec);
290 mem_cgroup_uncharge_end();
289 } 291 }
290} 292}
291EXPORT_SYMBOL(truncate_inode_pages_range); 293EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -327,6 +329,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
327 pagevec_init(&pvec, 0); 329 pagevec_init(&pvec, 0);
328 while (next <= end && 330 while (next <= end &&
329 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 331 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
332 mem_cgroup_uncharge_start();
330 for (i = 0; i < pagevec_count(&pvec); i++) { 333 for (i = 0; i < pagevec_count(&pvec); i++) {
331 struct page *page = pvec.pages[i]; 334 struct page *page = pvec.pages[i];
332 pgoff_t index; 335 pgoff_t index;
@@ -354,6 +357,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
354 break; 357 break;
355 } 358 }
356 pagevec_release(&pvec); 359 pagevec_release(&pvec);
360 mem_cgroup_uncharge_end();
357 cond_resched(); 361 cond_resched();
358 } 362 }
359 return ret; 363 return ret;
@@ -428,6 +432,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
428 while (next <= end && !wrapped && 432 while (next <= end && !wrapped &&
429 pagevec_lookup(&pvec, mapping, next, 433 pagevec_lookup(&pvec, mapping, next,
430 min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { 434 min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
435 mem_cgroup_uncharge_start();
431 for (i = 0; i < pagevec_count(&pvec); i++) { 436 for (i = 0; i < pagevec_count(&pvec); i++) {
432 struct page *page = pvec.pages[i]; 437 struct page *page = pvec.pages[i];
433 pgoff_t page_index; 438 pgoff_t page_index;
@@ -477,6 +482,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
477 unlock_page(page); 482 unlock_page(page);
478 } 483 }
479 pagevec_release(&pvec); 484 pagevec_release(&pvec);
485 mem_cgroup_uncharge_end();
480 cond_resched(); 486 cond_resched();
481 } 487 }
482 return ret; 488 return ret;
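The only change to truncate.c is the mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() bracket around each pagevec loop. Every page released inside such a loop would otherwise hit the memory controller's shared counter individually; with a batch open, the uncharges can be accumulated and applied once at _end(). The sketch below captures only the shape of that start/accumulate/flush pattern in user-space C, not the memcg implementation itself.

/* Shape of the uncharge batching enabled around the pagevec loops above:
 * individual uncharges are summed while a batch is open and applied to the
 * shared counter once, at the end. */
#include <stdio.h>

static long shared_counter = 1000;      /* stands in for the res_counter   */
static int  batch_open;
static long batch_pages;

static void uncharge_start(void) { batch_open = 1; batch_pages = 0; }

static void uncharge(long nr_pages)
{
        if (batch_open)
                batch_pages += nr_pages;        /* cheap: no shared update   */
        else
                shared_counter -= nr_pages;     /* slow path: hit the counter */
}

static void uncharge_end(void)
{
        shared_counter -= batch_pages;          /* one update for the batch  */
        batch_open = 0;
}

int main(void)
{
        int i;

        uncharge_start();
        for (i = 0; i < 14; i++)        /* e.g. one pagevec worth of pages */
                uncharge(1);
        uncharge_end();
        printf("counter after batch: %ld\n", shared_counter);
        return 0;
}

The same bracket is added around the loops in invalidate_mapping_pages() and invalidate_inode_pages2_range() above.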
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 9b08d790df6f..37e69295f250 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1411,6 +1411,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1411{ 1411{
1412 struct page **pages; 1412 struct page **pages;
1413 unsigned int nr_pages, array_size, i; 1413 unsigned int nr_pages, array_size, i;
1414 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1414 1415
1415 nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; 1416 nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1416 array_size = (nr_pages * sizeof(struct page *)); 1417 array_size = (nr_pages * sizeof(struct page *));
@@ -1418,13 +1419,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1418 area->nr_pages = nr_pages; 1419 area->nr_pages = nr_pages;
1419 /* Please note that the recursion is strictly bounded. */ 1420 /* Please note that the recursion is strictly bounded. */
1420 if (array_size > PAGE_SIZE) { 1421 if (array_size > PAGE_SIZE) {
1421 pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO, 1422 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
1422 PAGE_KERNEL, node, caller); 1423 PAGE_KERNEL, node, caller);
1423 area->flags |= VM_VPAGES; 1424 area->flags |= VM_VPAGES;
1424 } else { 1425 } else {
1425 pages = kmalloc_node(array_size, 1426 pages = kmalloc_node(array_size, nested_gfp, node);
1426 (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
1427 node);
1428 } 1427 }
1429 area->pages = pages; 1428 area->pages = pages;
1430 area->caller = caller; 1429 area->caller = caller;
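In __vmalloc_area_node() the gfp mask for the nested pages[] allocation is now computed once: nested_gfp keeps only the caller's reclaim-behaviour bits (GFP_RECLAIM_MASK) and forces __GFP_ZERO, with __GFP_HIGHMEM added back explicitly only for the recursive __vmalloc_node() case. The snippet below just makes the mask derivation concrete; the flag values are placeholders, not the kernel's definitions from gfp.h.

/* Illustration of the nested_gfp derivation: keep only the caller's
 * reclaim-behaviour bits, then force zeroing of the array. */
#include <stdio.h>

#define GFP_WAIT          0x01
#define GFP_IO            0x02
#define GFP_FS            0x04
#define GFP_HIGHMEM       0x08          /* placement bit, must not leak down */
#define GFP_ZERO          0x10
#define GFP_RECLAIM_MASK  (GFP_WAIT | GFP_IO | GFP_FS)

int main(void)
{
        unsigned gfp_mask = GFP_WAIT | GFP_IO | GFP_FS | GFP_HIGHMEM;
        unsigned nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | GFP_ZERO;

        printf("caller mask 0x%02x -> nested mask 0x%02x\n",
               gfp_mask, nested_gfp);
        return 0;
}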
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 777af57fd8c8..885207a6b6b7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -55,6 +55,11 @@ struct scan_control {
55 /* Number of pages freed so far during a call to shrink_zones() */ 55 /* Number of pages freed so far during a call to shrink_zones() */
56 unsigned long nr_reclaimed; 56 unsigned long nr_reclaimed;
57 57
58 /* How many pages shrink_list() should reclaim */
59 unsigned long nr_to_reclaim;
60
61 unsigned long hibernation_mode;
62
58 /* This context's GFP mask */ 63 /* This context's GFP mask */
59 gfp_t gfp_mask; 64 gfp_t gfp_mask;
60 65
@@ -66,12 +71,6 @@ struct scan_control {
66 /* Can pages be swapped as part of reclaim? */ 71 /* Can pages be swapped as part of reclaim? */
67 int may_swap; 72 int may_swap;
68 73
69 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
70 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
71 * In this context, it doesn't matter that we scan the
72 * whole list at once. */
73 int swap_cluster_max;
74
75 int swappiness; 74 int swappiness;
76 75
77 int all_unreclaimable; 76 int all_unreclaimable;
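This hunk replaces the per-call swap_cluster_max knob with an explicit reclaim goal: callers fill in sc->nr_to_reclaim (plus hibernation_mode for the suspend path), while the per-iteration scan batch becomes the fixed SWAP_CLUSTER_MAX used further down in shrink_inactive_list() and shrink_zone(). A minimal, illustrative model of that calling convention, with the reclaim work stubbed out, looks like this:

/* Callers state a reclaim goal; the loop always works in SWAP_CLUSTER_MAX
 * batches and stops once the goal is met.  Illustrative only. */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

struct scan_control {
        unsigned long nr_to_reclaim;    /* reclaim goal                     */
        unsigned long nr_reclaimed;
};

/* pretend each batch frees about half of what it scans */
static unsigned long shrink_batch(unsigned long nr_to_scan)
{
        return nr_to_scan / 2;
}

static void shrink(struct scan_control *sc)
{
        while (sc->nr_reclaimed < sc->nr_to_reclaim)
                sc->nr_reclaimed += shrink_batch(SWAP_CLUSTER_MAX);
}

int main(void)
{
        struct scan_control sc = { .nr_to_reclaim = SWAP_CLUSTER_MAX };

        shrink(&sc);
        printf("reclaimed %lu of %lu wanted\n",
               sc.nr_reclaimed, sc.nr_to_reclaim);
        return 0;
}

Later in this patch, try_to_free_pages() sets the goal to SWAP_CLUSTER_MAX, kswapd sets it to ULONG_MAX so it never bails out early, and __zone_reclaim() sets it to at least SWAP_CLUSTER_MAX.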
@@ -358,7 +357,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
358 * stalls if we need to run get_block(). We could test 357 * stalls if we need to run get_block(). We could test
359 * PagePrivate for that. 358 * PagePrivate for that.
360 * 359 *
361 * If this process is currently in generic_file_write() against 360 * If this process is currently in __generic_file_aio_write() against
362 * this page's queue, we can perform writeback even if that 361 * this page's queue, we can perform writeback even if that
363 * will block. 362 * will block.
364 * 363 *
@@ -1132,7 +1131,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1132 unsigned long nr_anon; 1131 unsigned long nr_anon;
1133 unsigned long nr_file; 1132 unsigned long nr_file;
1134 1133
1135 nr_taken = sc->isolate_pages(sc->swap_cluster_max, 1134 nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
1136 &page_list, &nr_scan, sc->order, mode, 1135 &page_list, &nr_scan, sc->order, mode,
1137 zone, sc->mem_cgroup, 0, file); 1136 zone, sc->mem_cgroup, 0, file);
1138 1137
@@ -1166,10 +1165,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1166 __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon); 1165 __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
1167 __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file); 1166 __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
1168 1167
1169 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; 1168 reclaim_stat->recent_scanned[0] += nr_anon;
1170 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON]; 1169 reclaim_stat->recent_scanned[1] += nr_file;
1171 reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
1172 reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
1173 1170
1174 spin_unlock_irq(&zone->lru_lock); 1171 spin_unlock_irq(&zone->lru_lock);
1175 1172
@@ -1464,20 +1461,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1464 return low; 1461 return low;
1465} 1462}
1466 1463
1464static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
1465 int file)
1466{
1467 if (file)
1468 return inactive_file_is_low(zone, sc);
1469 else
1470 return inactive_anon_is_low(zone, sc);
1471}
1472
1467static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1473static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1468 struct zone *zone, struct scan_control *sc, int priority) 1474 struct zone *zone, struct scan_control *sc, int priority)
1469{ 1475{
1470 int file = is_file_lru(lru); 1476 int file = is_file_lru(lru);
1471 1477
1472 if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) { 1478 if (is_active_lru(lru)) {
1473 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1479 if (inactive_list_is_low(zone, sc, file))
1480 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1474 return 0; 1481 return 0;
1475 } 1482 }
1476 1483
1477 if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
1478 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1479 return 0;
1480 }
1481 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file); 1484 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1482} 1485}
1483 1486
@@ -1567,15 +1570,14 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1567 * until we collected @swap_cluster_max pages to scan. 1570 * until we collected @swap_cluster_max pages to scan.
1568 */ 1571 */
1569static unsigned long nr_scan_try_batch(unsigned long nr_to_scan, 1572static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1570 unsigned long *nr_saved_scan, 1573 unsigned long *nr_saved_scan)
1571 unsigned long swap_cluster_max)
1572{ 1574{
1573 unsigned long nr; 1575 unsigned long nr;
1574 1576
1575 *nr_saved_scan += nr_to_scan; 1577 *nr_saved_scan += nr_to_scan;
1576 nr = *nr_saved_scan; 1578 nr = *nr_saved_scan;
1577 1579
1578 if (nr >= swap_cluster_max) 1580 if (nr >= SWAP_CLUSTER_MAX)
1579 *nr_saved_scan = 0; 1581 *nr_saved_scan = 0;
1580 else 1582 else
1581 nr = 0; 1583 nr = 0;
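nr_scan_try_batch() now batches against the global SWAP_CLUSTER_MAX instead of a per-caller value. Since the function is small and self-contained it can be lifted out essentially verbatim and exercised in user space: sub-batch scan targets are carried over in *nr_saved_scan and only released once a full batch has accumulated.

/* Standalone copy of nr_scan_try_batch() as it reads after this patch,
 * plus a tiny driver. */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
                                       unsigned long *nr_saved_scan)
{
        unsigned long nr;

        *nr_saved_scan += nr_to_scan;
        nr = *nr_saved_scan;

        if (nr >= SWAP_CLUSTER_MAX)
                *nr_saved_scan = 0;
        else
                nr = 0;

        return nr;
}

int main(void)
{
        unsigned long saved = 0;
        int i;

        for (i = 0; i < 5; i++) {       /* five requests of 10 pages each */
                unsigned long now = nr_scan_try_batch(10, &saved);
                printf("request 10 -> scan %lu now (carried over %lu)\n",
                       now, saved);
        }
        return 0;
}

The first three requests return 0 while the saved total grows, the fourth releases 40 pages at once, and the fifth starts saving again.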
@@ -1594,7 +1596,7 @@ static void shrink_zone(int priority, struct zone *zone,
1594 unsigned long percent[2]; /* anon @ 0; file @ 1 */ 1596 unsigned long percent[2]; /* anon @ 0; file @ 1 */
1595 enum lru_list l; 1597 enum lru_list l;
1596 unsigned long nr_reclaimed = sc->nr_reclaimed; 1598 unsigned long nr_reclaimed = sc->nr_reclaimed;
1597 unsigned long swap_cluster_max = sc->swap_cluster_max; 1599 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1598 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1600 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1599 int noswap = 0; 1601 int noswap = 0;
1600 1602
@@ -1616,15 +1618,15 @@ static void shrink_zone(int priority, struct zone *zone,
1616 scan = (scan * percent[file]) / 100; 1618 scan = (scan * percent[file]) / 100;
1617 } 1619 }
1618 nr[l] = nr_scan_try_batch(scan, 1620 nr[l] = nr_scan_try_batch(scan,
1619 &reclaim_stat->nr_saved_scan[l], 1621 &reclaim_stat->nr_saved_scan[l]);
1620 swap_cluster_max);
1621 } 1622 }
1622 1623
1623 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1624 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1624 nr[LRU_INACTIVE_FILE]) { 1625 nr[LRU_INACTIVE_FILE]) {
1625 for_each_evictable_lru(l) { 1626 for_each_evictable_lru(l) {
1626 if (nr[l]) { 1627 if (nr[l]) {
1627 nr_to_scan = min(nr[l], swap_cluster_max); 1628 nr_to_scan = min_t(unsigned long,
1629 nr[l], SWAP_CLUSTER_MAX);
1628 nr[l] -= nr_to_scan; 1630 nr[l] -= nr_to_scan;
1629 1631
1630 nr_reclaimed += shrink_list(l, nr_to_scan, 1632 nr_reclaimed += shrink_list(l, nr_to_scan,
@@ -1639,8 +1641,7 @@ static void shrink_zone(int priority, struct zone *zone,
1639 * with multiple processes reclaiming pages, the total 1641 * with multiple processes reclaiming pages, the total
1640 * freeing target can get unreasonably large. 1642 * freeing target can get unreasonably large.
1641 */ 1643 */
1642 if (nr_reclaimed > swap_cluster_max && 1644 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
1643 priority < DEF_PRIORITY && !current_is_kswapd())
1644 break; 1645 break;
1645 } 1646 }
1646 1647
@@ -1738,6 +1739,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1738 struct zoneref *z; 1739 struct zoneref *z;
1739 struct zone *zone; 1740 struct zone *zone;
1740 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1741 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1742 unsigned long writeback_threshold;
1741 1743
1742 delayacct_freepages_start(); 1744 delayacct_freepages_start();
1743 1745
@@ -1773,7 +1775,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1773 } 1775 }
1774 } 1776 }
1775 total_scanned += sc->nr_scanned; 1777 total_scanned += sc->nr_scanned;
1776 if (sc->nr_reclaimed >= sc->swap_cluster_max) { 1778 if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
1777 ret = sc->nr_reclaimed; 1779 ret = sc->nr_reclaimed;
1778 goto out; 1780 goto out;
1779 } 1781 }
@@ -1785,14 +1787,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1785 * that's undesirable in laptop mode, where we *want* lumpy 1787 * that's undesirable in laptop mode, where we *want* lumpy
1786 * writeout. So in laptop mode, write out the whole world. 1788 * writeout. So in laptop mode, write out the whole world.
1787 */ 1789 */
1788 if (total_scanned > sc->swap_cluster_max + 1790 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
1789 sc->swap_cluster_max / 2) { 1791 if (total_scanned > writeback_threshold) {
1790 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned); 1792 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
1791 sc->may_writepage = 1; 1793 sc->may_writepage = 1;
1792 } 1794 }
1793 1795
1794 /* Take a nap, wait for some writeback to complete */ 1796 /* Take a nap, wait for some writeback to complete */
1795 if (sc->nr_scanned && priority < DEF_PRIORITY - 2) 1797 if (!sc->hibernation_mode && sc->nr_scanned &&
1798 priority < DEF_PRIORITY - 2)
1796 congestion_wait(BLK_RW_ASYNC, HZ/10); 1799 congestion_wait(BLK_RW_ASYNC, HZ/10);
1797 } 1800 }
1798 /* top priority shrink_zones still had more to do? don't OOM, then */ 1801 /* top priority shrink_zones still had more to do? don't OOM, then */
@@ -1831,7 +1834,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1831 struct scan_control sc = { 1834 struct scan_control sc = {
1832 .gfp_mask = gfp_mask, 1835 .gfp_mask = gfp_mask,
1833 .may_writepage = !laptop_mode, 1836 .may_writepage = !laptop_mode,
1834 .swap_cluster_max = SWAP_CLUSTER_MAX, 1837 .nr_to_reclaim = SWAP_CLUSTER_MAX,
1835 .may_unmap = 1, 1838 .may_unmap = 1,
1836 .may_swap = 1, 1839 .may_swap = 1,
1837 .swappiness = vm_swappiness, 1840 .swappiness = vm_swappiness,
@@ -1855,7 +1858,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
1855 .may_writepage = !laptop_mode, 1858 .may_writepage = !laptop_mode,
1856 .may_unmap = 1, 1859 .may_unmap = 1,
1857 .may_swap = !noswap, 1860 .may_swap = !noswap,
1858 .swap_cluster_max = SWAP_CLUSTER_MAX,
1859 .swappiness = swappiness, 1861 .swappiness = swappiness,
1860 .order = 0, 1862 .order = 0,
1861 .mem_cgroup = mem, 1863 .mem_cgroup = mem,
@@ -1889,7 +1891,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1889 .may_writepage = !laptop_mode, 1891 .may_writepage = !laptop_mode,
1890 .may_unmap = 1, 1892 .may_unmap = 1,
1891 .may_swap = !noswap, 1893 .may_swap = !noswap,
1892 .swap_cluster_max = SWAP_CLUSTER_MAX, 1894 .nr_to_reclaim = SWAP_CLUSTER_MAX,
1893 .swappiness = swappiness, 1895 .swappiness = swappiness,
1894 .order = 0, 1896 .order = 0,
1895 .mem_cgroup = mem_cont, 1897 .mem_cgroup = mem_cont,
@@ -1904,6 +1906,30 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1904} 1906}
1905#endif 1907#endif
1906 1908
1909/* is kswapd sleeping prematurely? */
1910static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
1911{
1912 int i;
1913
1914 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
1915 if (remaining)
1916 return 1;
1917
1918 /* If after HZ/10, a zone is below the high mark, it's premature */
1919 for (i = 0; i < pgdat->nr_zones; i++) {
1920 struct zone *zone = pgdat->node_zones + i;
1921
1922 if (!populated_zone(zone))
1923 continue;
1924
1925 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
1926 0, 0))
1927 return 1;
1928 }
1929
1930 return 0;
1931}
1932
1907/* 1933/*
1908 * For kswapd, balance_pgdat() will work across all this node's zones until 1934 * For kswapd, balance_pgdat() will work across all this node's zones until
1909 * they are all at high_wmark_pages(zone). 1935 * they are all at high_wmark_pages(zone).
@@ -1936,7 +1962,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1936 .gfp_mask = GFP_KERNEL, 1962 .gfp_mask = GFP_KERNEL,
1937 .may_unmap = 1, 1963 .may_unmap = 1,
1938 .may_swap = 1, 1964 .may_swap = 1,
1939 .swap_cluster_max = SWAP_CLUSTER_MAX, 1965 /*
 1966 * kswapd doesn't want to bail out of reclaim early, because
1967 * we want to put equal scanning pressure on each zone.
1968 */
1969 .nr_to_reclaim = ULONG_MAX,
1940 .swappiness = vm_swappiness, 1970 .swappiness = vm_swappiness,
1941 .order = order, 1971 .order = order,
1942 .mem_cgroup = NULL, 1972 .mem_cgroup = NULL,
@@ -1961,6 +1991,7 @@ loop_again:
1961 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 1991 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1962 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 1992 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
1963 unsigned long lru_pages = 0; 1993 unsigned long lru_pages = 0;
1994 int has_under_min_watermark_zone = 0;
1964 1995
1965 /* The swap token gets in the way of swapout... */ 1996 /* The swap token gets in the way of swapout... */
1966 if (!priority) 1997 if (!priority)
@@ -2067,6 +2098,15 @@ loop_again:
2067 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2098 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2068 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2099 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2069 sc.may_writepage = 1; 2100 sc.may_writepage = 1;
2101
2102 /*
 2103 * We are still under the min watermark. This means there is a
 2104 * risk of GFP_ATOMIC allocation failure. Hurry up!
2105 */
2106 if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
2107 end_zone, 0))
2108 has_under_min_watermark_zone = 1;
2109
2070 } 2110 }
2071 if (all_zones_ok) 2111 if (all_zones_ok)
2072 break; /* kswapd: all done */ 2112 break; /* kswapd: all done */
@@ -2074,8 +2114,12 @@ loop_again:
2074 * OK, kswapd is getting into trouble. Take a nap, then take 2114 * OK, kswapd is getting into trouble. Take a nap, then take
2075 * another pass across the zones. 2115 * another pass across the zones.
2076 */ 2116 */
2077 if (total_scanned && priority < DEF_PRIORITY - 2) 2117 if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2078 congestion_wait(BLK_RW_ASYNC, HZ/10); 2118 if (has_under_min_watermark_zone)
2119 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2120 else
2121 congestion_wait(BLK_RW_ASYNC, HZ/10);
2122 }
2079 2123
2080 /* 2124 /*
2081 * We do this so kswapd doesn't build up large priorities for 2125 * We do this so kswapd doesn't build up large priorities for
@@ -2173,6 +2217,7 @@ static int kswapd(void *p)
2173 order = 0; 2217 order = 0;
2174 for ( ; ; ) { 2218 for ( ; ; ) {
2175 unsigned long new_order; 2219 unsigned long new_order;
2220 int ret;
2176 2221
2177 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2222 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2178 new_order = pgdat->kswapd_max_order; 2223 new_order = pgdat->kswapd_max_order;
@@ -2184,19 +2229,45 @@ static int kswapd(void *p)
2184 */ 2229 */
2185 order = new_order; 2230 order = new_order;
2186 } else { 2231 } else {
2187 if (!freezing(current)) 2232 if (!freezing(current) && !kthread_should_stop()) {
2188 schedule(); 2233 long remaining = 0;
2234
2235 /* Try to sleep for a short interval */
2236 if (!sleeping_prematurely(pgdat, order, remaining)) {
2237 remaining = schedule_timeout(HZ/10);
2238 finish_wait(&pgdat->kswapd_wait, &wait);
2239 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2240 }
2241
2242 /*
2243 * After a short sleep, check if it was a
2244 * premature sleep. If not, then go fully
2245 * to sleep until explicitly woken up
2246 */
2247 if (!sleeping_prematurely(pgdat, order, remaining))
2248 schedule();
2249 else {
2250 if (remaining)
2251 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2252 else
2253 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2254 }
2255 }
2189 2256
2190 order = pgdat->kswapd_max_order; 2257 order = pgdat->kswapd_max_order;
2191 } 2258 }
2192 finish_wait(&pgdat->kswapd_wait, &wait); 2259 finish_wait(&pgdat->kswapd_wait, &wait);
2193 2260
2194 if (!try_to_freeze()) { 2261 ret = try_to_freeze();
2195 /* We can speed up thawing tasks if we don't call 2262 if (kthread_should_stop())
2196 * balance_pgdat after returning from the refrigerator 2263 break;
2197 */ 2264
2265 /*
2266 * We can speed up thawing tasks if we don't call balance_pgdat
2267 * after returning from the refrigerator
2268 */
2269 if (!ret)
2198 balance_pgdat(pgdat, order); 2270 balance_pgdat(pgdat, order);
2199 }
2200 } 2271 }
2201 return 0; 2272 return 0;
2202} 2273}
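The kswapd loop above now sleeps in two stages: a short schedule_timeout(HZ/10) nap, a re-check of sleeping_prematurely(), and only then an indefinite schedule(); the KSWAPD_*_HIT_QUICKLY counters record which check tripped. The toy below condenses that decision flow. The kswapd_try_to_sleep() helper name and the single free_pages/high_wmark pair are inventions for the example, standing in for the per-zone watermark scan that sleeping_prematurely() really does.

/* Toy model of kswapd's two-stage sleep: nap briefly, re-check the
 * premature-sleep conditions, and only then sleep until explicitly woken. */
#include <stdio.h>

static long free_pages = 120;   /* made-up zone state */
static long high_wmark = 100;

static int sleeping_prematurely(long remaining)
{
        if (remaining)                  /* a waker interrupted the short nap */
                return 1;
        return free_pages < high_wmark; /* some zone still below high mark  */
}

static void kswapd_try_to_sleep(void)
{
        long remaining = 0;

        if (!sleeping_prematurely(remaining)) {
                /* schedule_timeout(HZ/10) would run here; assume it expired */
                remaining = 0;
        }
        if (!sleeping_prematurely(remaining))
                printf("kswapd: going fully to sleep\n");
        else
                printf("kswapd: premature, back to balance_pgdat()\n");
}

int main(void)
{
        kswapd_try_to_sleep();          /* zones healthy: full sleep        */
        free_pages = 40;
        kswapd_try_to_sleep();          /* zones low: keep reclaiming       */
        return 0;
}

Run as-is it takes the full-sleep branch first and the premature branch once the fake free-page count is pushed below the high watermark.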
@@ -2260,148 +2331,43 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
2260 2331
2261#ifdef CONFIG_HIBERNATION 2332#ifdef CONFIG_HIBERNATION
2262/* 2333/*
2263 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages 2334 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
2264 * from LRU lists system-wide, for given pass and priority.
2265 *
2266 * For pass > 3 we also try to shrink the LRU lists that contain a few pages
2267 */
2268static void shrink_all_zones(unsigned long nr_pages, int prio,
2269 int pass, struct scan_control *sc)
2270{
2271 struct zone *zone;
2272 unsigned long nr_reclaimed = 0;
2273 struct zone_reclaim_stat *reclaim_stat;
2274
2275 for_each_populated_zone(zone) {
2276 enum lru_list l;
2277
2278 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
2279 continue;
2280
2281 for_each_evictable_lru(l) {
2282 enum zone_stat_item ls = NR_LRU_BASE + l;
2283 unsigned long lru_pages = zone_page_state(zone, ls);
2284
2285 /* For pass = 0, we don't shrink the active list */
2286 if (pass == 0 && (l == LRU_ACTIVE_ANON ||
2287 l == LRU_ACTIVE_FILE))
2288 continue;
2289
2290 reclaim_stat = get_reclaim_stat(zone, sc);
2291 reclaim_stat->nr_saved_scan[l] +=
2292 (lru_pages >> prio) + 1;
2293 if (reclaim_stat->nr_saved_scan[l]
2294 >= nr_pages || pass > 3) {
2295 unsigned long nr_to_scan;
2296
2297 reclaim_stat->nr_saved_scan[l] = 0;
2298 nr_to_scan = min(nr_pages, lru_pages);
2299 nr_reclaimed += shrink_list(l, nr_to_scan, zone,
2300 sc, prio);
2301 if (nr_reclaimed >= nr_pages) {
2302 sc->nr_reclaimed += nr_reclaimed;
2303 return;
2304 }
2305 }
2306 }
2307 }
2308 sc->nr_reclaimed += nr_reclaimed;
2309}
2310
2311/*
2312 * Try to free `nr_pages' of memory, system-wide, and return the number of
2313 * freed pages. 2335 * freed pages.
2314 * 2336 *
2315 * Rather than trying to age LRUs the aim is to preserve the overall 2337 * Rather than trying to age LRUs the aim is to preserve the overall
2316 * LRU order by reclaiming preferentially 2338 * LRU order by reclaiming preferentially
2317 * inactive > active > active referenced > active mapped 2339 * inactive > active > active referenced > active mapped
2318 */ 2340 */
2319unsigned long shrink_all_memory(unsigned long nr_pages) 2341unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2320{ 2342{
2321 unsigned long lru_pages, nr_slab;
2322 int pass;
2323 struct reclaim_state reclaim_state; 2343 struct reclaim_state reclaim_state;
2324 struct scan_control sc = { 2344 struct scan_control sc = {
2325 .gfp_mask = GFP_KERNEL, 2345 .gfp_mask = GFP_HIGHUSER_MOVABLE,
2326 .may_unmap = 0, 2346 .may_swap = 1,
2347 .may_unmap = 1,
2327 .may_writepage = 1, 2348 .may_writepage = 1,
2349 .nr_to_reclaim = nr_to_reclaim,
2350 .hibernation_mode = 1,
2351 .swappiness = vm_swappiness,
2352 .order = 0,
2328 .isolate_pages = isolate_pages_global, 2353 .isolate_pages = isolate_pages_global,
2329 .nr_reclaimed = 0,
2330 }; 2354 };
2355 struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2356 struct task_struct *p = current;
2357 unsigned long nr_reclaimed;
2331 2358
2332 current->reclaim_state = &reclaim_state; 2359 p->flags |= PF_MEMALLOC;
2333 2360 lockdep_set_current_reclaim_state(sc.gfp_mask);
2334 lru_pages = global_reclaimable_pages(); 2361 reclaim_state.reclaimed_slab = 0;
2335 nr_slab = global_page_state(NR_SLAB_RECLAIMABLE); 2362 p->reclaim_state = &reclaim_state;
2336 /* If slab caches are huge, it's better to hit them first */
2337 while (nr_slab >= lru_pages) {
2338 reclaim_state.reclaimed_slab = 0;
2339 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
2340 if (!reclaim_state.reclaimed_slab)
2341 break;
2342
2343 sc.nr_reclaimed += reclaim_state.reclaimed_slab;
2344 if (sc.nr_reclaimed >= nr_pages)
2345 goto out;
2346
2347 nr_slab -= reclaim_state.reclaimed_slab;
2348 }
2349
2350 /*
2351 * We try to shrink LRUs in 5 passes:
2352 * 0 = Reclaim from inactive_list only
2353 * 1 = Reclaim from active list but don't reclaim mapped
2354 * 2 = 2nd pass of type 1
2355 * 3 = Reclaim mapped (normal reclaim)
2356 * 4 = 2nd pass of type 3
2357 */
2358 for (pass = 0; pass < 5; pass++) {
2359 int prio;
2360
2361 /* Force reclaiming mapped pages in the passes #3 and #4 */
2362 if (pass > 2)
2363 sc.may_unmap = 1;
2364
2365 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
2366 unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
2367
2368 sc.nr_scanned = 0;
2369 sc.swap_cluster_max = nr_to_scan;
2370 shrink_all_zones(nr_to_scan, prio, pass, &sc);
2371 if (sc.nr_reclaimed >= nr_pages)
2372 goto out;
2373
2374 reclaim_state.reclaimed_slab = 0;
2375 shrink_slab(sc.nr_scanned, sc.gfp_mask,
2376 global_reclaimable_pages());
2377 sc.nr_reclaimed += reclaim_state.reclaimed_slab;
2378 if (sc.nr_reclaimed >= nr_pages)
2379 goto out;
2380
2381 if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
2382 congestion_wait(BLK_RW_ASYNC, HZ / 10);
2383 }
2384 }
2385
2386 /*
2387 * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
2388 * something in slab caches
2389 */
2390 if (!sc.nr_reclaimed) {
2391 do {
2392 reclaim_state.reclaimed_slab = 0;
2393 shrink_slab(nr_pages, sc.gfp_mask,
2394 global_reclaimable_pages());
2395 sc.nr_reclaimed += reclaim_state.reclaimed_slab;
2396 } while (sc.nr_reclaimed < nr_pages &&
2397 reclaim_state.reclaimed_slab > 0);
2398 }
2399 2363
2364 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2400 2365
2401out: 2366 p->reclaim_state = NULL;
2402 current->reclaim_state = NULL; 2367 lockdep_clear_current_reclaim_state();
2368 p->flags &= ~PF_MEMALLOC;
2403 2369
2404 return sc.nr_reclaimed; 2370 return nr_reclaimed;
2405} 2371}
2406#endif /* CONFIG_HIBERNATION */ 2372#endif /* CONFIG_HIBERNATION */
2407 2373
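After this patch the hibernation path no longer runs its own multi-pass LRU/slab loop: shrink_all_memory() just packages the request into a scan_control (GFP_HIGHUSER_MOVABLE, hibernation_mode = 1, nr_to_reclaim set to the caller's target) and hands it to do_try_to_free_pages() under PF_MEMALLOC. A stripped-down model of that wiring, with the reclaim core stubbed out and only the fields relevant to the example, is sketched below.

/* Sketch of the new shrink_all_memory() shape: package the hibernation
 * request into a scan_control and delegate to the common reclaim path. */
#include <stdio.h>

struct scan_control {
        unsigned long nr_to_reclaim;
        int hibernation_mode;
        int may_swap, may_unmap, may_writepage;
};

static unsigned long do_try_to_free_pages(struct scan_control *sc)
{
        return sc->nr_to_reclaim;       /* pretend the goal was met */
}

static unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
        struct scan_control sc = {
                .nr_to_reclaim    = nr_to_reclaim,
                .hibernation_mode = 1,  /* skip the congestion_wait() naps */
                .may_swap = 1, .may_unmap = 1, .may_writepage = 1,
        };

        /* in the kernel this runs with PF_MEMALLOC set and a reclaim_state */
        return do_try_to_free_pages(&sc);
}

int main(void)
{
        printf("freed %lu pages\n", shrink_all_memory(1024));
        return 0;
}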
@@ -2451,6 +2417,17 @@ int kswapd_run(int nid)
2451 return ret; 2417 return ret;
2452} 2418}
2453 2419
2420/*
2421 * Called by memory hotplug when all memory in a node is offlined.
2422 */
2423void kswapd_stop(int nid)
2424{
2425 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2426
2427 if (kswapd)
2428 kthread_stop(kswapd);
2429}
2430
2454static int __init kswapd_init(void) 2431static int __init kswapd_init(void)
2455{ 2432{
2456 int nid; 2433 int nid;
@@ -2553,8 +2530,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2553 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 2530 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2554 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 2531 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2555 .may_swap = 1, 2532 .may_swap = 1,
2556 .swap_cluster_max = max_t(unsigned long, nr_pages, 2533 .nr_to_reclaim = max_t(unsigned long, nr_pages,
2557 SWAP_CLUSTER_MAX), 2534 SWAP_CLUSTER_MAX),
2558 .gfp_mask = gfp_mask, 2535 .gfp_mask = gfp_mask,
2559 .swappiness = vm_swappiness, 2536 .swappiness = vm_swappiness,
2560 .order = order, 2537 .order = order,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index dad2327e4580..6051fbab67ba 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -683,6 +683,9 @@ static const char * const vmstat_text[] = {
683 "slabs_scanned", 683 "slabs_scanned",
684 "kswapd_steal", 684 "kswapd_steal",
685 "kswapd_inodesteal", 685 "kswapd_inodesteal",
686 "kswapd_low_wmark_hit_quickly",
687 "kswapd_high_wmark_hit_quickly",
688 "kswapd_skip_congestion_wait",
686 "pageoutrun", 689 "pageoutrun",
687 "allocstall", 690 "allocstall",
688 691
diff --git a/net/core/dev.c b/net/core/dev.c
index 6fe7d739e59b..be9924f60ec3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5035,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
5035 rollback_registered(dev); 5035 rollback_registered(dev);
5036 dev->reg_state = NETREG_UNREGISTERED; 5036 dev->reg_state = NETREG_UNREGISTERED;
5037 } 5037 }
5038 /*
5039 * Prevent userspace races by waiting until the network
 5040 * device is fully set up before sending notifications.
5041 */
5042 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5038 5043
5039out: 5044out:
5040 return ret; 5045 return ret;
@@ -5597,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5597 /* Notify protocols, that a new device appeared. */ 5602 /* Notify protocols, that a new device appeared. */
5598 call_netdevice_notifiers(NETDEV_REGISTER, dev); 5603 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5599 5604
5605 /*
5606 * Prevent userspace races by waiting until the network
 5607 * device is fully set up before sending notifications.
5608 */
5609 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5610
5600 synchronize_net(); 5611 synchronize_net();
5601 err = 0; 5612 err = 0;
5602out: 5613out:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a568199..794bcb897ff0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1364 case NETDEV_UNREGISTER: 1364 case NETDEV_UNREGISTER:
1365 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 1365 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
1366 break; 1366 break;
1367 case NETDEV_REGISTER:
1368 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1369 break;
1370 case NETDEV_UP: 1367 case NETDEV_UP:
1371 case NETDEV_DOWN: 1368 case NETDEV_DOWN:
1372 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); 1369 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1373 break; 1370 break;
1371 case NETDEV_POST_INIT:
1372 case NETDEV_REGISTER:
1374 case NETDEV_CHANGE: 1373 case NETDEV_CHANGE:
1375 case NETDEV_GOING_DOWN: 1374 case NETDEV_GOING_DOWN:
1375 case NETDEV_UNREGISTER_BATCH:
1376 break; 1376 break;
1377 default: 1377 default:
1378 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 1378 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bfa3e7865a8c..93c4e060c91e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -93,7 +93,7 @@ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
93 93
94 94
95/* Pipe buffer operations for a socket. */ 95/* Pipe buffer operations for a socket. */
96static struct pipe_buf_operations sock_pipe_buf_ops = { 96static const struct pipe_buf_operations sock_pipe_buf_ops = {
97 .can_merge = 0, 97 .can_merge = 0,
98 .map = generic_pipe_buf_map, 98 .map = generic_pipe_buf_map,
99 .unmap = generic_pipe_buf_unmap, 99 .unmap = generic_pipe_buf_unmap,
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 70491d9035eb..0c94a1ac2946 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -166,7 +166,7 @@ config IP_PNP_DHCP
166 166
167 If unsure, say Y. Note that if you want to use DHCP, a DHCP server 167 If unsure, say Y. Note that if you want to use DHCP, a DHCP server
168 must be operating on your network. Read 168 must be operating on your network. Read
169 <file:Documentation/filesystems/nfsroot.txt> for details. 169 <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
170 170
171config IP_PNP_BOOTP 171config IP_PNP_BOOTP
172 bool "IP: BOOTP support" 172 bool "IP: BOOTP support"
@@ -181,7 +181,7 @@ config IP_PNP_BOOTP
181 does BOOTP itself, providing all necessary information on the kernel 181 does BOOTP itself, providing all necessary information on the kernel
182 command line, you can say N here. If unsure, say Y. Note that if you 182 command line, you can say N here. If unsure, say Y. Note that if you
183 want to use BOOTP, a BOOTP server must be operating on your network. 183 want to use BOOTP, a BOOTP server must be operating on your network.
184 Read <file:Documentation/filesystems/nfsroot.txt> for details. 184 Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
185 185
186config IP_PNP_RARP 186config IP_PNP_RARP
187 bool "IP: RARP support" 187 bool "IP: RARP support"
@@ -194,7 +194,7 @@ config IP_PNP_RARP
194 older protocol which is being obsoleted by BOOTP and DHCP), say Y 194 older protocol which is being obsoleted by BOOTP and DHCP), say Y
195 here. Note that if you want to use RARP, a RARP server must be 195 here. Note that if you want to use RARP, a RARP server must be
196 operating on your network. Read 196 operating on your network. Read
197 <file:Documentation/filesystems/nfsroot.txt> for details. 197 <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
198 198
199# not yet ready.. 199# not yet ready..
200# bool ' IP: ARP support' CONFIG_IP_PNP_ARP 200# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4e08b7f2331c..10a6a604bf32 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1446,7 +1446,7 @@ late_initcall(ip_auto_config);
1446 1446
1447/* 1447/*
1448 * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel 1448 * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel
1449 * command line parameter. See Documentation/filesystems/nfsroot.txt. 1449 * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt.
1450 */ 1450 */
1451static int __init ic_proto_name(char *name) 1451static int __init ic_proto_name(char *name)
1452{ 1452{
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e5..331ead3ebd1b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,6 +14,7 @@
14#include <net/route.h> 14#include <net/route.h>
15#include <net/ip.h> 15#include <net/ip.h>
16 16
17#include <linux/netfilter_bridge.h>
17#include <linux/netfilter_ipv4.h> 18#include <linux/netfilter_ipv4.h>
18#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 19#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
19 20
@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
34 return err; 35 return err;
35} 36}
36 37
38static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
39 struct sk_buff *skb)
40{
41#ifdef CONFIG_BRIDGE_NETFILTER
42 if (skb->nf_bridge &&
43 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
44 return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
45#endif
46 if (hooknum == NF_INET_PRE_ROUTING)
47 return IP_DEFRAG_CONNTRACK_IN;
48 else
49 return IP_DEFRAG_CONNTRACK_OUT;
50}
51
37static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, 52static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
38 struct sk_buff *skb, 53 struct sk_buff *skb,
39 const struct net_device *in, 54 const struct net_device *in,
@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
50#endif 65#endif
51 /* Gather fragments. */ 66 /* Gather fragments. */
52 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 67 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
53 if (nf_ct_ipv4_gather_frags(skb, 68 enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
54 hooknum == NF_INET_PRE_ROUTING ? 69 if (nf_ct_ipv4_gather_frags(skb, user))
55 IP_DEFRAG_CONNTRACK_IN :
56 IP_DEFRAG_CONNTRACK_OUT))
57 return NF_STOLEN; 70 return NF_STOLEN;
58 } 71 }
59 return NF_ACCEPT; 72 return NF_ACCEPT;
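nf_ct_defrag_user() gives bridged traffic its own fragment-queue key (IP_DEFRAG_CONNTRACK_BRIDGE_IN) instead of lumping it with ordinary PRE_ROUTING defragmentation, so fragments seen once on the bridge path and again on the routed path land in separate reassembly queues. The same selection, with the bridge test reduced to a plain flag for the sake of a standalone example, can be exercised like this:

/* Standalone rendering of the defrag-user selection added above. */
#include <stdio.h>

enum ip_defrag_users {
        IP_DEFRAG_CONNTRACK_IN,
        IP_DEFRAG_CONNTRACK_OUT,
        IP_DEFRAG_CONNTRACK_BRIDGE_IN,
};

enum hooks { PRE_ROUTING, LOCAL_OUT };

static enum ip_defrag_users defrag_user(enum hooks hooknum, int from_bridge)
{
        if (from_bridge)
                return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
        if (hooknum == PRE_ROUTING)
                return IP_DEFRAG_CONNTRACK_IN;
        return IP_DEFRAG_CONNTRACK_OUT;
}

int main(void)
{
        printf("bridge: %d, pre-routing: %d, local-out: %d\n",
               defrag_user(PRE_ROUTING, 1),
               defrag_user(PRE_ROUTING, 0),
               defrag_user(LOCAL_OUT, 0));
        return 0;
}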
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a289..66fd80ef2473 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
277 277
278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
279 279
280 /* check for timestamp cookie support */
281 memset(&tcp_opt, 0, sizeof(tcp_opt));
282 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
283
284 if (tcp_opt.saw_tstamp)
285 cookie_check_timestamp(&tcp_opt);
286
280 ret = NULL; 287 ret = NULL;
281 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ 288 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
282 if (!req) 289 if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
292 ireq->loc_addr = ip_hdr(skb)->daddr; 299 ireq->loc_addr = ip_hdr(skb)->daddr;
293 ireq->rmt_addr = ip_hdr(skb)->saddr; 300 ireq->rmt_addr = ip_hdr(skb)->saddr;
294 ireq->ecn_ok = 0; 301 ireq->ecn_ok = 0;
302 ireq->snd_wscale = tcp_opt.snd_wscale;
303 ireq->rcv_wscale = tcp_opt.rcv_wscale;
304 ireq->sack_ok = tcp_opt.sack_ok;
305 ireq->wscale_ok = tcp_opt.wscale_ok;
306 ireq->tstamp_ok = tcp_opt.saw_tstamp;
307 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
295 308
296 /* We throwed the options of the initial SYN away, so we hope 309 /* We throwed the options of the initial SYN away, so we hope
297 * the ACK carries the same options again (see RFC1122 4.2.3.8) 310 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
340 } 353 }
341 } 354 }
342 355
343 /* check for timestamp cookie support */
344 memset(&tcp_opt, 0, sizeof(tcp_opt));
345 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
346
347 if (tcp_opt.saw_tstamp)
348 cookie_check_timestamp(&tcp_opt);
349
350 ireq->snd_wscale = tcp_opt.snd_wscale;
351 ireq->rcv_wscale = tcp_opt.rcv_wscale;
352 ireq->sack_ok = tcp_opt.sack_ok;
353 ireq->wscale_ok = tcp_opt.wscale_ok;
354 ireq->tstamp_ok = tcp_opt.saw_tstamp;
355 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
356
357 /* Try to redo what tcp_v4_send_synack did. */ 356 /* Try to redo what tcp_v4_send_synack did. */
358 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); 357 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
359 358
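The syncookies change is purely an ordering one: tcp_parse_options() (and the timestamp-cookie check) now runs before the request sock is allocated, so the freshly parsed snd_wscale/rcv_wscale/sack_ok/wscale_ok/tstamp_ok values can seed the inet_request_sock directly instead of being patched in afterwards, and the parser no longer takes a dst argument. A compressed model of the new order, with simplified stand-in types and a stubbed parser, looks like this:

/* Minimal model of the reordering in cookie_v4_check(): parse the ACK's
 * options first, then fill the request from the parsed values. */
#include <stdio.h>
#include <string.h>

struct tcp_options_received { int saw_tstamp, snd_wscale, rcv_tsval; };
struct inet_request_sock    { int tstamp_ok, snd_wscale, ts_recent; };

static void tcp_parse_options(struct tcp_options_received *opt)
{
        opt->saw_tstamp = 1;            /* pretend the ACK carried options */
        opt->snd_wscale = 7;
        opt->rcv_tsval  = 12345;
}

int main(void)
{
        struct tcp_options_received tcp_opt;
        struct inet_request_sock ireq;

        memset(&tcp_opt, 0, sizeof(tcp_opt));
        tcp_parse_options(&tcp_opt);            /* step 1: parse first       */

        ireq.tstamp_ok  = tcp_opt.saw_tstamp;   /* step 2: then fill request */
        ireq.snd_wscale = tcp_opt.snd_wscale;
        ireq.ts_recent  = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;

        printf("tstamp_ok=%d wscale=%d ts_recent=%d\n",
               ireq.tstamp_ok, ireq.snd_wscale, ireq.ts_recent);
        return 0;
}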
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12cab7d74dba..28e029632493 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3727,7 +3727,7 @@ old_ack:
3727 * the fast version below fails. 3727 * the fast version below fails.
3728 */ 3728 */
3729void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3729void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3730 u8 **hvpp, int estab, struct dst_entry *dst) 3730 u8 **hvpp, int estab)
3731{ 3731{
3732 unsigned char *ptr; 3732 unsigned char *ptr;
3733 struct tcphdr *th = tcp_hdr(skb); 3733 struct tcphdr *th = tcp_hdr(skb);
@@ -3766,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3766 break; 3766 break;
3767 case TCPOPT_WINDOW: 3767 case TCPOPT_WINDOW:
3768 if (opsize == TCPOLEN_WINDOW && th->syn && 3768 if (opsize == TCPOLEN_WINDOW && th->syn &&
3769 !estab && sysctl_tcp_window_scaling && 3769 !estab && sysctl_tcp_window_scaling) {
3770 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
3771 __u8 snd_wscale = *(__u8 *)ptr; 3770 __u8 snd_wscale = *(__u8 *)ptr;
3772 opt_rx->wscale_ok = 1; 3771 opt_rx->wscale_ok = 1;
3773 if (snd_wscale > 14) { 3772 if (snd_wscale > 14) {
@@ -3783,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3783 case TCPOPT_TIMESTAMP: 3782 case TCPOPT_TIMESTAMP:
3784 if ((opsize == TCPOLEN_TIMESTAMP) && 3783 if ((opsize == TCPOLEN_TIMESTAMP) &&
3785 ((estab && opt_rx->tstamp_ok) || 3784 ((estab && opt_rx->tstamp_ok) ||
3786 (!estab && sysctl_tcp_timestamps && 3785 (!estab && sysctl_tcp_timestamps))) {
3787 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
3788 opt_rx->saw_tstamp = 1; 3786 opt_rx->saw_tstamp = 1;
3789 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3787 opt_rx->rcv_tsval = get_unaligned_be32(ptr);
3790 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3788 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3792,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3792 break; 3790 break;
3793 case TCPOPT_SACK_PERM: 3791 case TCPOPT_SACK_PERM:
3794 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3792 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
3795 !estab && sysctl_tcp_sack && 3793 !estab && sysctl_tcp_sack) {
3796 !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
3797 opt_rx->sack_ok = 1; 3794 opt_rx->sack_ok = 1;
3798 tcp_sack_reset(opt_rx); 3795 tcp_sack_reset(opt_rx);
3799 } 3796 }
@@ -3878,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3878 if (tcp_parse_aligned_timestamp(tp, th)) 3875 if (tcp_parse_aligned_timestamp(tp, th))
3879 return 1; 3876 return 1;
3880 } 3877 }
3881 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); 3878 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
3882 return 1; 3879 return 1;
3883} 3880}
3884 3881
@@ -4133,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4133static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4130static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4134{ 4131{
4135 struct tcp_sock *tp = tcp_sk(sk); 4132 struct tcp_sock *tp = tcp_sk(sk);
4136 struct dst_entry *dst = __sk_dst_get(sk);
4137 4133
4138 if (tcp_is_sack(tp) && sysctl_tcp_dsack && 4134 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4139 !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
4140 int mib_idx; 4135 int mib_idx;
4141 4136
4142 if (before(seq, tp->rcv_nxt)) 4137 if (before(seq, tp->rcv_nxt))
@@ -4165,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4165static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 4160static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
4166{ 4161{
4167 struct tcp_sock *tp = tcp_sk(sk); 4162 struct tcp_sock *tp = tcp_sk(sk);
4168 struct dst_entry *dst = __sk_dst_get(sk);
4169 4163
4170 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4164 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4171 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4165 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4172 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4166 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4173 tcp_enter_quickack_mode(sk); 4167 tcp_enter_quickack_mode(sk);
4174 4168
4175 if (tcp_is_sack(tp) && sysctl_tcp_dsack && 4169 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4176 !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
4177 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4170 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4178 4171
4179 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4172 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5428,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5428 u8 *hash_location; 5421 u8 *hash_location;
5429 struct inet_connection_sock *icsk = inet_csk(sk); 5422 struct inet_connection_sock *icsk = inet_csk(sk);
5430 struct tcp_sock *tp = tcp_sk(sk); 5423 struct tcp_sock *tp = tcp_sk(sk);
5431 struct dst_entry *dst = __sk_dst_get(sk);
5432 struct tcp_cookie_values *cvp = tp->cookie_values; 5424 struct tcp_cookie_values *cvp = tp->cookie_values;
5433 int saved_clamp = tp->rx_opt.mss_clamp; 5425 int saved_clamp = tp->rx_opt.mss_clamp;
5434 5426
5435 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst); 5427 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
5436 5428
5437 if (th->ack) { 5429 if (th->ack) {
5438 /* rfc793: 5430 /* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 15e96030ce47..65b8ebfd078a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1262 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; 1262 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1263#endif 1263#endif
1264 1264
1265 ireq = inet_rsk(req);
1266 ireq->loc_addr = daddr;
1267 ireq->rmt_addr = saddr;
1268 ireq->no_srccheck = inet_sk(sk)->transparent;
1269 ireq->opt = tcp_v4_save_options(sk, skb);
1270
1271 dst = inet_csk_route_req(sk, req);
1272 if(!dst)
1273 goto drop_and_free;
1274
1275 tcp_clear_options(&tmp_opt); 1265 tcp_clear_options(&tmp_opt);
1276 tmp_opt.mss_clamp = TCP_MSS_DEFAULT; 1266 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1277 tmp_opt.user_mss = tp->rx_opt.user_mss; 1267 tmp_opt.user_mss = tp->rx_opt.user_mss;
1278 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); 1268 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1279 1269
1280 if (tmp_opt.cookie_plus > 0 && 1270 if (tmp_opt.cookie_plus > 0 &&
1281 tmp_opt.saw_tstamp && 1271 tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1319 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 1309 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1320 tcp_openreq_init(req, &tmp_opt, skb); 1310 tcp_openreq_init(req, &tmp_opt, skb);
1321 1311
1312 ireq = inet_rsk(req);
1313 ireq->loc_addr = daddr;
1314 ireq->rmt_addr = saddr;
1315 ireq->no_srccheck = inet_sk(sk)->transparent;
1316 ireq->opt = tcp_v4_save_options(sk, skb);
1317
1322 if (security_inet_conn_request(sk, skb, req)) 1318 if (security_inet_conn_request(sk, skb, req))
1323 goto drop_and_release; 1319 goto drop_and_free;
1324 1320
1325 if (!want_cookie) 1321 if (!want_cookie)
1326 TCP_ECN_create_request(req, tcp_hdr(skb)); 1322 TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1345 */ 1341 */
1346 if (tmp_opt.saw_tstamp && 1342 if (tmp_opt.saw_tstamp &&
1347 tcp_death_row.sysctl_tw_recycle && 1343 tcp_death_row.sysctl_tw_recycle &&
1344 (dst = inet_csk_route_req(sk, req)) != NULL &&
1348 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1345 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1349 peer->v4daddr == saddr) { 1346 peer->v4daddr == saddr) {
1350 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1347 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d097..f206ee5dda80 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
96 int paws_reject = 0; 96 int paws_reject = 0;
97 97
98 tmp_opt.saw_tstamp = 0;
98 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 99 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
99 tmp_opt.tstamp_ok = 1; 100 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
100 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
101 101
102 if (tmp_opt.saw_tstamp) { 102 if (tmp_opt.saw_tstamp) {
103 tmp_opt.ts_recent = tcptw->tw_ts_recent; 103 tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
526 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 526 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
527 int paws_reject = 0; 527 int paws_reject = 0;
528 528
529 if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) { 529 tmp_opt.saw_tstamp = 0;
530 tmp_opt.tstamp_ok = 1; 530 if (th->doff > (sizeof(struct tcphdr)>>2)) {
531 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL); 531 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
532 532
533 if (tmp_opt.saw_tstamp) { 533 if (tmp_opt.saw_tstamp) {
534 tmp_opt.ts_recent = req->ts_recent; 534 tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d820..383ce237640f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
553 struct tcp_md5sig_key **md5) { 553 struct tcp_md5sig_key **md5) {
554 struct tcp_sock *tp = tcp_sk(sk); 554 struct tcp_sock *tp = tcp_sk(sk);
555 struct tcp_cookie_values *cvp = tp->cookie_values; 555 struct tcp_cookie_values *cvp = tp->cookie_values;
556 struct dst_entry *dst = __sk_dst_get(sk);
557 unsigned remaining = MAX_TCP_OPTION_SPACE; 556 unsigned remaining = MAX_TCP_OPTION_SPACE;
558 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 557 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
559 tcp_cookie_size_check(cvp->cookie_desired) : 558 tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
581 opts->mss = tcp_advertise_mss(sk); 580 opts->mss = tcp_advertise_mss(sk);
582 remaining -= TCPOLEN_MSS_ALIGNED; 581 remaining -= TCPOLEN_MSS_ALIGNED;
583 582
584 if (likely(sysctl_tcp_timestamps && 583 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
585 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
586 *md5 == NULL)) {
587 opts->options |= OPTION_TS; 584 opts->options |= OPTION_TS;
588 opts->tsval = TCP_SKB_CB(skb)->when; 585 opts->tsval = TCP_SKB_CB(skb)->when;
589 opts->tsecr = tp->rx_opt.ts_recent; 586 opts->tsecr = tp->rx_opt.ts_recent;
590 remaining -= TCPOLEN_TSTAMP_ALIGNED; 587 remaining -= TCPOLEN_TSTAMP_ALIGNED;
591 } 588 }
592 if (likely(sysctl_tcp_window_scaling && 589 if (likely(sysctl_tcp_window_scaling)) {
593 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
594 opts->ws = tp->rx_opt.rcv_wscale; 590 opts->ws = tp->rx_opt.rcv_wscale;
595 opts->options |= OPTION_WSCALE; 591 opts->options |= OPTION_WSCALE;
596 remaining -= TCPOLEN_WSCALE_ALIGNED; 592 remaining -= TCPOLEN_WSCALE_ALIGNED;
597 } 593 }
598 if (likely(sysctl_tcp_sack && 594 if (likely(sysctl_tcp_sack)) {
599 !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
600 opts->options |= OPTION_SACK_ADVERTISE; 595 opts->options |= OPTION_SACK_ADVERTISE;
601 if (unlikely(!(OPTION_TS & opts->options))) 596 if (unlikely(!(OPTION_TS & opts->options)))
602 remaining -= TCPOLEN_SACKPERM_ALIGNED; 597 remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk)
2527 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2522 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2528 */ 2523 */
2529 tp->tcp_header_len = sizeof(struct tcphdr) + 2524 tp->tcp_header_len = sizeof(struct tcphdr) +
2530 (sysctl_tcp_timestamps && 2525 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2531 (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
2532 TCPOLEN_TSTAMP_ALIGNED : 0));
2533 2526
2534#ifdef CONFIG_TCP_MD5SIG 2527#ifdef CONFIG_TCP_MD5SIG
2535 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2528 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk)
2555 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2548 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2556 &tp->rcv_wnd, 2549 &tp->rcv_wnd,
2557 &tp->window_clamp, 2550 &tp->window_clamp,
2558 (sysctl_tcp_window_scaling && 2551 sysctl_tcp_window_scaling,
2559 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
2560 &rcv_wscale); 2552 &rcv_wscale);
2561 2553
2562 tp->rx_opt.rcv_wscale = rcv_wscale; 2554 tp->rx_opt.rcv_wscale = rcv_wscale;
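With the RTAX_FEATURE_NO_* checks removed, SYN option selection in tcp_syn_options() is driven by the sysctls alone; a condensed sketch of the new option-space accounting (illustration only, the real function also handles MD5 and cookie options):

    unsigned remaining = MAX_TCP_OPTION_SPACE;

    opts->mss = tcp_advertise_mss(sk);
    remaining -= TCPOLEN_MSS_ALIGNED;
    if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
            opts->options |= OPTION_TS;
            remaining -= TCPOLEN_TSTAMP_ALIGNED;
    }
    if (likely(sysctl_tcp_window_scaling)) {
            opts->options |= OPTION_WSCALE;
            remaining -= TCPOLEN_WSCALE_ALIGNED;
    }
    if (likely(sysctl_tcp_sack)) {
            opts->options |= OPTION_SACK_ADVERTISE;
            if (unlikely(!(OPTION_TS & opts->options)))
                    remaining -= TCPOLEN_SACKPERM_ALIGNED;
    }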
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca9..f0126fdd7e04 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
216 * force rand to be an odd multiple of UDP_HTABLE_SIZE 216 * force rand to be an odd multiple of UDP_HTABLE_SIZE
217 */ 217 */
218 rand = (rand | 1) * (udptable->mask + 1); 218 rand = (rand | 1) * (udptable->mask + 1);
219 for (last = first + udptable->mask + 1; 219 last = first + udptable->mask + 1;
220 first != last; 220 do {
221 first++) {
222 hslot = udp_hashslot(udptable, net, first); 221 hslot = udp_hashslot(udptable, net, first);
223 bitmap_zero(bitmap, PORTS_PER_CHAIN); 222 bitmap_zero(bitmap, PORTS_PER_CHAIN);
224 spin_lock_bh(&hslot->lock); 223 spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
238 snum += rand; 237 snum += rand;
239 } while (snum != first); 238 } while (snum != first);
240 spin_unlock_bh(&hslot->lock); 239 spin_unlock_bh(&hslot->lock);
241 } 240 } while (++first != last);
242 goto fail; 241 goto fail;
243 } else { 242 } else {
244 hslot = udp_hashslot(udptable, net, snum); 243 hslot = udp_hashslot(udptable, net, snum);
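The port scan in udp_lib_get_port() is reshaped from a for loop into a do/while; condensed sketch of the new loop shape (illustration only):

    last = first + udptable->mask + 1;
    do {
            hslot = udp_hashslot(udptable, net, first);
            bitmap_zero(bitmap, PORTS_PER_CHAIN);
            spin_lock_bh(&hslot->lock);
            /* ... probe candidate ports in this slot ... */
            spin_unlock_bh(&hslot->lock);
    } while (++first != last);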
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 5f2ec208a8c3..0956ebabbff2 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -20,6 +20,7 @@
20#include <net/ipv6.h> 20#include <net/ipv6.h>
21#include <net/inet_frag.h> 21#include <net/inet_frag.h>
22 22
23#include <linux/netfilter_bridge.h>
23#include <linux/netfilter_ipv6.h> 24#include <linux/netfilter_ipv6.h>
24#include <net/netfilter/nf_conntrack.h> 25#include <net/netfilter/nf_conntrack.h>
25#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
@@ -187,6 +188,21 @@ out:
187 return nf_conntrack_confirm(skb); 188 return nf_conntrack_confirm(skb);
188} 189}
189 190
191static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
192 struct sk_buff *skb)
193{
194#ifdef CONFIG_BRIDGE_NETFILTER
195 if (skb->nf_bridge &&
196 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
197 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
198#endif
199 if (hooknum == NF_INET_PRE_ROUTING)
200 return IP6_DEFRAG_CONNTRACK_IN;
201 else
202 return IP6_DEFRAG_CONNTRACK_OUT;
203
204}
205
190static unsigned int ipv6_defrag(unsigned int hooknum, 206static unsigned int ipv6_defrag(unsigned int hooknum,
191 struct sk_buff *skb, 207 struct sk_buff *skb,
192 const struct net_device *in, 208 const struct net_device *in,
@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
199 if (skb->nfct) 215 if (skb->nfct)
200 return NF_ACCEPT; 216 return NF_ACCEPT;
201 217
202 reasm = nf_ct_frag6_gather(skb); 218 reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
203
204 /* queued */ 219 /* queued */
205 if (reasm == NULL) 220 if (reasm == NULL)
206 return NF_STOLEN; 221 return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e0b9424fa1b2..312c20adc83f 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -168,13 +168,14 @@ out:
168/* Creation primitives. */ 168/* Creation primitives. */
169 169
170static __inline__ struct nf_ct_frag6_queue * 170static __inline__ struct nf_ct_frag6_queue *
171fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) 171fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
172{ 172{
173 struct inet_frag_queue *q; 173 struct inet_frag_queue *q;
174 struct ip6_create_arg arg; 174 struct ip6_create_arg arg;
175 unsigned int hash; 175 unsigned int hash;
176 176
177 arg.id = id; 177 arg.id = id;
178 arg.user = user;
178 arg.src = src; 179 arg.src = src;
179 arg.dst = dst; 180 arg.dst = dst;
180 181
@@ -559,7 +560,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
559 return 0; 560 return 0;
560} 561}
561 562
562struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) 563struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
563{ 564{
564 struct sk_buff *clone; 565 struct sk_buff *clone;
565 struct net_device *dev = skb->dev; 566 struct net_device *dev = skb->dev;
@@ -605,7 +606,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
605 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) 606 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
606 nf_ct_frag6_evictor(); 607 nf_ct_frag6_evictor();
607 608
608 fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); 609 fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
609 if (fq == NULL) { 610 if (fq == NULL) {
610 pr_debug("Can't find and can't create new queue\n"); 611 pr_debug("Can't find and can't create new queue\n");
611 goto ret_orig; 612 goto ret_orig;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4d98549a6868..3b3a95607125 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -72,6 +72,7 @@ struct frag_queue
72 struct inet_frag_queue q; 72 struct inet_frag_queue q;
73 73
74 __be32 id; /* fragment id */ 74 __be32 id; /* fragment id */
75 u32 user;
75 struct in6_addr saddr; 76 struct in6_addr saddr;
76 struct in6_addr daddr; 77 struct in6_addr daddr;
77 78
@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
141 struct ip6_create_arg *arg = a; 142 struct ip6_create_arg *arg = a;
142 143
143 fq = container_of(q, struct frag_queue, q); 144 fq = container_of(q, struct frag_queue, q);
144 return (fq->id == arg->id && 145 return (fq->id == arg->id && fq->user == arg->user &&
145 ipv6_addr_equal(&fq->saddr, arg->src) && 146 ipv6_addr_equal(&fq->saddr, arg->src) &&
146 ipv6_addr_equal(&fq->daddr, arg->dst)); 147 ipv6_addr_equal(&fq->daddr, arg->dst));
147} 148}
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
163 struct ip6_create_arg *arg = a; 164 struct ip6_create_arg *arg = a;
164 165
165 fq->id = arg->id; 166 fq->id = arg->id;
167 fq->user = arg->user;
166 ipv6_addr_copy(&fq->saddr, arg->src); 168 ipv6_addr_copy(&fq->saddr, arg->src);
167 ipv6_addr_copy(&fq->daddr, arg->dst); 169 ipv6_addr_copy(&fq->daddr, arg->dst);
168} 170}
@@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
243 unsigned int hash; 245 unsigned int hash;
244 246
245 arg.id = id; 247 arg.id = id;
248 arg.user = IP6_DEFRAG_LOCAL_DELIVER;
246 arg.src = src; 249 arg.src = src;
247 arg.dst = dst; 250 arg.dst = dst;
248 251
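Both reassembly paths now carry a defrag "user" key, so fragments queued by conntrack (plain or bridged) and fragments queued for local delivery are never merged into the same queue; the match rule, as the new ip6_frag_match() above reads (illustration only):

    return fq->id == arg->id && fq->user == arg->user &&
           ipv6_addr_equal(&fq->saddr, arg->src) &&
           ipv6_addr_equal(&fq->daddr, arg->dst);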
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f2..7208a06576c6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
185 185
186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
187 187
188 /* check for timestamp cookie support */
189 memset(&tcp_opt, 0, sizeof(tcp_opt));
190 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
191
192 if (tcp_opt.saw_tstamp)
193 cookie_check_timestamp(&tcp_opt);
194
188 ret = NULL; 195 ret = NULL;
189 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 196 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
190 if (!req) 197 if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
218 req->expires = 0UL; 225 req->expires = 0UL;
219 req->retrans = 0; 226 req->retrans = 0;
220 ireq->ecn_ok = 0; 227 ireq->ecn_ok = 0;
228 ireq->snd_wscale = tcp_opt.snd_wscale;
229 ireq->rcv_wscale = tcp_opt.rcv_wscale;
230 ireq->sack_ok = tcp_opt.sack_ok;
231 ireq->wscale_ok = tcp_opt.wscale_ok;
232 ireq->tstamp_ok = tcp_opt.saw_tstamp;
233 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
221 treq->rcv_isn = ntohl(th->seq) - 1; 234 treq->rcv_isn = ntohl(th->seq) - 1;
222 treq->snt_isn = cookie; 235 treq->snt_isn = cookie;
223 236
@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
253 goto out_free; 266 goto out_free;
254 } 267 }
255 268
256 /* check for timestamp cookie support */
257 memset(&tcp_opt, 0, sizeof(tcp_opt));
258 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
259
260 if (tcp_opt.saw_tstamp)
261 cookie_check_timestamp(&tcp_opt);
262
263 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
264
265 ireq->snd_wscale = tcp_opt.snd_wscale;
266 ireq->rcv_wscale = tcp_opt.rcv_wscale;
267 ireq->sack_ok = tcp_opt.sack_ok;
268 ireq->wscale_ok = tcp_opt.wscale_ok;
269 ireq->tstamp_ok = tcp_opt.saw_tstamp;
270
271 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
272 tcp_select_initial_window(tcp_full_space(sk), req->mss, 270 tcp_select_initial_window(tcp_full_space(sk), req->mss,
273 &req->rcv_wnd, &req->window_clamp, 271 &req->rcv_wnd, &req->window_clamp,
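cookie_v6_check() now parses the TCP options before allocating the request sock and copies the parsed values straight into it; roughly the new order (condensed illustration only):

    memset(&tcp_opt, 0, sizeof(tcp_opt));
    tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
    if (tcp_opt.saw_tstamp)
            cookie_check_timestamp(&tcp_opt);

    req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
    /* ... fill in the rest of the request ... */
    ireq->wscale_ok = tcp_opt.wscale_ok;
    ireq->tstamp_ok = tcp_opt.saw_tstamp;
    req->ts_recent  = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;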
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ee9cf62458d4..febfd595a40d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1169 struct inet6_request_sock *treq; 1169 struct inet6_request_sock *treq;
1170 struct ipv6_pinfo *np = inet6_sk(sk); 1170 struct ipv6_pinfo *np = inet6_sk(sk);
1171 struct tcp_sock *tp = tcp_sk(sk); 1171 struct tcp_sock *tp = tcp_sk(sk);
1172 struct dst_entry *dst = __sk_dst_get(sk);
1173 __u32 isn = TCP_SKB_CB(skb)->when; 1172 __u32 isn = TCP_SKB_CB(skb)->when;
1174#ifdef CONFIG_SYN_COOKIES 1173#ifdef CONFIG_SYN_COOKIES
1175 int want_cookie = 0; 1174 int want_cookie = 0;
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1208 tcp_clear_options(&tmp_opt); 1207 tcp_clear_options(&tmp_opt);
1209 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1208 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1210 tmp_opt.user_mss = tp->rx_opt.user_mss; 1209 tmp_opt.user_mss = tp->rx_opt.user_mss;
1211 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); 1210 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1212 1211
1213 if (tmp_opt.cookie_plus > 0 && 1212 if (tmp_opt.cookie_plus > 0 &&
1214 tmp_opt.saw_tstamp && 1213 tmp_opt.saw_tstamp &&
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index b001c361ad30..4300df35d37d 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -249,6 +249,7 @@
249#include <linux/poll.h> 249#include <linux/poll.h>
250#include <linux/capability.h> 250#include <linux/capability.h>
251#include <linux/ctype.h> /* isspace() */ 251#include <linux/ctype.h> /* isspace() */
252#include <linux/string.h> /* skip_spaces() */
252#include <asm/uaccess.h> 253#include <asm/uaccess.h>
253#include <linux/init.h> 254#include <linux/init.h>
254 255
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 7dea882dbb75..156020d138b5 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap,
76 /* Look at the next command */ 76 /* Look at the next command */
77 start = next; 77 start = next;
78 78
79 /* Scrap whitespaces before the command */ 79 /* Scrap whitespaces before the command */
80 while(isspace(*start)) 80 start = skip_spaces(start);
81 start++;
82 81
83 /* ',' is our command separator */ 82 /* ',' is our command separator */
84 next = strchr(start, ','); 83 next = strchr(start, ',');
@@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap,
133 char * endp; 132 char * endp;
134 133
135 /* Scrap whitespaces before the command */ 134 /* Scrap whitespaces before the command */
136 while(isspace(*begp)) 135 begp = skip_spaces(begp);
137 begp++;
138 136
139 /* Convert argument to a number (last arg is the base) */ 137 /* Convert argument to a number (last arg is the base) */
140 addr = simple_strtoul(begp, &endp, 16); 138 addr = simple_strtoul(begp, &endp, 16);
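The open-coded whitespace loops are replaced with skip_spaces() from <linux/string.h>; the transformation is mechanical (illustration only):

    /* old */
    while (isspace(*start))
            start++;
    /* new */
    start = skip_spaces(start);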
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1e428863574f..c18286a2167b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev)
221 return 0; 221 return 0;
222} 222}
223 223
224static struct dev_pm_ops afiucv_pm_ops = { 224static const struct dev_pm_ops afiucv_pm_ops = {
225 .prepare = afiucv_pm_prepare, 225 .prepare = afiucv_pm_prepare,
226 .complete = afiucv_pm_complete, 226 .complete = afiucv_pm_complete,
227 .freeze = afiucv_pm_freeze, 227 .freeze = afiucv_pm_freeze,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3b1f5f5f8de7..fd8b28361a64 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *);
93static int iucv_pm_thaw(struct device *); 93static int iucv_pm_thaw(struct device *);
94static int iucv_pm_restore(struct device *); 94static int iucv_pm_restore(struct device *);
95 95
96static struct dev_pm_ops iucv_pm_ops = { 96static const struct dev_pm_ops iucv_pm_ops = {
97 .prepare = iucv_pm_prepare, 97 .prepare = iucv_pm_prepare,
98 .complete = iucv_pm_complete, 98 .complete = iucv_pm_complete,
99 .freeze = iucv_pm_freeze, 99 .freeze = iucv_pm_freeze,
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b95699f00545..847ffca40184 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1366,6 +1366,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1366 == sysctl_ip_vs_sync_threshold[0])) || 1366 == sysctl_ip_vs_sync_threshold[0])) ||
1367 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && 1367 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
1368 ((cp->state == IP_VS_TCP_S_FIN_WAIT) || 1368 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
1369 (cp->state == IP_VS_TCP_S_CLOSE) ||
1369 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || 1370 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1370 (cp->state == IP_VS_TCP_S_TIME_WAIT))))) 1371 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1371 ip_vs_sync_conn(cp); 1372 ip_vs_sync_conn(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55a6861d26f..6bde12da2fe0 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2714,6 +2714,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
2714 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) 2714 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
2715 return -EINVAL; 2715 return -EINVAL;
2716 2716
2717 memset(usvc, 0, sizeof(*usvc));
2718
2717 usvc->af = nla_get_u16(nla_af); 2719 usvc->af = nla_get_u16(nla_af);
2718#ifdef CONFIG_IP_VS_IPV6 2720#ifdef CONFIG_IP_VS_IPV6
2719 if (usvc->af != AF_INET && usvc->af != AF_INET6) 2721 if (usvc->af != AF_INET && usvc->af != AF_INET6)
@@ -2901,6 +2903,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
2901 if (!(nla_addr && nla_port)) 2903 if (!(nla_addr && nla_port))
2902 return -EINVAL; 2904 return -EINVAL;
2903 2905
2906 memset(udest, 0, sizeof(*udest));
2907
2904 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); 2908 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
2905 udest->port = nla_get_u16(nla_port); 2909 udest->port = nla_get_u16(nla_port);
2906 2910
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index eb0ceb846527..fc70a49c0afd 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file,
482 if (copy_from_user(buf, input, size)) 482 if (copy_from_user(buf, input, size))
483 return -EFAULT; 483 return -EFAULT;
484 484
485 while (isspace(*c)) 485 c = skip_spaces(c);
486 c++;
487 486
488 if (size - (c - buf) < 5) 487 if (size - (c - buf) < 5)
489 return c - buf; 488 return c - buf;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 020562164b56..e0516a22be2e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
415{ 415{
416 struct sock *sk = sock->sk; 416 struct sock *sk = sock->sk;
417 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; 417 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
418 struct sk_buff *skb; 418 struct sk_buff *skb = NULL;
419 struct net_device *dev; 419 struct net_device *dev;
420 __be16 proto = 0; 420 __be16 proto = 0;
421 int err; 421 int err;
@@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
437 */ 437 */
438 438
439 saddr->spkt_device[13] = 0; 439 saddr->spkt_device[13] = 0;
440retry:
440 rcu_read_lock(); 441 rcu_read_lock();
441 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 442 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
442 err = -ENODEV; 443 err = -ENODEV;
@@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
456 if (len > dev->mtu + dev->hard_header_len) 457 if (len > dev->mtu + dev->hard_header_len)
457 goto out_unlock; 458 goto out_unlock;
458 459
459 err = -ENOBUFS; 460 if (!skb) {
460 skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL); 461 size_t reserved = LL_RESERVED_SPACE(dev);
461 462 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
462 /* 463
463 * If the write buffer is full, then tough. At this level the user 464 rcu_read_unlock();
464 * gets to deal with the problem - do your own algorithmic backoffs. 465 skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
465 * That's far more flexible. 466 if (skb == NULL)
466 */ 467 return -ENOBUFS;
467 468 /* FIXME: Save some space for broken drivers that write a hard
468 if (skb == NULL) 469 * header at transmission time by themselves. PPP is the notable
469 goto out_unlock; 470 * one here. This should really be fixed at the driver level.
470 471 */
471 /* 472 skb_reserve(skb, reserved);
472 * Fill it in 473 skb_reset_network_header(skb);
473 */ 474
474 475 /* Try to align data part correctly */
475 /* FIXME: Save some space for broken drivers that write a 476 if (hhlen) {
476 * hard header at transmission time by themselves. PPP is the 477 skb->data -= hhlen;
477 * notable one here. This should really be fixed at the driver level. 478 skb->tail -= hhlen;
478 */ 479 if (len < hhlen)
479 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 480 skb_reset_network_header(skb);
480 skb_reset_network_header(skb); 481 }
481 482 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
482 /* Try to align data part correctly */ 483 if (err)
483 if (dev->header_ops) { 484 goto out_free;
484 skb->data -= dev->hard_header_len; 485 goto retry;
485 skb->tail -= dev->hard_header_len;
486 if (len < dev->hard_header_len)
487 skb_reset_network_header(skb);
488 } 486 }
489 487
490 /* Returns -EFAULT on error */ 488
491 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
492 skb->protocol = proto; 489 skb->protocol = proto;
493 skb->dev = dev; 490 skb->dev = dev;
494 skb->priority = sk->sk_priority; 491 skb->priority = sk->sk_priority;
495 skb->mark = sk->sk_mark; 492 skb->mark = sk->sk_mark;
496 if (err)
497 goto out_free;
498
499 /*
500 * Now send it
501 */
502 493
503 dev_queue_xmit(skb); 494 dev_queue_xmit(skb);
504 rcu_read_unlock(); 495 rcu_read_unlock();
505 return len; 496 return len;
506 497
507out_free:
508 kfree_skb(skb);
509out_unlock: 498out_unlock:
510 rcu_read_unlock(); 499 rcu_read_unlock();
500out_free:
501 kfree_skb(skb);
511 return err; 502 return err;
512} 503}
513 504
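packet_sendmsg_spkt() now allocates the buffer outside the RCU read-side section, since sock_wmalloc() with GFP_KERNEL may sleep, and then retries the lockless device lookup with the buffer already in hand; condensed sketch of the new flow (illustration only):

    retry:
            rcu_read_lock();
            dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
            /* ... validate dev, proto and len ... */
            if (!skb) {
                    rcu_read_unlock();            /* cannot sleep under RCU */
                    skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
                    /* ... reserve headroom, copy the payload ... */
                    goto retry;                   /* re-validate the device */
            }
            dev_queue_xmit(skb);
            rcu_read_unlock();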
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 536ebe5d3f6b..3b8992361042 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
182 ic = conn->c_transport_data; 182 ic = conn->c_transport_data;
183 dev_addr = &ic->i_cm_id->route.addr.dev_addr; 183 dev_addr = &ic->i_cm_id->route.addr.dev_addr;
184 184
185 ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 185 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
186 ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 186 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
187 187
188 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); 188 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
189 iinfo->max_send_wr = ic->i_send_ring.w_nr; 189 iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index db224f7c2937..b28fa8525b24 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn,
184 ic = conn->c_transport_data; 184 ic = conn->c_transport_data;
185 dev_addr = &ic->i_cm_id->route.addr.dev_addr; 185 dev_addr = &ic->i_cm_id->route.addr.dev_addr;
186 186
187 ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 187 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
188 ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 188 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
189 189
190 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); 190 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
191 iinfo->max_send_wr = ic->i_send_ring.w_nr; 191 iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b845e2293dfe..1c924ee0a1ef 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -16,8 +16,6 @@
16 16
17#define RPCDBG_FACILITY RPCDBG_SVCXPRT 17#define RPCDBG_FACILITY RPCDBG_SVCXPRT
18 18
19#define SVC_MAX_WAKING 5
20
21static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); 19static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
22static int svc_deferred_recv(struct svc_rqst *rqstp); 20static int svc_deferred_recv(struct svc_rqst *rqstp);
23static struct cache_deferred_req *svc_defer(struct cache_req *req); 21static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
306 struct svc_pool *pool; 304 struct svc_pool *pool;
307 struct svc_rqst *rqstp; 305 struct svc_rqst *rqstp;
308 int cpu; 306 int cpu;
309 int thread_avail;
310 307
311 if (!(xprt->xpt_flags & 308 if (!(xprt->xpt_flags &
312 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) 309 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
318 315
319 spin_lock_bh(&pool->sp_lock); 316 spin_lock_bh(&pool->sp_lock);
320 317
318 if (!list_empty(&pool->sp_threads) &&
319 !list_empty(&pool->sp_sockets))
320 printk(KERN_ERR
321 "svc_xprt_enqueue: "
322 "threads and transports both waiting??\n");
323
321 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { 324 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
322 /* Don't enqueue dead transports */ 325 /* Don't enqueue dead transports */
323 dprintk("svc: transport %p is dead, not enqueued\n", xprt); 326 dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
358 } 361 }
359 362
360 process: 363 process:
361 /* Work out whether threads are available */ 364 if (!list_empty(&pool->sp_threads)) {
362 thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
363 if (pool->sp_nwaking >= SVC_MAX_WAKING) {
364 /* too many threads are runnable and trying to wake up */
365 thread_avail = 0;
366 pool->sp_stats.overloads_avoided++;
367 }
368
369 if (thread_avail) {
370 rqstp = list_entry(pool->sp_threads.next, 365 rqstp = list_entry(pool->sp_threads.next,
371 struct svc_rqst, 366 struct svc_rqst,
372 rq_list); 367 rq_list);
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
381 svc_xprt_get(xprt); 376 svc_xprt_get(xprt);
382 rqstp->rq_reserved = serv->sv_max_mesg; 377 rqstp->rq_reserved = serv->sv_max_mesg;
383 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 378 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
384 rqstp->rq_waking = 1;
385 pool->sp_nwaking++;
386 pool->sp_stats.threads_woken++; 379 pool->sp_stats.threads_woken++;
387 BUG_ON(xprt->xpt_pool != pool); 380 BUG_ON(xprt->xpt_pool != pool);
388 wake_up(&rqstp->rq_wait); 381 wake_up(&rqstp->rq_wait);
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
651 return -EINTR; 644 return -EINTR;
652 645
653 spin_lock_bh(&pool->sp_lock); 646 spin_lock_bh(&pool->sp_lock);
654 if (rqstp->rq_waking) {
655 rqstp->rq_waking = 0;
656 pool->sp_nwaking--;
657 BUG_ON(pool->sp_nwaking < 0);
658 }
659 xprt = svc_xprt_dequeue(pool); 647 xprt = svc_xprt_dequeue(pool);
660 if (xprt) { 648 if (xprt) {
661 rqstp->rq_xprt = xprt; 649 rqstp->rq_xprt = xprt;
@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
1204 struct svc_pool *pool = p; 1192 struct svc_pool *pool = p;
1205 1193
1206 if (p == SEQ_START_TOKEN) { 1194 if (p == SEQ_START_TOKEN) {
1207 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n"); 1195 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
1208 return 0; 1196 return 0;
1209 } 1197 }
1210 1198
1211 seq_printf(m, "%u %lu %lu %lu %lu %lu\n", 1199 seq_printf(m, "%u %lu %lu %lu %lu\n",
1212 pool->sp_id, 1200 pool->sp_id,
1213 pool->sp_stats.packets, 1201 pool->sp_stats.packets,
1214 pool->sp_stats.sockets_queued, 1202 pool->sp_stats.sockets_queued,
1215 pool->sp_stats.threads_woken, 1203 pool->sp_stats.threads_woken,
1216 pool->sp_stats.overloads_avoided,
1217 pool->sp_stats.threads_timedout); 1204 pool->sp_stats.threads_timedout);
1218 1205
1219 return 0; 1206 return 0;
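With the SVC_MAX_WAKING throttle gone, svc_xprt_enqueue() simply hands the transport to the first idle thread if one exists; condensed from the new code above (illustration only):

    if (!list_empty(&pool->sp_threads)) {
            rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, rq_list);
            pool->sp_stats.threads_woken++;
            wake_up(&rqstp->rq_wait);
    }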
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 4a8f6558718a..d8c041114497 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(uid_t uid)
655 return NULL; 655 return NULL;
656} 656}
657 657
658static int unix_gid_find(uid_t uid, struct group_info **gip, 658static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
659 struct svc_rqst *rqstp)
660{ 659{
661 struct unix_gid *ug = unix_gid_lookup(uid); 660 struct unix_gid *ug;
661 struct group_info *gi;
662 int ret;
663
664 ug = unix_gid_lookup(uid);
662 if (!ug) 665 if (!ug)
663 return -EAGAIN; 666 return ERR_PTR(-EAGAIN);
664 switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) { 667 ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
668 switch (ret) {
665 case -ENOENT: 669 case -ENOENT:
666 *gip = NULL; 670 return ERR_PTR(-ENOENT);
667 return 0;
668 case 0: 671 case 0:
669 *gip = ug->gi; 672 gi = get_group_info(ug->gi);
670 get_group_info(*gip);
671 cache_put(&ug->h, &unix_gid_cache); 673 cache_put(&ug->h, &unix_gid_cache);
672 return 0; 674 return gi;
673 default: 675 default:
674 return -EAGAIN; 676 return ERR_PTR(-EAGAIN);
675 } 677 }
676} 678}
677 679
@@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
681 struct sockaddr_in *sin; 683 struct sockaddr_in *sin;
682 struct sockaddr_in6 *sin6, sin6_storage; 684 struct sockaddr_in6 *sin6, sin6_storage;
683 struct ip_map *ipm; 685 struct ip_map *ipm;
686 struct group_info *gi;
687 struct svc_cred *cred = &rqstp->rq_cred;
684 688
685 switch (rqstp->rq_addr.ss_family) { 689 switch (rqstp->rq_addr.ss_family) {
686 case AF_INET: 690 case AF_INET:
@@ -721,6 +725,17 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
721 ip_map_cached_put(rqstp, ipm); 725 ip_map_cached_put(rqstp, ipm);
722 break; 726 break;
723 } 727 }
728
729 gi = unix_gid_find(cred->cr_uid, rqstp);
730 switch (PTR_ERR(gi)) {
731 case -EAGAIN:
732 return SVC_DROP;
733 case -ENOENT:
734 break;
735 default:
736 put_group_info(cred->cr_group_info);
737 cred->cr_group_info = gi;
738 }
724 return SVC_OK; 739 return SVC_OK;
725} 740}
726 741
@@ -817,19 +832,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
817 slen = svc_getnl(argv); /* gids length */ 832 slen = svc_getnl(argv); /* gids length */
818 if (slen > 16 || (len -= (slen + 2)*4) < 0) 833 if (slen > 16 || (len -= (slen + 2)*4) < 0)
819 goto badcred; 834 goto badcred;
820 if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp) 835 cred->cr_group_info = groups_alloc(slen);
821 == -EAGAIN) 836 if (cred->cr_group_info == NULL)
822 return SVC_DROP; 837 return SVC_DROP;
823 if (cred->cr_group_info == NULL) { 838 for (i = 0; i < slen; i++)
824 cred->cr_group_info = groups_alloc(slen); 839 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
825 if (cred->cr_group_info == NULL)
826 return SVC_DROP;
827 for (i = 0; i < slen; i++)
828 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
829 } else {
830 for (i = 0; i < slen ; i++)
831 svc_getnl(argv);
832 }
833 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 840 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
834 *authp = rpc_autherr_badverf; 841 *authp = rpc_autherr_badverf;
835 return SVC_DENIED; 842 return SVC_DENIED;
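unix_gid_find() now returns a group_info pointer using the kernel's ERR_PTR convention instead of filling an out parameter; the caller in svcauth_unix_set_client() dispatches on PTR_ERR(), as sketched from the new code (illustration only):

    gi = unix_gid_find(cred->cr_uid, rqstp);
    switch (PTR_ERR(gi)) {
    case -EAGAIN:
            return SVC_DROP;        /* cache upcall still pending       */
    case -ENOENT:
            break;                  /* keep the AUTH_UNIX-supplied gids */
    default:
            put_group_info(cred->cr_group_info);
            cred->cr_group_info = gi;
    }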
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 81a67a458e78..445e8845f0a4 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
13use strict; 13use strict;
14 14
15my $P = $0; 15my $P = $0;
16my $V = '0.21'; 16my $V = '0.23';
17 17
18use Getopt::Long qw(:config no_auto_abbrev); 18use Getopt::Long qw(:config no_auto_abbrev);
19 19
@@ -23,16 +23,19 @@ my $email_usename = 1;
23my $email_maintainer = 1; 23my $email_maintainer = 1;
24my $email_list = 1; 24my $email_list = 1;
25my $email_subscriber_list = 0; 25my $email_subscriber_list = 0;
26my $email_git = 1;
27my $email_git_penguin_chiefs = 0; 26my $email_git_penguin_chiefs = 0;
27my $email_git = 1;
28my $email_git_blame = 0;
28my $email_git_min_signatures = 1; 29my $email_git_min_signatures = 1;
29my $email_git_max_maintainers = 5; 30my $email_git_max_maintainers = 5;
30my $email_git_min_percent = 5; 31my $email_git_min_percent = 5;
31my $email_git_since = "1-year-ago"; 32my $email_git_since = "1-year-ago";
32my $email_git_blame = 0; 33my $email_hg_since = "-365";
33my $email_remove_duplicates = 1; 34my $email_remove_duplicates = 1;
34my $output_multiline = 1; 35my $output_multiline = 1;
35my $output_separator = ", "; 36my $output_separator = ", ";
37my $output_roles = 0;
38my $output_rolestats = 0;
36my $scm = 0; 39my $scm = 0;
37my $web = 0; 40my $web = 0;
38my $subsystem = 0; 41my $subsystem = 0;
@@ -64,21 +67,52 @@ my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)";
64my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])"; 67my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
65my $rfc822_char = '[\\000-\\377]'; 68my $rfc822_char = '[\\000-\\377]';
66 69
70# VCS command support: class-like functions and strings
71
72my %VCS_cmds;
73
74my %VCS_cmds_git = (
75 "execute_cmd" => \&git_execute_cmd,
76 "available" => '(which("git") ne "") && (-d ".git")',
77 "find_signers_cmd" => "git log --since=\$email_git_since -- \$file",
78 "find_commit_signers_cmd" => "git log -1 \$commit",
79 "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
80 "blame_file_cmd" => "git blame -l \$file",
81 "commit_pattern" => "^commit [0-9a-f]{40,40}",
82 "blame_commit_pattern" => "^([0-9a-f]+) "
83);
84
85my %VCS_cmds_hg = (
86 "execute_cmd" => \&hg_execute_cmd,
87 "available" => '(which("hg") ne "") && (-d ".hg")',
88 "find_signers_cmd" =>
89 "hg log --date=\$email_hg_since" .
90 " --template='commit {node}\\n{desc}\\n' -- \$file",
91 "find_commit_signers_cmd" => "hg log --template='{desc}\\n' -r \$commit",
92 "blame_range_cmd" => "", # not supported
93 "blame_file_cmd" => "hg blame -c \$file",
94 "commit_pattern" => "^commit [0-9a-f]{40,40}",
95 "blame_commit_pattern" => "^([0-9a-f]+):"
96);
97
67if (!GetOptions( 98if (!GetOptions(
68 'email!' => \$email, 99 'email!' => \$email,
69 'git!' => \$email_git, 100 'git!' => \$email_git,
101 'git-blame!' => \$email_git_blame,
70 'git-chief-penguins!' => \$email_git_penguin_chiefs, 102 'git-chief-penguins!' => \$email_git_penguin_chiefs,
71 'git-min-signatures=i' => \$email_git_min_signatures, 103 'git-min-signatures=i' => \$email_git_min_signatures,
72 'git-max-maintainers=i' => \$email_git_max_maintainers, 104 'git-max-maintainers=i' => \$email_git_max_maintainers,
73 'git-min-percent=i' => \$email_git_min_percent, 105 'git-min-percent=i' => \$email_git_min_percent,
74 'git-since=s' => \$email_git_since, 106 'git-since=s' => \$email_git_since,
75 'git-blame!' => \$email_git_blame, 107 'hg-since=s' => \$email_hg_since,
76 'remove-duplicates!' => \$email_remove_duplicates, 108 'remove-duplicates!' => \$email_remove_duplicates,
77 'm!' => \$email_maintainer, 109 'm!' => \$email_maintainer,
78 'n!' => \$email_usename, 110 'n!' => \$email_usename,
79 'l!' => \$email_list, 111 'l!' => \$email_list,
80 's!' => \$email_subscriber_list, 112 's!' => \$email_subscriber_list,
81 'multiline!' => \$output_multiline, 113 'multiline!' => \$output_multiline,
114 'roles!' => \$output_roles,
115 'rolestats!' => \$output_rolestats,
82 'separator=s' => \$output_separator, 116 'separator=s' => \$output_separator,
83 'subsystem!' => \$subsystem, 117 'subsystem!' => \$subsystem,
84 'status!' => \$status, 118 'status!' => \$status,
@@ -90,8 +124,7 @@ if (!GetOptions(
90 'v|version' => \$version, 124 'v|version' => \$version,
91 'h|help' => \$help, 125 'h|help' => \$help,
92 )) { 126 )) {
93 usage(); 127 die "$P: invalid argument - use --help if necessary\n";
94 die "$P: invalid argument\n";
95} 128}
96 129
97if ($help != 0) { 130if ($help != 0) {
@@ -113,6 +146,10 @@ if ($output_separator ne ", ") {
113 $output_multiline = 0; 146 $output_multiline = 0;
114} 147}
115 148
149if ($output_rolestats) {
150 $output_roles = 1;
151}
152
116my $selections = $email + $scm + $status + $subsystem + $web; 153my $selections = $email + $scm + $status + $subsystem + $web;
117if ($selections == 0) { 154if ($selections == 0) {
118 usage(); 155 usage();
@@ -175,7 +212,7 @@ if ($email_remove_duplicates) {
175 next if ($line =~ m/^\s*$/); 212 next if ($line =~ m/^\s*$/);
176 213
177 my ($name, $address) = parse_email($line); 214 my ($name, $address) = parse_email($line);
178 $line = format_email($name, $address); 215 $line = format_email($name, $address, $email_usename);
179 216
180 next if ($line =~ m/^\s*$/); 217 next if ($line =~ m/^\s*$/);
181 218
@@ -207,12 +244,10 @@ foreach my $file (@ARGV) {
207 push(@files, $file); 244 push(@files, $file);
208 if (-f $file && $keywords) { 245 if (-f $file && $keywords) {
209 open(FILE, "<$file") or die "$P: Can't open ${file}\n"; 246 open(FILE, "<$file") or die "$P: Can't open ${file}\n";
210 while (<FILE>) { 247 my $text = do { local($/) ; <FILE> };
211 my $patch_line = $_; 248 foreach my $line (keys %keyword_hash) {
212 foreach my $line (keys %keyword_hash) { 249 if ($text =~ m/$keyword_hash{$line}/x) {
213 if ($patch_line =~ m/^.*$keyword_hash{$line}/x) { 250 push(@keyword_tvi, $line);
214 push(@keyword_tvi, $line);
215 }
216 } 251 }
217 } 252 }
218 close(FILE); 253 close(FILE);
@@ -304,11 +339,11 @@ foreach my $file (@files) {
304 } 339 }
305 340
306 if ($email && $email_git) { 341 if ($email && $email_git) {
307 recent_git_signoffs($file); 342 vcs_file_signoffs($file);
308 } 343 }
309 344
310 if ($email && $email_git_blame) { 345 if ($email && $email_git_blame) {
311 git_assign_blame($file); 346 vcs_file_blame($file);
312 } 347 }
313} 348}
314 349
@@ -324,11 +359,11 @@ if ($email) {
324 if ($chief =~ m/^(.*):(.*)/) { 359 if ($chief =~ m/^(.*):(.*)/) {
325 my $email_address; 360 my $email_address;
326 361
327 $email_address = format_email($1, $2); 362 $email_address = format_email($1, $2, $email_usename);
328 if ($email_git_penguin_chiefs) { 363 if ($email_git_penguin_chiefs) {
329 push(@email_to, $email_address); 364 push(@email_to, [$email_address, 'chief penguin']);
330 } else { 365 } else {
331 @email_to = grep(!/${email_address}/, @email_to); 366 @email_to = grep($_->[0] !~ /${email_address}/, @email_to);
332 } 367 }
333 } 368 }
334 } 369 }
@@ -342,7 +377,7 @@ if ($email || $email_list) {
342 if ($email_list) { 377 if ($email_list) {
343 @to = (@to, @list_to); 378 @to = (@to, @list_to);
344 } 379 }
345 output(uniq(@to)); 380 output(merge_email(@to));
346} 381}
347 382
348if ($scm) { 383if ($scm) {
@@ -398,13 +433,16 @@ MAINTAINER field selection options:
398 --git-min-signatures => number of signatures required (default: 1) 433 --git-min-signatures => number of signatures required (default: 1)
399 --git-max-maintainers => maximum maintainers to add (default: 5) 434 --git-max-maintainers => maximum maintainers to add (default: 5)
400 --git-min-percent => minimum percentage of commits required (default: 5) 435 --git-min-percent => minimum percentage of commits required (default: 5)
401 --git-since => git history to use (default: 1-year-ago)
402 --git-blame => use git blame to find modified commits for patch or file 436 --git-blame => use git blame to find modified commits for patch or file
437 --git-since => git history to use (default: 1-year-ago)
438 --hg-since => hg history to use (default: -365)
403 --m => include maintainer(s) if any 439 --m => include maintainer(s) if any
404 --n => include name 'Full Name <addr\@domain.tld>' 440 --n => include name 'Full Name <addr\@domain.tld>'
405 --l => include list(s) if any 441 --l => include list(s) if any
406 --s => include subscriber only list(s) if any 442 --s => include subscriber only list(s) if any
407 --remove-duplicates => minimize duplicate email names/addresses 443 --remove-duplicates => minimize duplicate email names/addresses
444 --roles => show roles (status:subsystem, git-signer, list, etc...)
445 --rolestats => show roles and statistics (commits/total_commits, %)
408 --scm => print SCM tree(s) if any 446 --scm => print SCM tree(s) if any
409 --status => print status if any 447 --status => print status if any
410 --subsystem => print subsystem name if any 448 --subsystem => print subsystem name if any
@@ -430,11 +468,24 @@ Notes:
430 directory are examined as git recurses directories. 468 directory are examined as git recurses directories.
431 Any specified X: (exclude) pattern matches are _not_ ignored. 469 Any specified X: (exclude) pattern matches are _not_ ignored.
432 Used with "--nogit", directory is used as a pattern match, 470 Used with "--nogit", directory is used as a pattern match,
433 no individual file within the directory or subdirectory 471 no individual file within the directory or subdirectory
434 is matched. 472 is matched.
435 Used with "--git-blame", does not iterate all files in directory 473 Used with "--git-blame", does not iterate all files in directory
436 Using "--git-blame" is slow and may add old committers and authors 474 Using "--git-blame" is slow and may add old committers and authors
437 that are no longer active maintainers to the output. 475 that are no longer active maintainers to the output.
476 Using "--roles" or "--rolestats" with git send-email --cc-cmd or any
477 other automated tools that expect only ["name"] <email address>
478 may not work because of additional output after <email address>.
479 Using "--rolestats" and "--git-blame" shows the #/total=% commits,
480 not the percentage of the entire file authored. # of commits is
481 not a good measure of amount of code authored. 1 major commit may
482 contain a thousand lines, 5 trivial commits may modify a single line.
483 If git is not installed, but mercurial (hg) is installed and an .hg
484 repository exists, the following options apply to mercurial:
485 --git,
486 --git-min-signatures, --git-max-maintainers, --git-min-percent, and
487 --git-blame
488 Use --hg-since not --git-since to control date selection
438EOT 489EOT
439} 490}
440 491
@@ -493,7 +544,7 @@ sub parse_email {
493} 544}
494 545
495sub format_email { 546sub format_email {
496 my ($name, $address) = @_; 547 my ($name, $address, $usename) = @_;
497 548
498 my $formatted_email; 549 my $formatted_email;
499 550
@@ -506,11 +557,11 @@ sub format_email {
506 $name = "\"$name\""; 557 $name = "\"$name\"";
507 } 558 }
508 559
509 if ($email_usename) { 560 if ($usename) {
510 if ("$name" eq "") { 561 if ("$name" eq "") {
511 $formatted_email = "$address"; 562 $formatted_email = "$address";
512 } else { 563 } else {
513 $formatted_email = "$name <${address}>"; 564 $formatted_email = "$name <$address>";
514 } 565 }
515 } else { 566 } else {
516 $formatted_email = $address; 567 $formatted_email = $address;
@@ -547,6 +598,71 @@ sub find_ending_index {
547 return $index; 598 return $index;
548} 599}
549 600
601sub get_maintainer_role {
602 my ($index) = @_;
603
604 my $i;
605 my $start = find_starting_index($index);
606 my $end = find_ending_index($index);
607
608 my $role;
609 my $subsystem = $typevalue[$start];
610 if (length($subsystem) > 20) {
611 $subsystem = substr($subsystem, 0, 17);
612 $subsystem =~ s/\s*$//;
613 $subsystem = $subsystem . "...";
614 }
615
616 for ($i = $start + 1; $i < $end; $i++) {
617 my $tv = $typevalue[$i];
618 if ($tv =~ m/^(\C):\s*(.*)/) {
619 my $ptype = $1;
620 my $pvalue = $2;
621 if ($ptype eq "S") {
622 $role = $pvalue;
623 }
624 }
625 }
626
627 $role = lc($role);
628 if ($role eq "supported") {
629 $role = "supporter";
630 } elsif ($role eq "maintained") {
631 $role = "maintainer";
632 } elsif ($role eq "odd fixes") {
633 $role = "odd fixer";
634 } elsif ($role eq "orphan") {
635 $role = "orphan minder";
636 } elsif ($role eq "obsolete") {
637 $role = "obsolete minder";
638 } elsif ($role eq "buried alive in reporters") {
639 $role = "chief penguin";
640 }
641
642 return $role . ":" . $subsystem;
643}
644
645sub get_list_role {
646 my ($index) = @_;
647
648 my $i;
649 my $start = find_starting_index($index);
650 my $end = find_ending_index($index);
651
652 my $subsystem = $typevalue[$start];
653 if (length($subsystem) > 20) {
654 $subsystem = substr($subsystem, 0, 17);
655 $subsystem =~ s/\s*$//;
656 $subsystem = $subsystem . "...";
657 }
658
659 if ($subsystem eq "THE REST") {
660 $subsystem = "";
661 }
662
663 return $subsystem;
664}
665
550sub add_categories { 666sub add_categories {
551 my ($index) = @_; 667 my ($index) = @_;
552 668
@@ -564,17 +680,22 @@ sub add_categories {
564 if ($ptype eq "L") { 680 if ($ptype eq "L") {
565 my $list_address = $pvalue; 681 my $list_address = $pvalue;
566 my $list_additional = ""; 682 my $list_additional = "";
683 my $list_role = get_list_role($i);
684
685 if ($list_role ne "") {
686 $list_role = ":" . $list_role;
687 }
567 if ($list_address =~ m/([^\s]+)\s+(.*)$/) { 688 if ($list_address =~ m/([^\s]+)\s+(.*)$/) {
568 $list_address = $1; 689 $list_address = $1;
569 $list_additional = $2; 690 $list_additional = $2;
570 } 691 }
571 if ($list_additional =~ m/subscribers-only/) { 692 if ($list_additional =~ m/subscribers-only/) {
572 if ($email_subscriber_list) { 693 if ($email_subscriber_list) {
573 push(@list_to, $list_address); 694 push(@list_to, [$list_address, "subscriber list${list_role}"]);
574 } 695 }
575 } else { 696 } else {
576 if ($email_list) { 697 if ($email_list) {
577 push(@list_to, $list_address); 698 push(@list_to, [$list_address, "open list${list_role}"]);
578 } 699 }
579 } 700 }
580 } elsif ($ptype eq "M") { 701 } elsif ($ptype eq "M") {
@@ -585,13 +706,14 @@ sub add_categories {
585 if ($tv =~ m/^(\C):\s*(.*)/) { 706 if ($tv =~ m/^(\C):\s*(.*)/) {
586 if ($1 eq "P") { 707 if ($1 eq "P") {
587 $name = $2; 708 $name = $2;
588 $pvalue = format_email($name, $address); 709 $pvalue = format_email($name, $address, $email_usename);
589 } 710 }
590 } 711 }
591 } 712 }
592 } 713 }
593 if ($email_maintainer) { 714 if ($email_maintainer) {
594 push_email_addresses($pvalue); 715 my $role = get_maintainer_role($i);
716 push_email_addresses($pvalue, $role);
595 } 717 }
596 } elsif ($ptype eq "T") { 718 } elsif ($ptype eq "T") {
597 push(@scm, $pvalue); 719 push(@scm, $pvalue);
@@ -618,7 +740,7 @@ sub email_inuse {
618} 740}
619 741
620sub push_email_address { 742sub push_email_address {
621 my ($line) = @_; 743 my ($line, $role) = @_;
622 744
623 my ($name, $address) = parse_email($line); 745 my ($name, $address) = parse_email($line);
624 746
@@ -627,9 +749,9 @@ sub push_email_address {
627 } 749 }
628 750
629 if (!$email_remove_duplicates) { 751 if (!$email_remove_duplicates) {
630 push(@email_to, format_email($name, $address)); 752 push(@email_to, [format_email($name, $address, $email_usename), $role]);
631 } elsif (!email_inuse($name, $address)) { 753 } elsif (!email_inuse($name, $address)) {
632 push(@email_to, format_email($name, $address)); 754 push(@email_to, [format_email($name, $address, $email_usename), $role]);
633 $email_hash_name{$name}++; 755 $email_hash_name{$name}++;
634 $email_hash_address{$address}++; 756 $email_hash_address{$address}++;
635 } 757 }
@@ -638,24 +760,52 @@ sub push_email_address {
638} 760}
639 761
640sub push_email_addresses { 762sub push_email_addresses {
641 my ($address) = @_; 763 my ($address, $role) = @_;
642 764
643 my @address_list = (); 765 my @address_list = ();
644 766
645 if (rfc822_valid($address)) { 767 if (rfc822_valid($address)) {
646 push_email_address($address); 768 push_email_address($address, $role);
647 } elsif (@address_list = rfc822_validlist($address)) { 769 } elsif (@address_list = rfc822_validlist($address)) {
648 my $array_count = shift(@address_list); 770 my $array_count = shift(@address_list);
649 while (my $entry = shift(@address_list)) { 771 while (my $entry = shift(@address_list)) {
650 push_email_address($entry); 772 push_email_address($entry, $role);
651 } 773 }
652 } else { 774 } else {
653 if (!push_email_address($address)) { 775 if (!push_email_address($address, $role)) {
654 warn("Invalid MAINTAINERS address: '" . $address . "'\n"); 776 warn("Invalid MAINTAINERS address: '" . $address . "'\n");
655 } 777 }
656 } 778 }
657} 779}
658 780
781sub add_role {
782 my ($line, $role) = @_;
783
784 my ($name, $address) = parse_email($line);
785 my $email = format_email($name, $address, $email_usename);
786
787 foreach my $entry (@email_to) {
788 if ($email_remove_duplicates) {
789 my ($entry_name, $entry_address) = parse_email($entry->[0]);
790 if ($name eq $entry_name || $address eq $entry_address) {
791 if ($entry->[1] eq "") {
792 $entry->[1] = "$role";
793 } else {
794 $entry->[1] = "$entry->[1],$role";
795 }
796 }
797 } else {
798 if ($email eq $entry->[0]) {
799 if ($entry->[1] eq "") {
800 $entry->[1] = "$role";
801 } else {
802 $entry->[1] = "$entry->[1],$role";
803 }
804 }
805 }
806 }
807}
808
659sub which { 809sub which {
660 my ($bin) = @_; 810 my ($bin) = @_;
661 811
@@ -669,7 +819,7 @@ sub which {
669} 819}
670 820
671sub mailmap { 821sub mailmap {
672 my @lines = @_; 822 my (@lines) = @_;
673 my %hash; 823 my %hash;
674 824
675 foreach my $line (@lines) { 825 foreach my $line (@lines) {
@@ -678,14 +828,14 @@ sub mailmap {
678 $hash{$name} = $address; 828 $hash{$name} = $address;
679 } elsif ($address ne $hash{$name}) { 829 } elsif ($address ne $hash{$name}) {
680 $address = $hash{$name}; 830 $address = $hash{$name};
681 $line = format_email($name, $address); 831 $line = format_email($name, $address, $email_usename);
682 } 832 }
683 if (exists($mailmap{$name})) { 833 if (exists($mailmap{$name})) {
684 my $obj = $mailmap{$name}; 834 my $obj = $mailmap{$name};
685 foreach my $map_address (@$obj) { 835 foreach my $map_address (@$obj) {
686 if (($map_address eq $address) && 836 if (($map_address eq $address) &&
687 ($map_address ne $hash{$name})) { 837 ($map_address ne $hash{$name})) {
688 $line = format_email($name, $hash{$name}); 838 $line = format_email($name, $hash{$name}, $email_usename);
689 } 839 }
690 } 840 }
691 } 841 }
@@ -694,34 +844,38 @@ sub mailmap {
694 return @lines; 844 return @lines;
695} 845}
696 846
697sub recent_git_signoffs { 847sub git_execute_cmd {
698 my ($file) = @_; 848 my ($cmd) = @_;
699
700 my $sign_offs = "";
701 my $cmd = "";
702 my $output = "";
703 my $count = 0;
704 my @lines = (); 849 my @lines = ();
705 my %hash;
706 my $total_sign_offs;
707 850
708 if (which("git") eq "") { 851 my $output = `$cmd`;
709 warn("$P: git not found. Add --nogit to options?\n"); 852 $output =~ s/^\s*//gm;
710 return; 853 @lines = split("\n", $output);
711 }
712 if (!(-d ".git")) {
713 warn("$P: .git directory not found. Use a git repository for better results.\n");
714 warn("$P: perhaps 'git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git'\n");
715 return;
716 }
717 854
718 $cmd = "git log --since=${email_git_since} -- ${file}"; 855 return @lines;
856}
719 857
720 $output = `${cmd}`; 858sub hg_execute_cmd {
721 $output =~ s/^\s*//gm; 859 my ($cmd) = @_;
860 my @lines = ();
722 861
862 my $output = `$cmd`;
723 @lines = split("\n", $output); 863 @lines = split("\n", $output);
724 864
865 return @lines;
866}
867
868sub vcs_find_signers {
869 my ($cmd) = @_;
870 my @lines = ();
871 my $commits;
872
873 @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
874
875 my $pattern = $VCS_cmds{"commit_pattern"};
876
877 $commits = grep(/$pattern/, @lines); # of commits
878
725 @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines); 879 @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
726 if (!$email_git_penguin_chiefs) { 880 if (!$email_git_penguin_chiefs) {
727 @lines = grep(!/${penguin_chiefs}/i, @lines); 881 @lines = grep(!/${penguin_chiefs}/i, @lines);
@@ -729,111 +883,183 @@ sub recent_git_signoffs {
729 # cut -f2- -d":" 883 # cut -f2- -d":"
730 s/.*:\s*(.+)\s*/$1/ for (@lines); 884 s/.*:\s*(.+)\s*/$1/ for (@lines);
731 885
732 $total_sign_offs = @lines; 886## Reformat email addresses (with names) to avoid badly written signatures
733 887
734 if ($email_remove_duplicates) { 888 foreach my $line (@lines) {
735 @lines = mailmap(@lines); 889 my ($name, $address) = parse_email($line);
890 $line = format_email($name, $address, 1);
736 } 891 }
737 892
738 @lines = sort(@lines); 893 return ($commits, @lines);
739
740 # uniq -c
741 $hash{$_}++ for @lines;
742
743 # sort -rn
744 foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
745 my $sign_offs = $hash{$line};
746 $count++;
747 last if ($sign_offs < $email_git_min_signatures ||
748 $count > $email_git_max_maintainers ||
749 $sign_offs * 100 / $total_sign_offs < $email_git_min_percent);
750 push_email_address($line);
751 }
752} 894}
753 895
754sub save_commits { 896sub vcs_save_commits {
755 my ($cmd, @commits) = @_; 897 my ($cmd) = @_;
756 my $output;
757 my @lines = (); 898 my @lines = ();
899 my @commits = ();
758 900
759 $output = `${cmd}`; 901 @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
760 902
761 @lines = split("\n", $output);
762 foreach my $line (@lines) { 903 foreach my $line (@lines) {
763 if ($line =~ m/^(\w+) /) { 904 if ($line =~ m/$VCS_cmds{"blame_commit_pattern"}/) {
764 push (@commits, $1); 905 push(@commits, $1);
765 } 906 }
766 } 907 }
908
767 return @commits; 909 return @commits;
768} 910}
769 911
770sub git_assign_blame { 912sub vcs_blame {
771 my ($file) = @_; 913 my ($file) = @_;
772
773 my @lines = ();
774 my @commits = ();
775 my $cmd; 914 my $cmd;
776 my $output; 915 my @commits = ();
777 my %hash; 916
778 my $total_sign_offs; 917 return @commits if (!(-f $file));
779 my $count; 918
919 if (@range && $VCS_cmds{"blame_range_cmd"} eq "") {
920 my @all_commits = ();
921
922 $cmd = $VCS_cmds{"blame_file_cmd"};
923 $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
924 @all_commits = vcs_save_commits($cmd);
780 925
781 if (@range) {
782 foreach my $file_range_diff (@range) { 926 foreach my $file_range_diff (@range) {
783 next if (!($file_range_diff =~ m/(.+):(.+):(.+)/)); 927 next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
784 my $diff_file = $1; 928 my $diff_file = $1;
785 my $diff_start = $2; 929 my $diff_start = $2;
786 my $diff_length = $3; 930 my $diff_length = $3;
787 next if (!("$file" eq "$diff_file")); 931 next if ("$file" ne "$diff_file");
788 $cmd = "git blame -l -L $diff_start,+$diff_length $file"; 932 for (my $i = $diff_start; $i < $diff_start + $diff_length; $i++) {
789 @commits = save_commits($cmd, @commits); 933 push(@commits, $all_commits[$i]);
934 }
790 } 935 }
791 } else { 936 } elsif (@range) {
792 if (-f $file) { 937 foreach my $file_range_diff (@range) {
793 $cmd = "git blame -l $file"; 938 next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
794 @commits = save_commits($cmd, @commits); 939 my $diff_file = $1;
940 my $diff_start = $2;
941 my $diff_length = $3;
942 next if ("$file" ne "$diff_file");
943 $cmd = $VCS_cmds{"blame_range_cmd"};
944 $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
945 push(@commits, vcs_save_commits($cmd));
795 } 946 }
947 } else {
948 $cmd = $VCS_cmds{"blame_file_cmd"};
949 $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
950 @commits = vcs_save_commits($cmd);
796 } 951 }
797 952
798 $total_sign_offs = 0; 953 return @commits;
799 @commits = uniq(@commits); 954}
800 foreach my $commit (@commits) {
801 $cmd = "git log -1 ${commit}";
802 955
803 $output = `${cmd}`; 956my $printed_novcs = 0;
804 $output =~ s/^\s*//gm; 957sub vcs_exists {
805 @lines = split("\n", $output); 958 %VCS_cmds = %VCS_cmds_git;
959 return 1 if eval $VCS_cmds{"available"};
960 %VCS_cmds = %VCS_cmds_hg;
961 return 1 if eval $VCS_cmds{"available"};
962 %VCS_cmds = ();
963 if (!$printed_novcs) {
964 warn("$P: No supported VCS found. Add --nogit to options?\n");
965 warn("Using a git repository produces better results.\n");
966 warn("Try Linus Torvalds' latest git repository using:\n");
967 warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n");
968 $printed_novcs = 1;
969 }
970 return 0;
971}
806 972
807 @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines); 973sub vcs_assign {
808 if (!$email_git_penguin_chiefs) { 974 my ($role, $divisor, @lines) = @_;
809 @lines = grep(!/${penguin_chiefs}/i, @lines);
810 }
811 975
812 # cut -f2- -d":" 976 my %hash;
813 s/.*:\s*(.+)\s*/$1/ for (@lines); 977 my $count = 0;
814 978
815 $total_sign_offs += @lines; 979 return if (@lines <= 0);
816 980
817 if ($email_remove_duplicates) { 981 if ($divisor <= 0) {
818 @lines = mailmap(@lines); 982 warn("Bad divisor in " . (caller(0))[3] . ": $divisor\n");
819 } 983 $divisor = 1;
984 }
820 985
821 $hash{$_}++ for @lines; 986 if ($email_remove_duplicates) {
987 @lines = mailmap(@lines);
822 } 988 }
823 989
824 $count = 0; 990 @lines = sort(@lines);
991
992 # uniq -c
993 $hash{$_}++ for @lines;
994
995 # sort -rn
825 foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) { 996 foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
826 my $sign_offs = $hash{$line}; 997 my $sign_offs = $hash{$line};
998 my $percent = $sign_offs * 100 / $divisor;
999
1000 $percent = 100 if ($percent > 100);
827 $count++; 1001 $count++;
828 last if ($sign_offs < $email_git_min_signatures || 1002 last if ($sign_offs < $email_git_min_signatures ||
829 $count > $email_git_max_maintainers || 1003 $count > $email_git_max_maintainers ||
830 $sign_offs * 100 / $total_sign_offs < $email_git_min_percent); 1004 $percent < $email_git_min_percent);
831 push_email_address($line); 1005 push_email_address($line, '');
1006 if ($output_rolestats) {
1007 my $fmt_percent = sprintf("%.0f", $percent);
1008 add_role($line, "$role:$sign_offs/$divisor=$fmt_percent%");
1009 } else {
1010 add_role($line, $role);
1011 }
1012 }
1013}
1014
1015sub vcs_file_signoffs {
1016 my ($file) = @_;
1017
1018 my @signers = ();
1019 my $commits;
1020
1021 return if (!vcs_exists());
1022
1023 my $cmd = $VCS_cmds{"find_signers_cmd"};
1024 $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
1025
1026 ($commits, @signers) = vcs_find_signers($cmd);
1027 vcs_assign("commit_signer", $commits, @signers);
1028}
1029
1030sub vcs_file_blame {
1031 my ($file) = @_;
1032
1033 my @signers = ();
1034 my @commits = ();
1035 my $total_commits;
1036
1037 return if (!vcs_exists());
1038
1039 @commits = vcs_blame($file);
1040 @commits = uniq(@commits);
1041 $total_commits = @commits;
1042
1043 foreach my $commit (@commits) {
1044 my $commit_count;
1045 my @commit_signers = ();
1046
1047 my $cmd = $VCS_cmds{"find_commit_signers_cmd"};
1048 $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
1049
1050 ($commit_count, @commit_signers) = vcs_find_signers($cmd);
1051 push(@signers, @commit_signers);
1052 }
1053
1054 if ($from_filename) {
1055 vcs_assign("commits", $total_commits, @signers);
1056 } else {
1057 vcs_assign("modified commits", $total_commits, @signers);
832 } 1058 }
833} 1059}
834 1060
835sub uniq { 1061sub uniq {
836 my @parms = @_; 1062 my (@parms) = @_;
837 1063
838 my %saw; 1064 my %saw;
839 @parms = grep(!$saw{$_}++, @parms); 1065 @parms = grep(!$saw{$_}++, @parms);
@@ -841,7 +1067,7 @@ sub uniq {
841} 1067}
842 1068
843sub sort_and_uniq { 1069sub sort_and_uniq {
844 my @parms = @_; 1070 my (@parms) = @_;
845 1071
846 my %saw; 1072 my %saw;
847 @parms = sort @parms; 1073 @parms = sort @parms;
@@ -849,8 +1075,27 @@ sub sort_and_uniq {
849 return @parms; 1075 return @parms;
850} 1076}
851 1077
1078sub merge_email {
1079 my @lines;
1080 my %saw;
1081
1082 for (@_) {
1083 my ($address, $role) = @$_;
1084 if (!$saw{$address}) {
1085 if ($output_roles) {
1086 push(@lines, "$address ($role)");
1087 } else {
1088 push(@lines, $address);
1089 }
1090 $saw{$address} = 1;
1091 }
1092 }
1093
1094 return @lines;
1095}
1096
852sub output { 1097sub output {
853 my @parms = @_; 1098 my (@parms) = @_;
854 1099
855 if ($output_multiline) { 1100 if ($output_multiline) {
856 foreach my $line (@parms) { 1101 foreach my $line (@parms) {
@@ -947,11 +1192,9 @@ sub rfc822_validlist ($) {
947 if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so && 1192 if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so &&
948 $s =~ m/^$rfc822_char*$/) { 1193 $s =~ m/^$rfc822_char*$/) {
949 while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) { 1194 while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) {
950 push @r, $1; 1195 push(@r, $1);
951 } 1196 }
952 return wantarray ? (scalar(@r), @r) : 1; 1197 return wantarray ? (scalar(@r), @r) : 1;
953 } 1198 }
954 else { 1199 return wantarray ? () : 0;
955 return wantarray ? () : 0;
956 }
957} 1200}
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index b4b48afb6de6..5d9411839cd7 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -159,7 +159,7 @@ static int pxa2xx_ac97_resume(struct device *dev)
159 return ret; 159 return ret;
160} 160}
161 161
162static struct dev_pm_ops pxa2xx_ac97_pm_ops = { 162static const struct dev_pm_ops pxa2xx_ac97_pm_ops = {
163 .suspend = pxa2xx_ac97_suspend, 163 .suspend = pxa2xx_ac97_suspend,
164 .resume = pxa2xx_ac97_resume, 164 .resume = pxa2xx_ac97_resume,
165}; 165};
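
Side note on the hunk above: marking a dev_pm_ops table const (the same change recurs in the simtec_audio_pmops and soc_pm_ops hunks further down) is safe because the driver core only reads the table. The following is a minimal, stand-alone sketch of the pattern, not taken from any of the patched drivers; the callback names are placeholders.

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical callbacks; each real driver wires up its own. */
static int example_suspend(struct device *dev)
{
	return 0;
}

static int example_resume(struct device *dev)
{
	return 0;
}

/* const lets the table live in read-only data; the PM core never
 * writes to it, it only follows the function pointers. */
static const struct dev_pm_ops example_pm_ops = {
	.suspend	= example_suspend,
	.resume		= example_resume,
};
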
diff --git a/sound/isa/gus/gus_mem.c b/sound/isa/gus/gus_mem.c
index 661205c4dcea..af888a022fc0 100644
--- a/sound/isa/gus/gus_mem.c
+++ b/sound/isa/gus/gus_mem.c
@@ -127,7 +127,8 @@ static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc,
127 !share_id[2] && !share_id[3]) 127 !share_id[2] && !share_id[3])
128 return NULL; 128 return NULL;
129 for (block = alloc->first; block; block = block->next) 129 for (block = alloc->first; block; block = block->next)
130 if (!memcmp(share_id, block->share_id, sizeof(share_id))) 130 if (!memcmp(share_id, block->share_id,
131 sizeof(block->share_id)))
131 return block; 132 return block;
132 return NULL; 133 return NULL;
133} 134}
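
The gus_mem change above, like the wm8900 reg_cache fix later in this series, corrects the classic sizeof-on-a-pointer mistake: share_id is a function parameter, so it decays to a pointer and sizeof(share_id) yields 4 or 8, while the intended length is that of the array member in the struct. A stand-alone user-space sketch (not kernel code, names invented) that makes the difference visible:

#include <stdio.h>
#include <string.h>

struct block {
	unsigned int share_id[4];	/* 16 bytes with 4-byte ints */
};

static int match(const struct block *b, const unsigned int share_id[4])
{
	/* The array parameter decays to a pointer, so sizeof(share_id)
	 * here is sizeof(unsigned int *), not 16.  Taking the size of
	 * the struct member gives the length the memcmp really needs. */
	printf("sizeof(param) = %zu, sizeof(member) = %zu\n",
	       sizeof(share_id), sizeof(b->share_id));
	return !memcmp(share_id, b->share_id, sizeof(b->share_id));
}

int main(void)
{
	struct block b = { { 1, 2, 3, 4 } };
	unsigned int id[4] = { 1, 2, 3, 4 };

	return !match(&b, id);	/* 0 on a successful match */
}
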
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 20cb60afb200..c11920623009 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -2122,7 +2122,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
2122 } 2122 }
2123 /* nothing should be in powerdown mode */ 2123 /* nothing should be in powerdown mode */
2124 snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0); 2124 snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0);
2125 end_time = jiffies + msecs_to_jiffies(120); 2125 end_time = jiffies + msecs_to_jiffies(5000);
2126 do { 2126 do {
2127 if ((snd_ac97_read(ac97, AC97_POWERDOWN) & 0x0f) == 0x0f) 2127 if ((snd_ac97_read(ac97, AC97_POWERDOWN) & 0x0f) == 0x0f)
2128 goto __ready_ok; 2128 goto __ready_ok;
diff --git a/sound/pci/cs5535audio/Makefile b/sound/pci/cs5535audio/Makefile
index fda7a94c992f..ccc642269b9e 100644
--- a/sound/pci/cs5535audio/Makefile
+++ b/sound/pci/cs5535audio/Makefile
@@ -4,9 +4,7 @@
4 4
5snd-cs5535audio-y := cs5535audio.o cs5535audio_pcm.o 5snd-cs5535audio-y := cs5535audio.o cs5535audio_pcm.o
6snd-cs5535audio-$(CONFIG_PM) += cs5535audio_pm.o 6snd-cs5535audio-$(CONFIG_PM) += cs5535audio_pm.o
7ifdef CONFIG_MGEODE_LX
8snd-cs5535audio-$(CONFIG_OLPC) += cs5535audio_olpc.o 7snd-cs5535audio-$(CONFIG_OLPC) += cs5535audio_olpc.o
9endif
10 8
11# Toplevel Module Dependency 9# Toplevel Module Dependency
12obj-$(CONFIG_SND_CS5535AUDIO) += snd-cs5535audio.o 10obj-$(CONFIG_SND_CS5535AUDIO) += snd-cs5535audio.o
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 05f56e04849b..91e7faf69bbb 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -389,6 +389,7 @@ probefail_out:
389 389
390static void __devexit snd_cs5535audio_remove(struct pci_dev *pci) 390static void __devexit snd_cs5535audio_remove(struct pci_dev *pci)
391{ 391{
392 olpc_quirks_cleanup();
392 snd_card_free(pci_get_drvdata(pci)); 393 snd_card_free(pci_get_drvdata(pci));
393 pci_set_drvdata(pci, NULL); 394 pci_set_drvdata(pci, NULL);
394} 395}
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index 7a298ac662e3..51966d782a3c 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -99,10 +99,11 @@ int snd_cs5535audio_suspend(struct pci_dev *pci, pm_message_t state);
99int snd_cs5535audio_resume(struct pci_dev *pci); 99int snd_cs5535audio_resume(struct pci_dev *pci);
100#endif 100#endif
101 101
102#if defined(CONFIG_OLPC) && defined(CONFIG_MGEODE_LX) 102#ifdef CONFIG_OLPC
103void __devinit olpc_prequirks(struct snd_card *card, 103void __devinit olpc_prequirks(struct snd_card *card,
104 struct snd_ac97_template *ac97); 104 struct snd_ac97_template *ac97);
105int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97); 105int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97);
106void __devexit olpc_quirks_cleanup(void);
106void olpc_analog_input(struct snd_ac97 *ac97, int on); 107void olpc_analog_input(struct snd_ac97 *ac97, int on);
107void olpc_mic_bias(struct snd_ac97 *ac97, int on); 108void olpc_mic_bias(struct snd_ac97 *ac97, int on);
108 109
@@ -128,6 +129,7 @@ static inline int olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
128{ 129{
129 return 0; 130 return 0;
130} 131}
132static inline void olpc_quirks_cleanup(void) { }
131static inline void olpc_analog_input(struct snd_ac97 *ac97, int on) { } 133static inline void olpc_analog_input(struct snd_ac97 *ac97, int on) { }
132static inline void olpc_mic_bias(struct snd_ac97 *ac97, int on) { } 134static inline void olpc_mic_bias(struct snd_ac97 *ac97, int on) { }
133static inline void olpc_capture_open(struct snd_ac97 *ac97) { } 135static inline void olpc_capture_open(struct snd_ac97 *ac97) { }
diff --git a/sound/pci/cs5535audio/cs5535audio_olpc.c b/sound/pci/cs5535audio/cs5535audio_olpc.c
index 5c6814335cd7..50da49be9ae5 100644
--- a/sound/pci/cs5535audio/cs5535audio_olpc.c
+++ b/sound/pci/cs5535audio/cs5535audio_olpc.c
@@ -13,10 +13,13 @@
13#include <sound/info.h> 13#include <sound/info.h>
14#include <sound/control.h> 14#include <sound/control.h>
15#include <sound/ac97_codec.h> 15#include <sound/ac97_codec.h>
16#include <linux/gpio.h>
16 17
17#include <asm/olpc.h> 18#include <asm/olpc.h>
18#include "cs5535audio.h" 19#include "cs5535audio.h"
19 20
21#define DRV_NAME "cs5535audio-olpc"
22
20/* 23/*
21 * OLPC has an additional feature on top of the regular AD1888 codec features. 24 * OLPC has an additional feature on top of the regular AD1888 codec features.
22 * It has an Analog Input mode that is switched into (after disabling the 25 * It has an Analog Input mode that is switched into (after disabling the
@@ -38,10 +41,7 @@ void olpc_analog_input(struct snd_ac97 *ac97, int on)
38 } 41 }
39 42
40 /* set Analog Input through GPIO */ 43 /* set Analog Input through GPIO */
41 if (on) 44 gpio_set_value(OLPC_GPIO_MIC_AC, on);
42 geode_gpio_set(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL);
43 else
44 geode_gpio_clear(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL);
45} 45}
46 46
47/* 47/*
@@ -73,8 +73,7 @@ static int olpc_dc_info(struct snd_kcontrol *kctl,
73 73
74static int olpc_dc_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *v) 74static int olpc_dc_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *v)
75{ 75{
76 v->value.integer.value[0] = geode_gpio_isset(OLPC_GPIO_MIC_AC, 76 v->value.integer.value[0] = gpio_get_value(OLPC_GPIO_MIC_AC);
77 GPIO_OUTPUT_VAL);
78 return 0; 77 return 0;
79} 78}
80 79
@@ -153,6 +152,12 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
153 if (!machine_is_olpc()) 152 if (!machine_is_olpc())
154 return 0; 153 return 0;
155 154
155 if (gpio_request(OLPC_GPIO_MIC_AC, DRV_NAME)) {
156 printk(KERN_ERR DRV_NAME ": unable to allocate MIC GPIO\n");
157 return -EIO;
158 }
159 gpio_direction_output(OLPC_GPIO_MIC_AC, 0);
160
156 /* drop the original AD1888 HPF control */ 161 /* drop the original AD1888 HPF control */
157 memset(&elem, 0, sizeof(elem)); 162 memset(&elem, 0, sizeof(elem));
158 elem.iface = SNDRV_CTL_ELEM_IFACE_MIXER; 163 elem.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -169,11 +174,18 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
169 for (i = 0; i < ARRAY_SIZE(olpc_cs5535audio_ctls); i++) { 174 for (i = 0; i < ARRAY_SIZE(olpc_cs5535audio_ctls); i++) {
170 err = snd_ctl_add(card, snd_ctl_new1(&olpc_cs5535audio_ctls[i], 175 err = snd_ctl_add(card, snd_ctl_new1(&olpc_cs5535audio_ctls[i],
171 ac97->private_data)); 176 ac97->private_data));
172 if (err < 0) 177 if (err < 0) {
178 gpio_free(OLPC_GPIO_MIC_AC);
173 return err; 179 return err;
180 }
174 } 181 }
175 182
176 /* turn off the mic by default */ 183 /* turn off the mic by default */
177 olpc_mic_bias(ac97, 0); 184 olpc_mic_bias(ac97, 0);
178 return 0; 185 return 0;
179} 186}
187
188void __devexit olpc_quirks_cleanup(void)
189{
190 gpio_free(OLPC_GPIO_MIC_AC);
191}
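
The OLPC hunks above drop the Geode-specific geode_gpio_* helpers in favour of the generic gpiolib calls from <linux/gpio.h>, and balance the new gpio_request() in olpc_quirks() with a gpio_free() in the remove path. A rough sketch of that claim/configure/drive/release sequence follows; the GPIO number and label here are made up (the driver itself uses OLPC_GPIO_MIC_AC and its DRV_NAME string).

#include <linux/gpio.h>
#include <linux/kernel.h>

#define EXAMPLE_GPIO	100		/* hypothetical GPIO number */

static int example_gpio_setup(void)
{
	int err;

	/* Claim the line before touching it; gpiolib tracks ownership. */
	err = gpio_request(EXAMPLE_GPIO, "example-driver");
	if (err) {
		pr_err("example: unable to claim GPIO %d\n", EXAMPLE_GPIO);
		return err;
	}

	/* Make it an output, driven low initially (switch off). */
	gpio_direction_output(EXAMPLE_GPIO, 0);
	return 0;
}

static void example_gpio_set(int on)
{
	/* Replaces the geode_gpio_set()/geode_gpio_clear() pair. */
	gpio_set_value(EXAMPLE_GPIO, on);
}

static void example_gpio_teardown(void)
{
	/* Release the line on remove, as olpc_quirks_cleanup() now does. */
	gpio_free(EXAMPLE_GPIO);
}
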
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index d24328661c6a..40ccb419b6e9 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -24,6 +24,7 @@
24#include <linux/compat.h> 24#include <linux/compat.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/ctype.h> 26#include <linux/ctype.h>
27#include <linux/string.h>
27#include <linux/firmware.h> 28#include <linux/firmware.h>
28#include <sound/core.h> 29#include <sound/core.h>
29#include "hda_codec.h" 30#include "hda_codec.h"
@@ -428,8 +429,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf)
428 char *key, *val; 429 char *key, *val;
429 struct hda_hint *hint; 430 struct hda_hint *hint;
430 431
431 while (isspace(*buf)) 432 buf = skip_spaces(buf);
432 buf++;
433 if (!*buf || *buf == '#' || *buf == '\n') 433 if (!*buf || *buf == '#' || *buf == '\n')
434 return 0; 434 return 0;
435 if (*buf == '=') 435 if (*buf == '=')
@@ -444,8 +444,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf)
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
446 *val++ = 0; 446 *val++ = 0;
447 while (isspace(*val)) 447 val = skip_spaces(val);
448 val++;
449 remove_trail_spaces(key); 448 remove_trail_spaces(key);
450 remove_trail_spaces(val); 449 remove_trail_spaces(val);
451 hint = get_hint(codec, key); 450 hint = get_hint(codec, key);
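
skip_spaces(), declared in <linux/string.h>, returns a pointer to the first non-whitespace character of its argument, so the two open-coded isspace() loops in parse_hints() collapse to single assignments. A small sketch of the equivalence; the function names below are illustrative only.

#include <linux/ctype.h>
#include <linux/string.h>

/* Old style: step over leading whitespace by hand. */
static const char *skip_leading_open_coded(const char *s)
{
	while (isspace(*s))
		s++;
	return s;
}

/* New style: the helper does the same walk and returns the first
 * non-whitespace character, so callers just reassign the pointer. */
static const char *skip_leading_helper(const char *s)
{
	return skip_spaces(s);
}
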
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e54420e691ae..9b56f937913e 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2713,6 +2713,9 @@ static struct pci_device_id azx_ids[] = {
2713 { PCI_DEVICE(0x10de, 0x0ac1), .driver_data = AZX_DRIVER_NVIDIA }, 2713 { PCI_DEVICE(0x10de, 0x0ac1), .driver_data = AZX_DRIVER_NVIDIA },
2714 { PCI_DEVICE(0x10de, 0x0ac2), .driver_data = AZX_DRIVER_NVIDIA }, 2714 { PCI_DEVICE(0x10de, 0x0ac2), .driver_data = AZX_DRIVER_NVIDIA },
2715 { PCI_DEVICE(0x10de, 0x0ac3), .driver_data = AZX_DRIVER_NVIDIA }, 2715 { PCI_DEVICE(0x10de, 0x0ac3), .driver_data = AZX_DRIVER_NVIDIA },
2716 { PCI_DEVICE(0x10de, 0x0be2), .driver_data = AZX_DRIVER_NVIDIA },
2717 { PCI_DEVICE(0x10de, 0x0be3), .driver_data = AZX_DRIVER_NVIDIA },
2718 { PCI_DEVICE(0x10de, 0x0be4), .driver_data = AZX_DRIVER_NVIDIA },
2716 { PCI_DEVICE(0x10de, 0x0d94), .driver_data = AZX_DRIVER_NVIDIA }, 2719 { PCI_DEVICE(0x10de, 0x0d94), .driver_data = AZX_DRIVER_NVIDIA },
2717 { PCI_DEVICE(0x10de, 0x0d95), .driver_data = AZX_DRIVER_NVIDIA }, 2720 { PCI_DEVICE(0x10de, 0x0d95), .driver_data = AZX_DRIVER_NVIDIA },
2718 { PCI_DEVICE(0x10de, 0x0d96), .driver_data = AZX_DRIVER_NVIDIA }, 2721 { PCI_DEVICE(0x10de, 0x0d96), .driver_data = AZX_DRIVER_NVIDIA },
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 447eda1f6770..1a36137e13ec 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1789,6 +1789,14 @@ static int patch_ad1981(struct hda_codec *codec)
1789 1789
1790 codec->patch_ops.init = ad1981_hp_init; 1790 codec->patch_ops.init = ad1981_hp_init;
1791 codec->patch_ops.unsol_event = ad1981_hp_unsol_event; 1791 codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
1792 /* set the upper-limit for mixer amp to 0dB for avoiding the
1793 * possible damage by overloading
1794 */
1795 snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
1796 (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
1797 (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
1798 (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
1799 (1 << AC_AMPCAP_MUTE_SHIFT));
1792 break; 1800 break;
1793 case AD1981_THINKPAD: 1801 case AD1981_THINKPAD:
1794 spec->mixers[0] = ad1981_thinkpad_mixers; 1802 spec->mixers[0] = ad1981_thinkpad_mixers;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 888b6313eeca..aeed4cc5aa79 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6248,6 +6248,7 @@ static const char *alc260_models[ALC260_MODEL_LAST] = {
6248 6248
6249static struct snd_pci_quirk alc260_cfg_tbl[] = { 6249static struct snd_pci_quirk alc260_cfg_tbl[] = {
6250 SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER), 6250 SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER),
6251 SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL),
6251 SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER), 6252 SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
6252 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100), 6253 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
6253 SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013), 6254 SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index c9438dd62df3..dbc368c08263 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -199,7 +199,7 @@ static void wm8900_reset(struct snd_soc_codec *codec)
199 snd_soc_write(codec, WM8900_REG_RESET, 0); 199 snd_soc_write(codec, WM8900_REG_RESET, 0);
200 200
201 memcpy(codec->reg_cache, wm8900_reg_defaults, 201 memcpy(codec->reg_cache, wm8900_reg_defaults,
202 sizeof(codec->reg_cache)); 202 sizeof(wm8900_reg_defaults));
203} 203}
204 204
205static int wm8900_hp_event(struct snd_soc_dapm_widget *w, 205static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c
index d441c3b64631..4984754f3298 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.c
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.c
@@ -312,7 +312,7 @@ int simtec_audio_resume(struct device *dev)
312 return 0; 312 return 0;
313} 313}
314 314
315struct dev_pm_ops simtec_audio_pmops = { 315const struct dev_pm_ops simtec_audio_pmops = {
316 .resume = simtec_audio_resume, 316 .resume = simtec_audio_resume,
317}; 317};
318EXPORT_SYMBOL_GPL(simtec_audio_pmops); 318EXPORT_SYMBOL_GPL(simtec_audio_pmops);
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h
index 2714203af161..e18faee30cce 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.h
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.h
@@ -15,7 +15,7 @@ extern int simtec_audio_core_probe(struct platform_device *pdev,
15extern int simtec_audio_remove(struct platform_device *pdev); 15extern int simtec_audio_remove(struct platform_device *pdev);
16 16
17#ifdef CONFIG_PM 17#ifdef CONFIG_PM
18extern struct dev_pm_ops simtec_audio_pmops; 18extern const struct dev_pm_ops simtec_audio_pmops;
19#define simtec_audio_pm &simtec_audio_pmops 19#define simtec_audio_pm &simtec_audio_pmops
20#else 20#else
21#define simtec_audio_pm NULL 21#define simtec_audio_pm NULL
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index ef8f28284cb9..0a6440c6f54a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1236,7 +1236,7 @@ static int soc_poweroff(struct device *dev)
1236 return 0; 1236 return 0;
1237} 1237}
1238 1238
1239static struct dev_pm_ops soc_pm_ops = { 1239static const struct dev_pm_ops soc_pm_ops = {
1240 .suspend = soc_suspend, 1240 .suspend = soc_suspend,
1241 .resume = soc_resume, 1241 .resume = soc_resume,
1242 .poweroff = soc_poweroff, 1242 .poweroff = soc_poweroff,